Add/remove nova files

Delete files that are not needed.

Add back in some nova files that are needed.

Also, resync gantt/version.py from the nova tree; we
need the new version of this file.

Change-Id: I48ab670502417d588eb8ffd6044667ed5a9e1560
Don Dugger 2014-01-28 23:08:59 -07:00
parent b973ae3952
commit e62cc17eb7
105 changed files with 2654 additions and 18209 deletions


@@ -1,143 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Availability zone helper functions."""
from oslo.config import cfg
from nova import db
from nova.openstack.common import memorycache
# NOTE(vish): azs don't change that often, so cache them for an hour to
# avoid hitting the db multiple times on every request.
AZ_CACHE_SECONDS = 60 * 60
MC = None
availability_zone_opts = [
cfg.StrOpt('internal_service_availability_zone',
default='internal',
help='availability_zone to show internal services under'),
cfg.StrOpt('default_availability_zone',
default='nova',
help='default compute node availability_zone'),
]
CONF = cfg.CONF
CONF.register_opts(availability_zone_opts)
def _get_cache():
global MC
if MC is None:
MC = memorycache.get_client()
return MC
def reset_cache():
"""Reset the cache, mainly for testing purposes and update
availability_zone for host aggregate
"""
global MC
MC = None
def _make_cache_key(host):
return "azcache-%s" % host.encode('utf-8')
def set_availability_zones(context, services):
# Makes sure services isn't a sqlalchemy object
services = [dict(service.iteritems()) for service in services]
metadata = db.aggregate_host_get_by_metadata_key(context,
key='availability_zone')
for service in services:
az = CONF.internal_service_availability_zone
if service['topic'] == "compute":
if metadata.get(service['host']):
az = u','.join(list(metadata[service['host']]))
else:
az = CONF.default_availability_zone
# update the cache
cache = _get_cache()
cache_key = _make_cache_key(service['host'])
cache.delete(cache_key)
cache.set(cache_key, az, AZ_CACHE_SECONDS)
service['availability_zone'] = az
return services
def get_host_availability_zone(context, host, conductor_api=None):
if conductor_api:
metadata = conductor_api.aggregate_metadata_get_by_host(
context, host, key='availability_zone')
else:
metadata = db.aggregate_metadata_get_by_host(
context, host, key='availability_zone')
if 'availability_zone' in metadata:
az = list(metadata['availability_zone'])[0]
else:
az = CONF.default_availability_zone
return az
def get_availability_zones(context, get_only_available=False):
"""Return available and unavailable zones on demand.
:param get_only_available: if True, return only the available zones;
if False (the default), return both available and unavailable zones
"""
enabled_services = db.service_get_all(context, False)
enabled_services = set_availability_zones(context, enabled_services)
available_zones = []
for zone in [service['availability_zone'] for service
in enabled_services]:
if zone not in available_zones:
available_zones.append(zone)
if not get_only_available:
disabled_services = db.service_get_all(context, True)
disabled_services = set_availability_zones(context, disabled_services)
not_available_zones = []
zones = [service['availability_zone'] for service in disabled_services
if service['availability_zone'] not in available_zones]
for zone in zones:
if zone not in not_available_zones:
not_available_zones.append(zone)
return (available_zones, not_available_zones)
else:
return available_zones
def get_instance_availability_zone(context, instance):
"""Return availability zone of specified instance."""
host = str(instance.get('host'))
if not host:
return None
cache_key = _make_cache_key(host)
cache = _get_cache()
az = cache.get(cache_key)
if not az:
elevated = context.elevated()
az = get_host_availability_zone(elevated, host)
cache.set(cache_key, az, AZ_CACHE_SECONDS)
return az
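
A quick standalone sketch (not part of the commit; the service dicts and zone names below are invented) of the dedup-and-partition logic that get_availability_zones() applies to enabled and disabled services:

def bucket_zones(enabled_services, disabled_services):
    available = []
    for svc in enabled_services:
        if svc['availability_zone'] not in available:
            available.append(svc['availability_zone'])
    not_available = []
    for svc in disabled_services:
        zone = svc['availability_zone']
        if zone not in available and zone not in not_available:
            not_available.append(zone)
    return (available, not_available)

enabled = [{'availability_zone': 'nova'}, {'availability_zone': 'az1'},
           {'availability_zone': 'nova'}]
disabled = [{'availability_zone': 'az2'}, {'availability_zone': 'nova'}]
print(bucket_zones(enabled, disabled))  # (['nova', 'az1'], ['az2'])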


@@ -1,93 +0,0 @@
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Base RPC client and server common to all services.
"""
from oslo.config import cfg
from nova.openstack.common import jsonutils
from nova import rpcclient
CONF = cfg.CONF
rpcapi_cap_opt = cfg.StrOpt('baseapi',
help='Set a version cap for messages sent to the base api in any '
'service')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
_NAMESPACE = 'baseapi'
class BaseAPI(rpcclient.RpcProxy):
"""Client side of the base rpc API.
API version history:
1.0 - Initial version.
1.1 - Add get_backdoor_port
"""
#
# NOTE(russellb): This is the default minimum version that the server
# (manager) side must implement unless otherwise specified using a version
# argument to self.call()/cast()/etc. here. It should be left as X.0 where
# X is the current major API version (1.0, 2.0, ...). For more information
# about rpc API versioning, see the docs in
# openstack/common/rpc/dispatcher.py.
#
BASE_RPC_API_VERSION = '1.0'
VERSION_ALIASES = {
# baseapi was added in havana
}
def __init__(self, topic):
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.baseapi,
CONF.upgrade_levels.baseapi)
super(BaseAPI, self).__init__(topic=topic,
default_version=self.BASE_RPC_API_VERSION,
version_cap=version_cap)
self.client = self.get_client(namespace=_NAMESPACE)
def ping(self, context, arg, timeout=None):
arg_p = jsonutils.to_primitive(arg)
cctxt = self.client.prepare(timeout=timeout)
return cctxt.call(context, 'ping', arg=arg_p)
def get_backdoor_port(self, context, host):
cctxt = self.client.prepare(server=host, version='1.1')
return cctxt.call(context, 'get_backdoor_port')
class BaseRPCAPI(object):
"""Server side of the base RPC API."""
RPC_API_NAMESPACE = _NAMESPACE
RPC_API_VERSION = '1.1'
def __init__(self, service_name, backdoor_port):
self.service_name = service_name
self.backdoor_port = backdoor_port
def ping(self, context, arg):
resp = {'service': self.service_name, 'arg': arg}
return jsonutils.to_primitive(resp)
def get_backdoor_port(self, context):
return self.backdoor_port
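
For reference, a minimal in-process stand-in (invented class and values, no message bus involved) showing the request/response shape of the base API's ping and get_backdoor_port calls:

class EchoBaseRPCAPI(object):
    """Stand-in for the server side of the base RPC API."""
    def __init__(self, service_name, backdoor_port):
        self.service_name = service_name
        self.backdoor_port = backdoor_port

    def ping(self, context, arg):
        # Mirrors BaseRPCAPI.ping: echo the argument with the service name.
        return {'service': self.service_name, 'arg': arg}

    def get_backdoor_port(self, context):
        return self.backdoor_port

server = EchoBaseRPCAPI('scheduler', backdoor_port=4444)
print(server.ping(None, 'hello'))      # {'service': 'scheduler', 'arg': 'hello'}
print(server.get_backdoor_port(None))  # 4444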


@@ -1,504 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Isaku Yamahata <yamahata@valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova import utils
from nova.virt import driver
CONF = cfg.CONF
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
_DEFAULT_MAPPINGS = {'ami': 'sda1',
'ephemeral0': 'sda2',
'root': DEFAULT_ROOT_DEV_NAME,
'swap': 'sda3'}
bdm_legacy_fields = set(['device_name', 'delete_on_termination',
'virtual_name', 'snapshot_id',
'volume_id', 'volume_size', 'no_device',
'connection_info'])
bdm_new_fields = set(['source_type', 'destination_type',
'guest_format', 'device_type', 'disk_bus', 'boot_index',
'device_name', 'delete_on_termination', 'snapshot_id',
'volume_id', 'volume_size', 'image_id', 'no_device',
'connection_info'])
bdm_db_only_fields = set(['id', 'instance_uuid'])
bdm_db_inherited_fields = set(['created_at', 'updated_at',
'deleted_at', 'deleted'])
bdm_new_non_api_fields = set(['volume_id', 'snapshot_id',
'image_id', 'connection_info'])
bdm_new_api_only_fields = set(['uuid'])
bdm_new_api_fields = ((bdm_new_fields - bdm_new_non_api_fields) |
bdm_new_api_only_fields)
class BlockDeviceDict(dict):
"""Represents a Block Device Mapping in Nova."""
_fields = bdm_new_fields
_db_only_fields = (bdm_db_only_fields |
bdm_db_inherited_fields)
_required_fields = set(['source_type'])
def __init__(self, bdm_dict=None, do_not_default=None):
super(BlockDeviceDict, self).__init__()
bdm_dict = bdm_dict or {}
do_not_default = do_not_default or set()
self._validate(bdm_dict)
# NOTE (ndipanov): Never default db fields
self.update(
dict((field, None)
for field in self._fields - do_not_default))
self.update(bdm_dict)
def _validate(self, bdm_dict):
"""Basic data format validations."""
dict_fields = set(key for key, _ in bdm_dict.iteritems())
# Check that there are no bogus fields
if not (dict_fields <=
(self._fields | self._db_only_fields)):
raise exception.InvalidBDMFormat(
details="Some fields are invalid.")
if bdm_dict.get('no_device'):
return
# Check that all required fields are there
if (self._required_fields and
not ((dict_fields & self._required_fields) ==
self._required_fields)):
raise exception.InvalidBDMFormat(
details="Some required fields are missing")
if 'delete_on_termination' in bdm_dict:
bdm_dict['delete_on_termination'] = strutils.bool_from_string(
bdm_dict['delete_on_termination'])
if bdm_dict.get('device_name') is not None:
validate_device_name(bdm_dict['device_name'])
validate_and_default_volume_size(bdm_dict)
if bdm_dict.get('boot_index'):
try:
bdm_dict['boot_index'] = int(bdm_dict['boot_index'])
except ValueError:
raise exception.InvalidBDMFormat(
details="Boot index is invalid.")
@classmethod
def from_legacy(cls, legacy_bdm):
copy_over_fields = bdm_legacy_fields & bdm_new_fields
copy_over_fields |= (bdm_db_only_fields |
bdm_db_inherited_fields)
# NOTE (ndipanov): These fields cannot be computed
# from legacy bdm, so do not default them
# to avoid overwriting meaningful values in the db
non_computable_fields = set(['boot_index', 'disk_bus',
'guest_format', 'device_type'])
new_bdm = dict((fld, val) for fld, val in legacy_bdm.iteritems()
if fld in copy_over_fields)
virt_name = legacy_bdm.get('virtual_name')
if is_swap_or_ephemeral(virt_name):
new_bdm['source_type'] = 'blank'
new_bdm['delete_on_termination'] = True
new_bdm['destination_type'] = 'local'
if virt_name == 'swap':
new_bdm['guest_format'] = 'swap'
else:
new_bdm['guest_format'] = CONF.default_ephemeral_format
elif legacy_bdm.get('snapshot_id'):
new_bdm['source_type'] = 'snapshot'
new_bdm['destination_type'] = 'volume'
elif legacy_bdm.get('volume_id'):
new_bdm['source_type'] = 'volume'
new_bdm['destination_type'] = 'volume'
elif legacy_bdm.get('no_device'):
# NOTE (ndipanov): Just keep the BDM for now,
pass
else:
raise exception.InvalidBDMFormat(
details="Unrecognized legacy format.")
return cls(new_bdm, non_computable_fields)
@classmethod
def from_api(cls, api_dict):
"""Transform the API format of data to the internally used one.
Only validate if the source_type field makes sense.
"""
if not api_dict.get('no_device'):
source_type = api_dict.get('source_type')
device_uuid = api_dict.get('uuid')
if source_type not in ('volume', 'image', 'snapshot', 'blank'):
raise exception.InvalidBDMFormat(
details="Invalid source_type field.")
elif source_type != 'blank':
if not device_uuid:
raise exception.InvalidBDMFormat(
details="Missing device UUID.")
api_dict[source_type + '_id'] = device_uuid
api_dict.pop('uuid', None)
return cls(api_dict)
def legacy(self):
copy_over_fields = bdm_legacy_fields - set(['virtual_name'])
copy_over_fields |= (bdm_db_only_fields |
bdm_db_inherited_fields)
legacy_block_device = dict((field, self.get(field))
for field in copy_over_fields if field in self)
source_type = self.get('source_type')
destination_type = self.get('destination_type')
no_device = self.get('no_device')
if source_type == 'blank':
if self['guest_format'] == 'swap':
legacy_block_device['virtual_name'] = 'swap'
else:
# NOTE (ndipanov): Always label as 0, it is up to
# the calling routine to re-enumerate them
legacy_block_device['virtual_name'] = 'ephemeral0'
elif source_type in ('volume', 'snapshot') or no_device:
legacy_block_device['virtual_name'] = None
elif source_type == 'image':
if destination_type != 'volume':
# NOTE(ndipanov): Image bdms with local destination
# have no meaning in the legacy format - raise
raise exception.InvalidBDMForLegacy()
legacy_block_device['virtual_name'] = None
return legacy_block_device
def is_safe_for_update(block_device_dict):
"""Determine if passed dict is a safe subset for update.
Safe subset in this case means a safe subset of both legacy
and new versions of data, that can be passed to an UPDATE query
without any transformation.
"""
fields = set(block_device_dict.keys())
return fields <= (bdm_new_fields |
bdm_db_inherited_fields |
bdm_db_only_fields)
def create_image_bdm(image_ref, boot_index=0):
"""Create a block device dict based on the image_ref.
This is useful in the API layer to keep the compatibility
with having an image_ref as a field in the instance requests
"""
return BlockDeviceDict(
{'source_type': 'image',
'image_id': image_ref,
'delete_on_termination': True,
'boot_index': boot_index,
'device_type': 'disk',
'destination_type': 'local'})
def legacy_mapping(block_device_mapping):
"""Transform a list of block devices of an instance back to the
legacy data format.
"""
legacy_block_device_mapping = []
for bdm in block_device_mapping:
try:
legacy_block_device = BlockDeviceDict(bdm).legacy()
except exception.InvalidBDMForLegacy:
continue
legacy_block_device_mapping.append(legacy_block_device)
# Re-enumerate the ephemeral devices
for i, dev in enumerate(dev for dev in legacy_block_device_mapping
if dev['virtual_name'] and
is_ephemeral(dev['virtual_name'])):
dev['virtual_name'] = dev['virtual_name'][:-1] + str(i)
return legacy_block_device_mapping
def from_legacy_mapping(legacy_block_device_mapping, image_uuid='',
root_device_name=None):
"""Transform a legacy list of block devices to the new data format."""
new_bdms = [BlockDeviceDict.from_legacy(legacy_bdm)
for legacy_bdm in legacy_block_device_mapping]
image_bdm = None
volume_backed = False
# Try to assign boot_device
if not root_device_name and not image_uuid:
# NOTE (ndipanov): If there is no root_device, pick the first non
# blank one.
non_blank = [bdm for bdm in new_bdms if bdm['source_type'] != 'blank']
if non_blank:
non_blank[0]['boot_index'] = 0
else:
for bdm in new_bdms:
if (bdm['source_type'] in ('volume', 'snapshot', 'image') and
root_device_name is not None and
(strip_dev(bdm.get('device_name')) ==
strip_dev(root_device_name))):
bdm['boot_index'] = 0
volume_backed = True
elif not bdm['no_device']:
bdm['boot_index'] = -1
else:
bdm['boot_index'] = None
if not volume_backed and image_uuid:
image_bdm = create_image_bdm(image_uuid, boot_index=0)
return ([image_bdm] if image_bdm else []) + new_bdms
def properties_root_device_name(properties):
"""get root device name from image meta data.
If it isn't specified, return None.
"""
root_device_name = None
# NOTE(yamahata): see image_service.s3.s3create()
for bdm in properties.get('mappings', []):
if bdm['virtual'] == 'root':
root_device_name = bdm['device']
# NOTE(yamahata): register_image's command line can override
# <machine>.manifest.xml
if 'root_device_name' in properties:
root_device_name = properties['root_device_name']
return root_device_name
def validate_device_name(value):
try:
# NOTE (ndipanov): Do not allow empty device names
# until assigning default values
# is supported by nova.compute
utils.check_string_length(value, 'Device name',
min_length=1, max_length=255)
except exception.InvalidInput as e:
raise exception.InvalidBDMFormat(
details="Device name empty or too long.")
if ' ' in value:
raise exception.InvalidBDMFormat(
details="Device name contains spaces.")
def validate_and_default_volume_size(bdm):
if bdm.get('volume_size'):
try:
bdm['volume_size'] = utils.validate_integer(
bdm['volume_size'], 'volume_size', min_value=0)
except exception.InvalidInput as e:
raise exception.InvalidBDMFormat(
details="Invalid volume_size.")
_ephemeral = re.compile('^ephemeral(\d|[1-9]\d+)$')
def is_ephemeral(device_name):
return _ephemeral.match(device_name) is not None
def ephemeral_num(ephemeral_name):
assert is_ephemeral(ephemeral_name)
return int(_ephemeral.sub('\\1', ephemeral_name))
def is_swap_or_ephemeral(device_name):
return (device_name and
(device_name == 'swap' or is_ephemeral(device_name)))
def new_format_is_swap(bdm):
if (bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local' and
bdm.get('guest_format') == 'swap'):
return True
return False
def new_format_is_ephemeral(bdm):
if (bdm.get('source_type') == 'blank' and not
new_format_is_swap(bdm)):
return True
return False
def get_root_bdm(bdms):
try:
return (bdm for bdm in bdms if bdm.get('boot_index', -1) == 0).next()
except StopIteration:
return None
def mappings_prepend_dev(mappings):
"""Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type."""
for m in mappings:
virtual = m['virtual']
if (is_swap_or_ephemeral(virtual) and
(not m['device'].startswith('/'))):
m['device'] = '/dev/' + m['device']
return mappings
_dev = re.compile('^/dev/')
def strip_dev(device_name):
"""remove leading '/dev/'."""
return _dev.sub('', device_name) if device_name else device_name
def prepend_dev(device_name):
"""Make sure there is a leading '/dev/'."""
return device_name and '/dev/' + strip_dev(device_name)
_pref = re.compile('^((x?v|s)d)')
def strip_prefix(device_name):
"""remove both leading /dev/ and xvd or sd or vd."""
device_name = strip_dev(device_name)
return _pref.sub('', device_name)
def instance_block_mapping(instance, bdms):
root_device_name = instance['root_device_name']
# NOTE(clayg): remove this when xenapi is setting default_root_device
if root_device_name is None:
if driver.compute_driver_matches('xenapi.XenAPIDriver'):
root_device_name = '/dev/xvda'
else:
return _DEFAULT_MAPPINGS
mappings = {}
mappings['ami'] = strip_dev(root_device_name)
mappings['root'] = root_device_name
default_ephemeral_device = instance.get('default_ephemeral_device')
if default_ephemeral_device:
mappings['ephemeral0'] = default_ephemeral_device
default_swap_device = instance.get('default_swap_device')
if default_swap_device:
mappings['swap'] = default_swap_device
ebs_devices = []
# 'ephemeralN', 'swap' and ebs
for bdm in bdms:
if bdm['no_device']:
continue
# ebs volume case
if (bdm['volume_id'] or bdm['snapshot_id']):
ebs_devices.append(bdm['device_name'])
continue
virtual_name = bdm['virtual_name']
if not virtual_name:
continue
if is_swap_or_ephemeral(virtual_name):
mappings[virtual_name] = bdm['device_name']
# NOTE(yamahata): I'm not sure how ebs device should be numbered.
# Right now sort by device name for deterministic
# result.
if ebs_devices:
nebs = 0
ebs_devices.sort()
for ebs in ebs_devices:
mappings['ebs%d' % nebs] = ebs
nebs += 1
return mappings
def match_device(device):
"""Matches device name and returns prefix, suffix."""
match = re.match("(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$", device)
if not match:
return None
return match.groups()
def volume_in_mapping(mount_device, block_device_info):
block_device_list = [strip_dev(vol['mount_device'])
for vol in
driver.block_device_info_get_mapping(
block_device_info)]
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
block_device_list.append(strip_dev(swap['device_name']))
block_device_list += [strip_dev(ephemeral['device_name'])
for ephemeral in
driver.block_device_info_get_ephemerals(
block_device_info)]
LOG.debug(_("block_device_list %s"), block_device_list)
return strip_dev(mount_device) in block_device_list
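
The device-name helpers at the bottom of this file are easy to exercise standalone; a self-contained copy (regexes reproduced from above, sample names invented):

import re

_dev = re.compile('^/dev/')
_ephemeral = re.compile(r'^ephemeral(\d|[1-9]\d+)$')

def strip_dev(device_name):
    # Remove a leading '/dev/' if present.
    return _dev.sub('', device_name) if device_name else device_name

def prepend_dev(device_name):
    # Make sure there is a leading '/dev/'.
    return device_name and '/dev/' + strip_dev(device_name)

def is_ephemeral(device_name):
    return _ephemeral.match(device_name) is not None

print(strip_dev('/dev/vdb'))        # vdb
print(prepend_dev('vdb'))           # /dev/vdb
print(is_ephemeral('ephemeral0'))   # True
print(is_ephemeral('ephemeral01'))  # False (no leading zeros allowed)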

gantt/config.py Normal file

@@ -0,0 +1,38 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import rpc
from nova import paths
from gantt import version
_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('$sqlite_db')
def parse_args(argv, default_config_files=None):
db_session.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
sqlite_db='nova.sqlite')
rpc.set_defaults(control_exchange='nova')
cfg.CONF(argv[1:],
project='gantt',
version=version.version_string(),
default_config_files=default_config_files)
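
A sketch of how a service entry point would be expected to use this module (hypothetical: the binary and the config-file path below are not part of this commit):

import sys

from gantt import config  # assumes the gantt tree is on sys.path

def main():
    # Must run before any other gantt code touches CONF.
    config.parse_args(sys.argv,
                      default_config_files=['/etc/gantt/gantt.conf'])
    # ... construct and start the scheduler service here ...

if __name__ == '__main__':
    main()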

gantt/context.py Normal file

@@ -0,0 +1,233 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of nova."""
import copy
import uuid
import six
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from gantt import policy
LOG = logging.getLogger(__name__)
def generate_request_id():
return 'req-' + str(uuid.uuid4())
class RequestContext(object):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
roles=None, remote_address=None, timestamp=None,
request_id=None, auth_token=None, overwrite=True,
quota_class=None, user_name=None, project_name=None,
service_catalog=None, instance_lock_checked=False, **kwargs):
"""
:param read_deleted: 'no' indicates deleted records are hidden, 'yes'
indicates deleted records are visible, 'only' indicates that
*only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
if kwargs:
LOG.warn(_('Arguments dropped when creating context: %s') %
str(kwargs))
self.user_id = user_id
self.project_id = project_id
self.roles = roles or []
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = timeutils.utcnow()
if isinstance(timestamp, six.string_types):
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
self.auth_token = auth_token
if service_catalog:
# Only include required parts of service_catalog
self.service_catalog = [s for s in service_catalog
if s.get('type') in ('volume',)]
else:
# if list is empty or none
self.service_catalog = []
self.instance_lock_checked = instance_lock_checked
# NOTE(markmc): this attribute is currently only used by the
# rs_limits turnstile pre-processor.
# See https://lists.launchpad.net/openstack/msg12200.html
self.quota_class = quota_class
self.user_name = user_name
self.project_name = project_name
self.is_admin = is_admin
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self)
if overwrite or not hasattr(local.store, 'context'):
self.update_store()
def _get_read_deleted(self):
return self._read_deleted
def _set_read_deleted(self, read_deleted):
if read_deleted not in ('no', 'yes', 'only'):
raise ValueError(_("read_deleted can only be one of 'no', "
"'yes' or 'only', not %r") % read_deleted)
self._read_deleted = read_deleted
def _del_read_deleted(self):
del self._read_deleted
read_deleted = property(_get_read_deleted, _set_read_deleted,
_del_read_deleted)
def update_store(self):
local.store.context = self
def to_dict(self):
return {'user_id': self.user_id,
'project_id': self.project_id,
'is_admin': self.is_admin,
'read_deleted': self.read_deleted,
'roles': self.roles,
'remote_address': self.remote_address,
'timestamp': timeutils.strtime(self.timestamp),
'request_id': self.request_id,
'auth_token': self.auth_token,
'quota_class': self.quota_class,
'user_name': self.user_name,
'service_catalog': self.service_catalog,
'project_name': self.project_name,
'instance_lock_checked': self.instance_lock_checked,
'tenant': self.tenant,
'user': self.user}
@classmethod
def from_dict(cls, values):
values.pop('user', None)
values.pop('tenant', None)
return cls(**values)
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
context = copy.copy(self)
context.is_admin = True
if 'admin' not in context.roles:
context.roles.append('admin')
if read_deleted is not None:
context.read_deleted = read_deleted
return context
# NOTE(sirp): the openstack/common version of RequestContext uses
# tenant/user whereas the Nova version uses project_id/user_id. We need
# this shim in order to use context-aware code from openstack/common, like
# logging, until we make the switch to using openstack/common's version of
# RequestContext.
@property
def tenant(self):
return self.project_id
@property
def user(self):
return self.user_id
def get_admin_context(read_deleted="no"):
return RequestContext(user_id=None,
project_id=None,
is_admin=True,
read_deleted=read_deleted,
overwrite=False)
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def require_admin_context(ctxt):
"""Raise exception.AdminRequired() if context is an admin context."""
if not ctxt.is_admin:
raise exception.AdminRequired()
def require_context(ctxt):
"""Raise exception.NotAuthorized() if context is not a user or an
admin context.
"""
if not ctxt.is_admin and not is_user_context(ctxt):
raise exception.NotAuthorized()
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project_id:
raise exception.NotAuthorized()
elif context.project_id != project_id:
raise exception.NotAuthorized()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user_id:
raise exception.NotAuthorized()
elif context.user_id != user_id:
raise exception.NotAuthorized()
def authorize_quota_class_context(context, class_name):
"""Ensures a request has permission to access the given quota class."""
if is_user_context(context):
if not context.quota_class:
raise exception.NotAuthorized()
elif context.quota_class != class_name:
raise exception.NotAuthorized()
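
The read_deleted property guard is worth seeing in isolation; a standalone mock (class name and values invented) of the same validation:

class MiniContext(object):
    def _get_read_deleted(self):
        return self._read_deleted

    def _set_read_deleted(self, read_deleted):
        if read_deleted not in ('no', 'yes', 'only'):
            raise ValueError("read_deleted can only be one of 'no', "
                             "'yes' or 'only', not %r" % read_deleted)
        self._read_deleted = read_deleted

    read_deleted = property(_get_read_deleted, _set_read_deleted)

ctx = MiniContext()
ctx.read_deleted = 'only'
print(ctx.read_deleted)    # only
try:
    ctx.read_deleted = 'maybe'
except ValueError as err:
    print(err)             # read_deleted can only be one of 'no', 'yes' ...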

File diff suppressed because it is too large


@@ -1,116 +0,0 @@
# Copyright (c) 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Generic Loadable class support.
Meant to be used by such things as scheduler filters and weights where we
want to load modules from certain directories and find certain types of
classes within those modules. Note that this is quite different from
generic plugins and the pluginmanager code that exists elsewhere.
Usage:
Create a directory with an __init__.py with code such as:
class SomeLoadableClass(object):
pass
class MyLoader(nova.loadables.BaseLoader):
def __init__(self):
super(MyLoader, self).__init__(SomeLoadableClass)
If you create modules in the same directory and subclass SomeLoadableClass
within them, MyLoader().get_all_classes() will return a list
of such classes.
"""
import inspect
import os
import sys
from nova import exception
from nova.openstack.common import importutils
class BaseLoader(object):
def __init__(self, loadable_cls_type):
mod = sys.modules[self.__class__.__module__]
self.path = os.path.abspath(mod.__path__[0])
self.package = mod.__package__
self.loadable_cls_type = loadable_cls_type
def _is_correct_class(self, obj):
"""Return whether an object is a class of the correct type and
is not prefixed with an underscore.
"""
return (inspect.isclass(obj) and
(not obj.__name__.startswith('_')) and
issubclass(obj, self.loadable_cls_type))
def _get_classes_from_module(self, module_name):
"""Get the classes from a module that match the type we want."""
classes = []
module = importutils.import_module(module_name)
for obj_name in dir(module):
# Skip objects that are meant to be private.
if obj_name.startswith('_'):
continue
itm = getattr(module, obj_name)
if self._is_correct_class(itm):
classes.append(itm)
return classes
def get_all_classes(self):
"""Get the classes of the type we want from all modules found
in the directory that defines this class.
"""
classes = []
for dirpath, dirnames, filenames in os.walk(self.path):
relpath = os.path.relpath(dirpath, self.path)
if relpath == '.':
relpkg = ''
else:
relpkg = '.%s' % '.'.join(relpath.split(os.sep))
for fname in filenames:
root, ext = os.path.splitext(fname)
if ext != '.py' or root == '__init__':
continue
module_name = "%s%s.%s" % (self.package, relpkg, root)
mod_classes = self._get_classes_from_module(module_name)
classes.extend(mod_classes)
return classes
def get_matching_classes(self, loadable_class_names):
"""Get loadable classes from a list of names. Each name can be
a full module path or the full path to a method that returns
classes to use. The latter behavior is useful to specify a method
that returns a list of classes to use in a default case.
"""
classes = []
for cls_name in loadable_class_names:
obj = importutils.import_class(cls_name)
if self._is_correct_class(obj):
classes.append(obj)
elif inspect.isfunction(obj):
# Get list of classes from a function
for cls in obj():
classes.append(cls)
else:
error_str = 'Not a class of the correct type'
raise exception.ClassNotFound(class_name=cls_name,
exception=error_str)
return classes
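
A standalone sketch (invented classes) of the class test BaseLoader applies when scanning modules:

import inspect

class SomeLoadableClass(object):
    pass

class GoodFilter(SomeLoadableClass):
    pass

class _PrivateFilter(SomeLoadableClass):
    pass

def _is_correct_class(obj, loadable_cls_type):
    # Is a class, of the correct type, and not underscore-prefixed.
    return (inspect.isclass(obj) and
            not obj.__name__.startswith('_') and
            issubclass(obj, loadable_cls_type))

print(_is_correct_class(GoodFilter, SomeLoadableClass))      # True
print(_is_correct_class(_PrivateFilter, SomeLoadableClass))  # False
print(_is_correct_class('not a class', SomeLoadableClass))   # False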


@@ -1,64 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
from oslo.config import cfg
from nova import utils
CONF = cfg.CONF
def _get_my_ip():
"""
Returns the actual ip of the local machine.
This code figures out what source address would be used if some traffic
were to be sent out to some well known address on the Internet. In this
case, a Google DNS server is used, but the specific address does not
matter much. No traffic is actually sent.
"""
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return utils.get_my_ipv4_address()
netconf_opts = [
cfg.StrOpt('my_ip',
default=_get_my_ip(),
help='ip address of this host'),
cfg.StrOpt('host',
default=socket.gethostname(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, or IP address. '
'However, the node name must be valid within '
'an AMQP key, and if using ZeroMQ, a valid '
'hostname, FQDN, or IP address'),
cfg.BoolOpt('use_ipv6',
default=False,
help='use ipv6'),
]
CONF.register_opts(netconf_opts)
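
The _get_my_ip() trick is subtle enough to demo standalone (the fallback address below is a stand-in for this sketch; nova falls back to utils.get_my_ipv4_address()):

import socket

def get_my_ip():
    # connect() on a UDP socket sends no packets; the kernel just picks
    # the source address it would use to reach 8.8.8.8, and getsockname()
    # reads that address back.
    try:
        csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        csock.connect(('8.8.8.8', 80))
        (addr, _port) = csock.getsockname()
        csock.close()
        return addr
    except socket.error:
        return '127.0.0.1'  # stand-in fallback for this sketch

print(get_my_ip())  # e.g. 192.168.1.23 on a typical LAN host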


@@ -1,422 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functionality related to notifications common to multiple layers of
the system.
"""
import datetime
from oslo.config import cfg
from nova.compute import flavors
import nova.context
from nova import db
from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova import notifier as notify
from nova.openstack.common import context as common_context
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log
from nova.openstack.common import timeutils
from nova import utils
LOG = log.getLogger(__name__)
notify_opts = [
cfg.StrOpt('notify_on_state_change',
help='If set, send compute.instance.update notifications on instance '
'state changes. Valid values are None for no notifications, '
'"vm_state" for notifications on VM state changes, or '
'"vm_and_task_state" for notifications on VM and task state '
'changes.'),
cfg.BoolOpt('notify_api_faults', default=False,
help='If set, send api.fault notifications on caught exceptions '
'in the API service.'),
]
CONF = cfg.CONF
CONF.register_opts(notify_opts)
CONF.import_opt('default_notification_level',
'nova.openstack.common.notifier.api')
CONF.import_opt('default_publisher_id',
'nova.openstack.common.notifier.api')
def notify_decorator(name, fn):
"""Decorator for notify which is used from utils.monkey_patch().
:param name: name of the function
:param fn: the function object
:returns: function -- decorated function
"""
def wrapped_func(*args, **kwarg):
body = {}
body['args'] = []
body['kwarg'] = {}
for arg in args:
body['args'].append(arg)
for key in kwarg:
body['kwarg'][key] = kwarg[key]
ctxt = common_context.get_context_from_function_and_args(
fn, args, kwarg)
notifier = notify.get_notifier(publisher_id=(CONF.default_publisher_id
or CONF.host))
method = notifier.getattr(CONF.default_notification_level.lower(),
'info')
method(ctxt, name, body)
return fn(*args, **kwarg)
return wrapped_func
def send_api_fault(url, status, exception):
"""Send an api.fault notification."""
if not CONF.notify_api_faults:
return
payload = {'url': url, 'exception': str(exception), 'status': status}
notify.get_notifier('api').error(None, 'api.fault', payload)
def send_update(context, old_instance, new_instance, service=None, host=None):
"""Send compute.instance.update notification to report any changes occurred
in that instance
"""
if not CONF.notify_on_state_change:
# skip all this if updates are disabled
return
update_with_state_change = False
old_vm_state = old_instance["vm_state"]
new_vm_state = new_instance["vm_state"]
old_task_state = old_instance["task_state"]
new_task_state = new_instance["task_state"]
# we should check if we need to send a state change or a regular
# notification
if old_vm_state != new_vm_state:
# yes, the vm state is changing:
update_with_state_change = True
elif CONF.notify_on_state_change:
if (CONF.notify_on_state_change.lower() == "vm_and_task_state" and
old_task_state != new_task_state):
# yes, the task state is changing:
update_with_state_change = True
if update_with_state_change:
# send a notification with state changes
# value of verify_states need not be True as the check for states is
# already done here
send_update_with_states(context, new_instance, old_vm_state,
new_vm_state, old_task_state, new_task_state, service, host)
else:
try:
old_display_name = None
if new_instance["display_name"] != old_instance["display_name"]:
old_display_name = old_instance["display_name"]
_send_instance_update_notification(context, new_instance,
service=service, host=host,
old_display_name=old_display_name)
except Exception:
LOG.exception(_("Failed to send state update notification"),
instance=new_instance)
def send_update_with_states(context, instance, old_vm_state, new_vm_state,
old_task_state, new_task_state, service="compute", host=None,
verify_states=False):
"""Send compute.instance.update notification to report changes if there
are any, in the instance
"""
if not CONF.notify_on_state_change:
# skip all this if updates are disabled
return
fire_update = True
# send update notification by default
if verify_states:
# check whether we need to send notification related to state changes
fire_update = False
# do not send notification if the conditions for vm and/or task state
# are not satisfied
if old_vm_state != new_vm_state:
# yes, the vm state is changing:
fire_update = True
elif CONF.notify_on_state_change:
if (CONF.notify_on_state_change.lower() == "vm_and_task_state" and
old_task_state != new_task_state):
# yes, the task state is changing:
fire_update = True
if fire_update:
# send either a state change or a regular notification
try:
_send_instance_update_notification(context, instance,
old_vm_state=old_vm_state, old_task_state=old_task_state,
new_vm_state=new_vm_state, new_task_state=new_task_state,
service=service, host=host)
except Exception:
LOG.exception(_("Failed to send state update notification"),
instance=instance)
def _send_instance_update_notification(context, instance, old_vm_state=None,
old_task_state=None, new_vm_state=None, new_task_state=None,
service="compute", host=None, old_display_name=None):
"""Send 'compute.instance.update' notification to inform observers
about instance state changes.
"""
payload = info_from_instance(context, instance, None, None)
if not new_vm_state:
new_vm_state = instance["vm_state"]
if not new_task_state:
new_task_state = instance["task_state"]
states_payload = {
"old_state": old_vm_state,
"state": new_vm_state,
"old_task_state": old_task_state,
"new_task_state": new_task_state,
}
payload.update(states_payload)
# add audit fields:
(audit_start, audit_end) = audit_period_bounds(current_period=True)
payload["audit_period_beginning"] = audit_start
payload["audit_period_ending"] = audit_end
# add bw usage info:
bw = bandwidth_usage(instance, audit_start)
payload["bandwidth"] = bw
# add old display name if it is changed
if old_display_name:
payload["old_display_name"] = old_display_name
notify.get_notifier(service, host).info(context,
'compute.instance.update', payload)
def audit_period_bounds(current_period=False):
"""Get the start and end of the relevant audit usage period
:param current_period: if True, this will generate a usage for the
current usage period; if False, this will generate a usage for the
previous audit period.
"""
begin, end = utils.last_completed_audit_period()
if current_period:
audit_start = end
audit_end = timeutils.utcnow()
else:
audit_start = begin
audit_end = end
return (audit_start, audit_end)
def bandwidth_usage(instance_ref, audit_start,
ignore_missing_network_data=True):
"""Get bandwidth usage information for the instance for the
specified audit period.
"""
admin_context = nova.context.get_admin_context(read_deleted='yes')
def _get_nwinfo_old_skool():
"""Support for getting network info without objects."""
if (instance_ref.get('info_cache') and
instance_ref['info_cache'].get('network_info') is not None):
cached_info = instance_ref['info_cache']['network_info']
if isinstance(cached_info, network_model.NetworkInfo):
return cached_info
return network_model.NetworkInfo.hydrate(cached_info)
try:
return network.API().get_instance_nw_info(admin_context,
instance_ref)
except Exception:
try:
with excutils.save_and_reraise_exception():
LOG.exception(_('Failed to get nw_info'),
instance=instance_ref)
except Exception:
if ignore_missing_network_data:
return
raise
# FIXME(comstud): Temporary as we transition to objects. This import
# is here to avoid circular imports.
from nova.objects import instance as instance_obj
if isinstance(instance_ref, instance_obj.Instance):
nw_info = instance_ref.info_cache.network_info
if nw_info is None:
nw_info = network_model.NetworkInfo()
else:
nw_info = _get_nwinfo_old_skool()
macs = [vif['address'] for vif in nw_info]
uuids = [instance_ref["uuid"]]
bw_usages = db.bw_usage_get_by_uuids(admin_context, uuids, audit_start)
bw_usages = [b for b in bw_usages if b.mac in macs]
bw = {}
for b in bw_usages:
label = 'net-name-not-found-%s' % b['mac']
for vif in nw_info:
if vif['address'] == b['mac']:
label = vif['network']['label']
break
bw[label] = dict(bw_in=b.bw_in, bw_out=b.bw_out)
return bw
def image_meta(system_metadata):
"""Format image metadata for use in notifications from the instance
system metadata.
"""
image_meta = {}
for md_key, md_value in system_metadata.iteritems():
if md_key.startswith('image_'):
image_meta[md_key[6:]] = md_value
return image_meta
def info_from_instance(context, instance_ref, network_info,
system_metadata, **kw):
"""Get detailed instance information for an instance which is common to all
notifications.
:param network_info: network_info provided if not None
:param system_metadata: system_metadata DB entries for the instance,
if not None. *NOTE*: Currently unused here in trunk, but needed for
potential custom modifications.
"""
def null_safe_str(s):
return str(s) if s else ''
def null_safe_isotime(s):
if isinstance(s, datetime.datetime):
return timeutils.strtime(s)
else:
return str(s) if s else ''
image_ref_url = glance.generate_image_url(instance_ref['image_ref'])
instance_type = flavors.extract_flavor(instance_ref)
instance_type_name = instance_type.get('name', '')
instance_flavorid = instance_type.get('flavorid', '')
if system_metadata is None:
system_metadata = utils.instance_sys_meta(instance_ref)
instance_info = dict(
# Owner properties
tenant_id=instance_ref['project_id'],
user_id=instance_ref['user_id'],
# Identity properties
instance_id=instance_ref['uuid'],
display_name=instance_ref['display_name'],
reservation_id=instance_ref['reservation_id'],
hostname=instance_ref['hostname'],
# Type properties
instance_type=instance_type_name,
instance_type_id=instance_ref['instance_type_id'],
instance_flavor_id=instance_flavorid,
architecture=instance_ref['architecture'],
# Capacity properties
memory_mb=instance_ref['memory_mb'],
disk_gb=instance_ref['root_gb'] + instance_ref['ephemeral_gb'],
vcpus=instance_ref['vcpus'],
# Note(dhellmann): This makes the disk_gb value redundant, but
# we are keeping it for backwards-compatibility with existing
# users of notifications.
root_gb=instance_ref['root_gb'],
ephemeral_gb=instance_ref['ephemeral_gb'],
# Location properties
host=instance_ref['host'],
node=instance_ref['node'],
availability_zone=instance_ref['availability_zone'],
# Date properties
created_at=str(instance_ref['created_at']),
# Terminated and Deleted are slightly different (although being
# terminated and not deleted is a transient state), so include
# both and let the recipient decide which they want to use.
terminated_at=null_safe_isotime(instance_ref.get('terminated_at')),
deleted_at=null_safe_isotime(instance_ref.get('deleted_at')),
launched_at=null_safe_isotime(instance_ref.get('launched_at')),
# Image properties
image_ref_url=image_ref_url,
os_type=instance_ref['os_type'],
kernel_id=instance_ref['kernel_id'],
ramdisk_id=instance_ref['ramdisk_id'],
# Status properties
state=instance_ref['vm_state'],
state_description=null_safe_str(instance_ref.get('task_state')),
# accessIPs
access_ip_v4=instance_ref['access_ip_v4'],
access_ip_v6=instance_ref['access_ip_v6'],
)
if network_info is not None:
fixed_ips = []
for vif in network_info:
for ip in vif.fixed_ips():
ip["label"] = vif["network"]["label"]
fixed_ips.append(ip)
instance_info['fixed_ips'] = fixed_ips
# add image metadata
image_meta_props = image_meta(system_metadata)
instance_info["image_meta"] = image_meta_props
# add instance metadata
instance_info['metadata'] = instance_ref['metadata']
instance_info.update(kw)
return instance_info
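
The image_meta() prefix-stripping is compact enough to show end to end; a standalone copy with invented metadata:

def image_meta(system_metadata):
    # Keep only 'image_'-prefixed keys, with the prefix stripped.
    meta = {}
    for key, value in system_metadata.items():
        if key.startswith('image_'):
            meta[key[6:]] = value
    return meta

sysmeta = {'image_min_ram': '512', 'image_os_type': 'linux',
           'instance_type_id': '42'}
print(image_meta(sysmeta))  # {'min_ram': '512', 'os_type': 'linux'}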


@@ -1,73 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A temporary helper which emulates oslo.messaging.Notifier.
This helper method allows us to do the tedious porting to the new Notifier API
as a standalone commit so that the commit which switches us to oslo.messaging
is smaller and easier to review. This file will be removed as part of that
commit.
"""
from oslo.config import cfg
from nova.openstack.common.notifier import api as notifier_api
CONF = cfg.CONF
class Notifier(object):
def __init__(self, publisher_id):
super(Notifier, self).__init__()
self.publisher_id = publisher_id
_marker = object()
def prepare(self, publisher_id=_marker):
ret = self.__class__(self.publisher_id)
if publisher_id is not self._marker:
ret.publisher_id = publisher_id
return ret
def _notify(self, ctxt, event_type, payload, priority):
notifier_api.notify(ctxt,
self.publisher_id,
event_type,
priority,
payload)
def debug(self, ctxt, event_type, payload):
self._notify(ctxt, event_type, payload, 'DEBUG')
def info(self, ctxt, event_type, payload):
self._notify(ctxt, event_type, payload, 'INFO')
def warn(self, ctxt, event_type, payload):
self._notify(ctxt, event_type, payload, 'WARN')
def error(self, ctxt, event_type, payload):
self._notify(ctxt, event_type, payload, 'ERROR')
def critical(self, ctxt, event_type, payload):
self._notify(ctxt, event_type, payload, 'CRITICAL')
def get_notifier(service=None, host=None, publisher_id=None):
if not publisher_id:
publisher_id = "%s.%s" % (service, host or CONF.host)
return Notifier(publisher_id)
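
The _marker sentinel in prepare() is the interesting bit: it lets callers override publisher_id with None, while "not passed" keeps the old value. A trimmed standalone copy (publisher ids invented):

class Notifier(object):
    _marker = object()  # sentinel: distinguishes "not passed" from None

    def __init__(self, publisher_id):
        self.publisher_id = publisher_id

    def prepare(self, publisher_id=_marker):
        ret = Notifier(self.publisher_id)
        if publisher_id is not self._marker:
            ret.publisher_id = publisher_id
        return ret

n = Notifier('compute.host1')
print(n.prepare().publisher_id)             # compute.host1 (kept)
print(n.prepare('api.host2').publisher_id)  # api.host2 (overridden)
print(n.prepare(None).publisher_id)         # None (explicitly cleared)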


@@ -1,13 +0,0 @@
openstack-common
----------------
A number of modules from openstack-common are imported into this project.
These modules are "incubating" in openstack-common and are kept in sync
with the help of openstack-common's update.py script. See:
http://wiki.openstack.org/CommonLibrary#Incubation
The copy of the code should never be directly modified here. Please
always update openstack-common first and then run the script to copy
the changes across.


@@ -1,63 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
class MissingArgs(Exception):
def __init__(self, missing):
self.missing = missing
def __str__(self):
if len(self.missing) == 1:
return "An argument is missing"
else:
return ("%(num)d arguments are missing" %
dict(num=len(self.missing)))
def validate_args(fn, *args, **kwargs):
"""Check that the supplied args are sufficient for calling a function.
>>> validate_args(lambda a: None)
Traceback (most recent call last):
...
MissingArgs: An argument is missing
>>> validate_args(lambda a, b, c, d: None, 0, c=1)
Traceback (most recent call last):
...
MissingArgs: 2 arguments are missing
:param fn: the function to check
:param arg: the positional arguments supplied
:param kwargs: the keyword arguments supplied
"""
argspec = inspect.getargspec(fn)
num_defaults = len(argspec.defaults or [])
required_args = argspec.args[:len(argspec.args) - num_defaults]
def isbound(method):
return getattr(method, 'im_self', None) is not None
if isbound(fn):
required_args.pop(0)
missing = [arg for arg in required_args if arg not in kwargs]
missing = missing[len(args):]
if missing:
raise MissingArgs(missing)


@@ -1,256 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 SINA Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
from nova.openstack.common import gettextutils
from nova.openstack.common import importutils
gettextutils.install('nova')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../../"))
WORDWRAP_WIDTH = 60
def generate(srcfiles):
mods_by_pkg = dict()
for filepath in srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
pkg_names.sort()
ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
ext_names.sort()
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
if mod_str.endswith('.__init__'):
mod_str = mod_str[:mod_str.rfind(".")]
mod_obj = _import_module(mod_str)
if not mod_obj:
raise RuntimeError("Unable to import module %s" % mod_str)
for group, opts in _list_opts(mod_obj):
opts_by_group.setdefault(group, []).append((mod_str, opts))
print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
for group, opts in opts_by_group.items():
print_group_opts(group, opts)
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
return importutils.import_module(mod_str)
except ImportError as ie:
sys.stderr.write("%s\n" % str(ie))
return None
except Exception:
return None
def _is_in_group(opt, group):
"Check if opt is in group."
for key, value in group._opts.items():
if value['opt'] == opt:
return True
return False
def _guess_groups(opt, mod_obj):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
# what other groups is it in?
for key, value in cfg.CONF.items():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
raise RuntimeError(
"Unable to find group for option %s, "
"maybe it's defined twice in the same group?"
% opt.name
)
def _list_opts(obj):
def is_opt(o):
return (isinstance(o, cfg.Opt) and
not isinstance(o, cfg.SubCommandOpt))
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
if is_opt(attr_obj):
opts.append(attr_obj)
elif (isinstance(attr_obj, list) and
all(map(lambda x: is_opt(x), attr_obj))):
opts.extend(attr_obj)
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
return ret.items()
def print_group_opts(group, opts_by_module):
print("[%s]" % group)
print('')
for mod, opts in opts_by_module:
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print('')
def _get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
# part.
value = os.path.normpath("/usr/" + value[len(sys.prefix):])
elif value.startswith(BASEDIR):
return value.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in value:
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value == socket.gethostname() and 'host' in name:
return 'nova'
elif value.strip() != value:
return '"%s"' % value
return value
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help += ' (' + OPT_TYPES[opt_type] + ')'
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
try:
if opt_default is None:
print('#%s=<None>' % opt_name)
elif opt_type == STROPT:
assert(isinstance(opt_default, basestring))
print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
opt_default)))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, str(opt_default).lower()))
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print('#%s=%s' % (opt_name, default))
print('')
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
def main():
if len(sys.argv) < 2:
print("usage: %s [srcfile]...\n" % sys.argv[0])
sys.exit(0)
generate(sys.argv[1:])
if __name__ == '__main__':
main()
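# Example (a minimal sketch, not part of the original module; the module
# path and option are illustrative). The generator consumes any importable
# module that defines cfg.Opt objects at module level, singly or in lists:
#
#     example_opts = [
#         cfg.StrOpt('scheduler_driver',
#                    default='filter_scheduler',
#                    help='Driver to use for the scheduler'),
#     ]
#     CONF.register_opts(example_opts)
#
# Running generate(['nova/example.py']) over such a file would print:
#
#     [DEFAULT]
#     #
#     # Options defined in nova.example
#     #
#     # Driver to use for the scheduler (string value)
#     #scheduler_driver=filter_scheduler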

View File

@ -1,83 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple class that stores security context information in the web request.
Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""
import itertools
from nova.openstack.common import uuidutils
def generate_request_id():
return 'req-%s' % uuidutils.generate_uuid()
class RequestContext(object):
"""Helper class to represent useful information about a request context.
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
read_only=False, show_deleted=False, request_id=None):
self.auth_token = auth_token
self.user = user
self.tenant = tenant
self.is_admin = is_admin
self.read_only = read_only
self.show_deleted = show_deleted
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
def to_dict(self):
return {'user': self.user,
'tenant': self.tenant,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'auth_token': self.auth_token,
'request_id': self.request_id}
def get_admin_context(show_deleted=False):
context = RequestContext(None,
tenant=None,
is_admin=True,
show_deleted=show_deleted)
return context
def get_context_from_function_and_args(function, args, kwargs):
"""Find an arg of type RequestContext and return it.
This is useful in a couple of decorators where we don't
know much about the function we're wrapping.
"""
for arg in itertools.chain(kwargs.values(), args):
if isinstance(arg, RequestContext):
return arg
return None
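# Example (a minimal, runnable sketch; the token/user/tenant values are
# illustrative):
if __name__ == '__main__':
    ctxt = RequestContext(auth_token='token', user='alice', tenant='demo')
    assert ctxt.request_id.startswith('req-')
    admin = get_admin_context()
    print(admin.to_dict()['is_admin'])  # True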

View File

@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,106 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Multiple DB API backend support.
Supported configuration options:
The following two parameters are in the 'database' group:
`backend`: DB backend name or full module path to DB backend module.
`use_tpool`: Enable thread pooling of DB API calls.
A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
*NOTE*: There are bugs in eventlet when using tpool combined with
threading locks. The python logging module happens to use such locks. To
work around this issue, be sure to specify thread=False with
eventlet.monkey_patch().
A bug for eventlet has been filed here:
https://bitbucket.org/eventlet/eventlet/issue/137/
"""
import functools
from oslo.config import cfg
from nova.openstack.common import importutils
from nova.openstack.common import lockutils
db_opts = [
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
cfg.BoolOpt('use_tpool',
default=False,
deprecated_name='dbapi_use_tpool',
deprecated_group='DEFAULT',
help='Enable the experimental use of thread pooling for '
'all DB API calls')
]
CONF = cfg.CONF
CONF.register_opts(db_opts, 'database')
class DBAPI(object):
def __init__(self, backend_mapping=None):
if backend_mapping is None:
backend_mapping = {}
self.__backend = None
self.__backend_mapping = backend_mapping
@lockutils.synchronized('dbapi_backend', 'nova-')
def __get_backend(self):
"""Get the actual backend. May be a module or an instance of
a class. Doesn't matter to us. We do this synchronized as it's
possible multiple greenthreads started very quickly trying to do
DB calls and eventlet can switch threads before self.__backend gets
assigned.
"""
if self.__backend:
# Another thread assigned it
return self.__backend
backend_name = CONF.database.backend
self.__use_tpool = CONF.database.use_tpool
if self.__use_tpool:
from eventlet import tpool
self.__tpool = tpool
# Import the untranslated name if we don't have a
# mapping.
backend_path = self.__backend_mapping.get(backend_name,
backend_name)
backend_mod = importutils.import_module(backend_path)
self.__backend = backend_mod.get_backend()
return self.__backend
def __getattr__(self, key):
backend = self.__backend or self.__get_backend()
attr = getattr(backend, key)
if not self.__use_tpool or not hasattr(attr, '__call__'):
return attr
def tpool_wrapper(*args, **kwargs):
return self.__tpool.execute(attr, *args, **kwargs)
functools.update_wrapper(tpool_wrapper, attr)
return tpool_wrapper
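# Example (a minimal sketch; 'mypkg.sqlalchemy_backend' is a hypothetical
# module). A backend module only needs a no-argument get_backend() that
# returns an object whose attributes implement the DB API:
#
#     # mypkg/sqlalchemy_backend.py
#     def get_backend():
#         return _SqlalchemyApi()
#
# A service then maps backend names onto module paths once:
_BACKEND_MAPPING = {'sqlalchemy': 'mypkg.sqlalchemy_backend'}
IMPL = DBAPI(backend_mapping=_BACKEND_MAPPING)
# Attribute access on IMPL resolves lazily via get_backend() and is routed
# through eventlet's tpool when the database.use_tpool option is True.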

View File

@ -1,45 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""DB related custom exceptions."""
from nova.openstack.common.gettextutils import _ # noqa
class DBError(Exception):
"""Wraps an implementation specific exception."""
def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
super(DBError, self).__init__(str(inner_exception))
class DBDuplicateEntry(DBError):
"""Wraps an implementation specific exception."""
    def __init__(self, columns=None, inner_exception=None):
        # NOTE: avoid a mutable default argument; a shared list would be
        # reused across instances.
        self.columns = columns or []
        super(DBDuplicateEntry, self).__init__(inner_exception)
class DBDeadlock(DBError):
def __init__(self, inner_exception=None):
super(DBDeadlock, self).__init__(inner_exception)
class DBInvalidUnicodeParameter(Exception):
message = _("Invalid Parameter: "
"Unicode is not supported by the current database.")

View File

@ -1,108 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""
import six
from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper
from nova.openstack.common.db.sqlalchemy import session as sa
from nova.openstack.common import timeutils
class ModelBase(object):
"""Base class for models."""
__table_initialized__ = False
def save(self, session=None):
"""Save this object."""
if not session:
session = sa.get_session()
        # NOTE(boris-42): This part of the code should look like:
        #                   session.add(self)
        #                   session.flush()
        #                 But there is a bug in sqlalchemy and eventlet that
        #                 raises a NoneType exception if there is no running
        #                 transaction and rollback is called. As long as
        #                 sqlalchemy has this bug we have to create the
        #                 transaction explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def get(self, key, default=None):
return getattr(self, key, default)
def __iter__(self):
columns = dict(object_mapper(self).columns).keys()
# NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name'
# property for an Instance.
if hasattr(self, '_extra_keys'):
columns.extend(self._extra_keys())
self._i = iter(columns)
return self
def next(self):
n = six.advance_iterator(self._i)
return n, getattr(self, n)
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in six.iteritems(values):
setattr(self, k, v)
def iteritems(self):
"""Make the model object behave like a dict.
Includes attributes from joins.
"""
local = dict(self)
joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
if not k[0] == '_'])
local.update(joined)
return local.iteritems()
class TimestampMixin(object):
created_at = Column(DateTime, default=timeutils.utcnow)
updated_at = Column(DateTime, onupdate=timeutils.utcnow)
class SoftDeleteMixin(object):
deleted_at = Column(DateTime)
deleted = Column(Integer, default=0)
def soft_delete(self, session=None):
"""Mark this object as deleted."""
self.deleted = self.id
self.deleted_at = timeutils.utcnow()
self.save(session=session)
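# Example (a minimal sketch; the table and column names are illustrative):
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base

BASE = declarative_base()


class Foo(BASE, ModelBase, TimestampMixin, SoftDeleteMixin):
    __tablename__ = 'foos'
    id = Column(Integer, primary_key=True)
    name = Column(String(255))


if __name__ == '__main__':
    foo = Foo()
    foo.update({'name': 'bar'})  # dict-style write from ModelBase
    print(foo['name'], foo.get('missing', 'default'))  # bar default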

View File

@ -1,785 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Initializing:
* Call set_defaults with at least the following kwargs:
sql_connection, sqlite_db
Example:
session.set_defaults(
sql_connection="sqlite:///var/lib/nova/sqlite.db",
sqlite_db="/var/lib/nova/sqlite.db")
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
model_query() will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
Note: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and reservation_rollback().
Examples:
def get_foo(context, foo):
return model_query(context, models.Foo).\
filter_by(foo=foo).\
first()
def update_foo(context, id, newfoo):
model_query(context, models.Foo).\
filter_by(id=id).\
update({'foo': newfoo})
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keep all the reads and writes within
the context managed by a single session. In this way, the session's __exit__
handler will take care of calling flush() and commit() for you.
If using this approach, you should not explicitly call flush() or commit().
Any error within the context of the session will cause the session to emit
a ROLLBACK. If the connection is dropped before this is possible, the
database will implicitly rollback the transaction.
Note: statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call model.save()
def create_many_foo(context, foos):
session = get_session()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = get_session()
with session.begin():
foo_ref = model_query(context, models.Foo, session).\
filter_by(id=foo_id).\
first()
model_query(context, models.Bar, session).\
filter_by(id=foo_ref['bar_id']).\
update({'bar': newbar})
Note: update_bar is a trivially simple example of using "with session.begin".
Whereas create_many_foo is a good example of when a transaction is needed,
it is always best to use as few queries as possible. The two queries in
update_bar can be better expressed using a single query which avoids
the need for an explicit transaction. It can be expressed like so:
def update_bar(context, foo_id, newbar):
subq = model_query(context, models.Foo.id).\
filter_by(id=foo_id).\
limit(1).\
subquery()
model_query(context, models.Bar).\
filter_by(id=subq.as_scalar()).\
update({'bar': newbar})
For reference, this emits approximately the following SQL statement:
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call session.begin() on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the approaches above,
due to nested transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
def myfunc(foo):
session = get_session()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = get_session()
    with session.begin(subtransactions=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your "with session.begin()" block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid "with_lockmode('UPDATE')" when possible.
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
However, this cannot be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
Enabling soft deletes:
* To use/enable soft-deletes, the SoftDeleteMixin must be added
to your model class. For example:
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:
model.soft_delete() and query.soft_delete().
The model.soft_delete() method works on a single, already-fetched entry.
query.soft_delete() issues only one db request for all entries that
correspond to the query.
* In almost all cases you should use query.soft_delete(). Some examples:
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = get_session()
with session.begin(subtransactions=True):
count = model_query(BarModel).\
find(some_condition).\
soft_delete(synchronize_session=True)
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where model.soft_delete() is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
def soft_delete_bar_model():
session = get_session()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to the query
and then soft delete them, you should use the query.soft_delete() method:
def soft_delete_multi_models():
session = get_session()
with session.begin():
query = model_query(BarModel, session=session).\
find(some_condition)
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using model.soft_delete(), as in the following
example, is very inefficient.
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import os.path
import re
import time
from eventlet import greenthread
from oslo.config import cfg
import six
from sqlalchemy import exc as sqla_exc
import sqlalchemy.interfaces
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from nova.openstack.common.db import exception
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
sqlite_db_opts = [
cfg.StrOpt('sqlite_db',
default='nova.sqlite',
help='the filename to use with sqlite'),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If true, use synchronous mode for sqlite'),
]
database_opts = [
cfg.StrOpt('connection',
default='sqlite:///' +
os.path.abspath(os.path.join(os.path.dirname(__file__),
'../', '$sqlite_db')),
help='The SQLAlchemy connection string used to connect to the '
'database',
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE')],
secret=True),
cfg.StrOpt('slave_connection',
default='',
help='The SQLAlchemy connection string used to connect to the '
'slave database',
secret=True),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE')],
help='timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
]
CONF = cfg.CONF
CONF.register_opts(sqlite_db_opts)
CONF.register_opts(database_opts, 'database')
LOG = logging.getLogger(__name__)
_ENGINE = None
_MAKER = None
_SLAVE_ENGINE = None
_SLAVE_MAKER = None
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection)
cfg.set_defaults(sqlite_db_opts,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def cleanup():
global _ENGINE, _MAKER
global _SLAVE_ENGINE, _SLAVE_MAKER
if _MAKER:
_MAKER.close_all()
_MAKER = None
if _ENGINE:
_ENGINE.dispose()
_ENGINE = None
if _SLAVE_MAKER:
_SLAVE_MAKER.close_all()
_SLAVE_MAKER = None
if _SLAVE_ENGINE:
_SLAVE_ENGINE.dispose()
_SLAVE_ENGINE = None
class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite.
The foreign key constraints are disabled by default in SQLite,
so the foreign key constraints will be enabled here for every
    database connection.
"""
def connect(self, dbapi_con, con_record):
dbapi_con.execute('pragma foreign_keys=ON')
def get_session(autocommit=True, expire_on_commit=False,
sqlite_fk=False, slave_session=False):
"""Return a SQLAlchemy session."""
global _MAKER
global _SLAVE_MAKER
maker = _MAKER
if slave_session:
maker = _SLAVE_MAKER
if maker is None:
engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session)
maker = get_maker(engine, autocommit, expire_on_commit)
if slave_session:
_SLAVE_MAKER = maker
else:
_MAKER = maker
session = maker()
return session
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
# constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
_DUP_KEY_RE_DB = {
"sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
"postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
"mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
}
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
"""Raise exception if two entries are duplicated.
    A DBDuplicateEntry exception is raised here if the integrity error
    wraps a unique constraint violation.
"""
def get_columns_from_uniq_cons_or_name(columns):
        # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
        #                  where `t` is the table name and `c1`, `c2` are the
        #                  columns in the UniqueConstraint.
uniqbase = "uniq_"
if not columns.startswith(uniqbase):
if engine_name == "postgresql":
return [columns[columns.index("_") + 1:columns.rindex("_")]]
return [columns]
return columns[len(uniqbase):].split("0")[1:]
if engine_name not in ["mysql", "sqlite", "postgresql"]:
return
m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
if not m:
return
columns = m.group(1)
if engine_name == "sqlite":
columns = columns.strip().split(", ")
else:
columns = get_columns_from_uniq_cons_or_name(columns)
raise exception.DBDuplicateEntry(columns, integrity_error)
# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
# 'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}
def _raise_if_deadlock_error(operational_error, engine_name):
"""Raise exception on deadlock condition.
Raise DBDeadlock exception if OperationalError contains a Deadlock
condition.
"""
    # Use a name that does not shadow the `re` module.
    pattern = _DEADLOCK_RE_DB.get(engine_name)
    if pattern is None:
        return
    m = pattern.match(operational_error.message)
if not m:
return
raise exception.DBDeadlock(operational_error)
def _wrap_db_error(f):
def _wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter()
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.OperationalError as e:
_raise_if_deadlock_error(e, get_engine().name)
# NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now.
raise
except sqla_exc.IntegrityError as e:
# note(boris-42): SqlAlchemy doesn't unify errors from different
# DBs so we must do this. Also in some tables (for example
# instance_types) there are more than one unique constraint. This
# means we should get names of columns, which values violate
# unique constraint, from error message.
_raise_if_duplicate_entry_error(e, get_engine().name)
raise exception.DBError(e)
except Exception as e:
LOG.exception(_('DB exception wrapped.'))
raise exception.DBError(e)
_wrap.func_name = f.func_name
return _wrap
def get_engine(sqlite_fk=False, slave_engine=False):
"""Return a SQLAlchemy engine."""
global _ENGINE
global _SLAVE_ENGINE
engine = _ENGINE
db_uri = CONF.database.connection
if slave_engine:
engine = _SLAVE_ENGINE
db_uri = CONF.database.slave_connection
if engine is None:
engine = create_engine(db_uri,
sqlite_fk=sqlite_fk)
if slave_engine:
_SLAVE_ENGINE = engine
else:
_ENGINE = engine
return engine
def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
def _add_regexp_listener(dbapi_con, con_record):
"""Add REGEXP function to sqlite connections."""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
dbapi_con.create_function('regexp', 2, regexp)
def _greenthread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
    Force a context switch. With common database backends (e.g. MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that eventlet cannot monkey patch.
"""
greenthread.sleep(0)
def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL connections checked out of the pool are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
"""
try:
dbapi_conn.cursor().execute('select 1')
except dbapi_conn.OperationalError as ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
LOG.warn(_('Got mysql server has gone away: %s'), ex)
raise sqla_exc.DisconnectionError("Database server went away")
else:
raise
def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
    # For DB2, the error code is -30081, which is returned while the
    # database is still not ready.
conn_err_codes = ('2002', '2003', '2006', '-30081')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def create_engine(sql_connection, sqlite_fk=False):
"""Return a new SQLAlchemy engine."""
# NOTE(geekinutah): At this point we could be connecting to the normal
# db handle or the slave db handle. Things like
# _wrap_db_error aren't going to work well if their
# backends don't match. Let's check.
_assert_matching_drivers()
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
"pool_recycle": CONF.database.idle_timeout,
"echo": False,
'convert_unicode': True,
}
# Map our SQL debug level to SQLAlchemy's options
if CONF.database.connection_debug >= 100:
engine_args['echo'] = 'debug'
elif CONF.database.connection_debug >= 50:
engine_args['echo'] = True
if "sqlite" in connection_dict.drivername:
if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool
if CONF.database.connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
else:
if CONF.database.max_pool_size is not None:
engine_args['pool_size'] = CONF.database.max_pool_size
if CONF.database.max_overflow is not None:
engine_args['max_overflow'] = CONF.database.max_overflow
if CONF.database.pool_timeout is not None:
engine_args['pool_timeout'] = CONF.database.pool_timeout
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
sqlalchemy.event.listen(engine, 'checkin', _greenthread_yield)
if 'mysql' in connection_dict.drivername:
sqlalchemy.event.listen(engine, 'checkout', _ping_listener)
elif 'sqlite' in connection_dict.drivername:
if not CONF.sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
if (CONF.database.connection_trace and
engine.dialect.dbapi.__name__ == 'MySQLdb'):
_patch_mysqldb_with_stacktrace_comments()
try:
engine.connect()
except sqla_exc.OperationalError as e:
if not _is_db_connection_error(e.args[0]):
raise
remaining = CONF.database.max_retries
if remaining == -1:
remaining = 'infinite'
while True:
msg = _('SQL connection failed. %s attempts left.')
LOG.warn(msg % remaining)
if remaining != 'infinite':
remaining -= 1
time.sleep(CONF.database.retry_interval)
try:
engine.connect()
break
except sqla_exc.OperationalError as e:
if (remaining != 'infinite' and remaining == 0) or \
not _is_db_connection_error(e.args[0]):
raise
return engine
class Query(sqlalchemy.orm.query.Query):
"""Subclass of sqlalchemy.query with soft_delete() method."""
def soft_delete(self, synchronize_session='evaluate'):
return self.update({'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()},
synchronize_session=synchronize_session)
class Session(sqlalchemy.orm.session.Session):
"""Custom Session class to avoid SqlAlchemy Session monkey patching."""
@_wrap_db_error
def query(self, *args, **kwargs):
return super(Session, self).query(*args, **kwargs)
@_wrap_db_error
def flush(self, *args, **kwargs):
return super(Session, self).flush(*args, **kwargs)
@_wrap_db_error
def execute(self, *args, **kwargs):
return super(Session, self).execute(*args, **kwargs)
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
return sqlalchemy.orm.sessionmaker(bind=engine,
class_=Session,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
query_cls=Query)
def _patch_mysqldb_with_stacktrace_comments():
"""Adds current stack trace as a comment in queries.
Patches MySQLdb.cursors.BaseCursor._do_query.
"""
import MySQLdb.cursors
import traceback
old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query
def _do_query(self, q):
stack = ''
for file, line, method, function in traceback.extract_stack():
# exclude various common things from trace
if file.endswith('session.py') and method == '_do_query':
continue
if file.endswith('api.py') and method == 'wrapper':
continue
if file.endswith('utils.py') and method == '_inner':
continue
if file.endswith('exception.py') and method == '_wrap':
continue
# db/api is just a wrapper around db/sqlalchemy/api
if file.endswith('db/api.py'):
continue
# only trace inside nova
index = file.rfind('nova')
if index == -1:
continue
stack += "File:%s:%s Method:%s() Line:%s | " \
% (file[index:], line, method, function)
# strip trailing " | " from stack
if stack:
stack = stack[:-3]
qq = "%s /* %s */" % (q, stack)
else:
qq = q
old_mysql_do_query(self, qq)
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
def _assert_matching_drivers():
"""Make sure slave handle and normal handle have the same driver."""
# NOTE(geekinutah): There's no use case for writing to one backend and
# reading from another. Who knows what the future holds?
if CONF.database.slave_connection == '':
return
normal = sqlalchemy.engine.url.make_url(CONF.database.connection)
slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection)
assert normal.drivername == slave.drivername
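# Example (a minimal sketch; an in-memory SQLite URL keeps it side-effect
# free):
if __name__ == '__main__':
    set_defaults(sql_connection='sqlite://', sqlite_db=':memory:')
    session = get_session()
    with session.begin():
        session.execute('SELECT 1')
    cleanup()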

View File

@ -1,132 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of paginate query."""
import sqlalchemy
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidSortKey(Exception):
message = _("Sort key supplied was not valid.")
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
sort_dir=None, sort_dirs=None):
"""Returns a query with sorting / pagination criteria added.
Pagination works by requiring a unique sort_key, specified by sort_keys.
(If sort_keys is not unique, then we risk looping through values.)
We use the last row in the previous page as the 'marker' for pagination.
So we must return values that follow the passed marker in the order.
With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound sort_key (k1, k2, k3), we must do this to repeat
the lexicographical ordering:
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
We also have to cope with different sort_directions.
    Typically, the id of the last row is used as the client-facing pagination
    marker; the actual marker object must then be fetched from the db and
    passed in to us as marker.
:param query: the query object to which we should add paging/sorting
:param model: the ORM model class
:param limit: maximum number of items to return
:param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value.
:param sort_dir: direction in which results should be sorted (asc, desc)
:param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
:rtype: sqlalchemy.orm.query.Query
:return: The query with sorting/pagination added.
"""
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming it is 'id'.
LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
# Default the sort direction to ascending
if sort_dirs is None and sort_dir is None:
sort_dir = 'asc'
# Ensure a per-column sort direction
if sort_dirs is None:
sort_dirs = [sort_dir for _sort_key in sort_keys]
assert(len(sort_dirs) == len(sort_keys))
# Add sorting
for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
sort_dir_func = {
'asc': sqlalchemy.asc,
'desc': sqlalchemy.desc,
}[current_sort_dir]
try:
sort_key_attr = getattr(model, current_sort_key)
except AttributeError:
raise InvalidSortKey()
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
if marker is not None:
marker_values = []
for sort_key in sort_keys:
v = getattr(marker, sort_key)
marker_values.append(v)
# Build up an array of sort criteria as in the docstring
criteria_list = []
for i in range(0, len(sort_keys)):
crit_attrs = []
for j in range(0, i):
model_attr = getattr(model, sort_keys[j])
crit_attrs.append((model_attr == marker_values[j]))
model_attr = getattr(model, sort_keys[i])
if sort_dirs[i] == 'desc':
crit_attrs.append((model_attr < marker_values[i]))
elif sort_dirs[i] == 'asc':
crit_attrs.append((model_attr > marker_values[i]))
else:
raise ValueError(_("Unknown sort direction, "
"must be 'desc' or 'asc'"))
criteria = sqlalchemy.sql.and_(*crit_attrs)
criteria_list.append(criteria)
f = sqlalchemy.sql.or_(*criteria_list)
query = query.filter(f)
if limit is not None:
query = query.limit(limit)
return query
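# Example (a runnable sketch; the Item model and its rows are made up):
if __name__ == '__main__':
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class Item(Base):
        __tablename__ = 'items'
        id = Column(Integer, primary_key=True)
        name = Column(String(32))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add_all([Item(id=i, name='n%d' % i) for i in range(1, 6)])
    session.commit()

    query = session.query(Item)
    page1 = paginate_query(query, Item, limit=2,
                           sort_keys=['name', 'id']).all()
    # The last row of one page becomes the marker for the next page:
    page2 = paginate_query(query, Item, limit=2,
                           sort_keys=['name', 'id'], marker=page1[-1]).all()
    print([i.id for i in page1], [i.id for i in page2])  # [1, 2] [3, 4]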

View File

@ -1,146 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import errno
import gc
import os
import pprint
import socket
import sys
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
help_for_backdoor_port = 'Acceptable ' + \
    'values are 0, <port> and <start>:<end>, where 0 results in ' + \
    'listening on a random tcp port number, <port> results in ' + \
    'listening on the specified port number (and not enabling backdoor ' + \
    'if it is in use) and <start>:<end> results in listening on the ' + \
    'smallest unused port number within the specified range of port ' + \
    'numbers. The chosen port is displayed in the service\'s log file.'
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
default=None,
help='Enable eventlet backdoor. %s' % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
class EventletBackdoorConfigValueError(Exception):
def __init__(self, port_range, help_msg, ex):
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
'%(help)s' %
{'range': port_range, 'ex': ex, 'help': help_msg})
super(EventletBackdoorConfigValueError, self).__init__(msg)
self.port_range = port_range
def _dont_use_this():
print("Don't use this, just disconnect instead")
def _find_objects(t):
return filter(lambda o: isinstance(o, t), gc.get_objects())
def _print_greenthreads():
for i, gt in enumerate(_find_objects(greenlet.greenlet)):
print(i, gt)
traceback.print_stack(gt.gr_frame)
print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
raise EventletBackdoorConfigValueError(port_range, ex,
help_for_backdoor_port)
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
'quit': _dont_use_this, # So we don't exit the entire process
'fo': _find_objects,
'pgt': _print_greenthreads,
'pnt': _print_nativethreads,
}
if CONF.backdoor_port is None:
return None
start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
# the __builtin__._ that gettext sets. Let's switch to using pprint
# since it won't interact poorly with gettext, and it's easier to
# read the output too.
def displayhook(val):
if val is not None:
pprint.pprint(val)
sys.displayhook = displayhook
sock = _listen('localhost', start_port, end_port, eventlet.listen)
# In the case of backdoor port being zero, a port number is assigned by
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()})
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port
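# Example (a minimal sketch; '0' asks _listen() to pick any free port):
if __name__ == '__main__':
    CONF.set_override('backdoor_port', '0')
    port = initialize_if_enabled()
    print('backdoor listening on %s; try: telnet 127.0.0.1 %s' % (port, port))
    # Inside the telnet session, fo(), pgt() and pnt() inspect live objects,
    # greenthreads and native thread stacks.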

View File

@ -1,99 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception related utilities.
"""
import logging
import sys
import time
import traceback
import six
from nova.openstack.common.gettextutils import _ # noqa
class save_and_reraise_exception(object):
"""Save current exception, run some code and then re-raise.
    In some cases the exception context can be cleared, resulting in an
    attempt to re-raise None after an exception handler is run. This can
    happen when eventlet switches greenthreads or when, while running an
    exception handler, code raises and catches another exception. In both
    cases the exception context will be cleared.
To work around this, we save the exception state, run handler code, and
then re-raise the original exception. If another exception occurs, the
saved exception is logged and the new exception is re-raised.
In some cases the caller may not want to re-raise the exception, and
for those circumstances this context provides a reraise flag that
can be used to suppress the exception. For example:
except Exception:
with save_and_reraise_exception() as ctxt:
decide_if_need_reraise()
if not should_be_reraised:
ctxt.reraise = False
"""
def __init__(self):
self.reraise = True
def __enter__(self):
self.type_, self.value, self.tb, = sys.exc_info()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
logging.error(_('Original exception being dropped: %s'),
traceback.format_exception(self.type_,
self.value,
self.tb))
return False
if self.reraise:
six.reraise(self.type_, self.value, self.tb)
def forever_retry_uncaught_exceptions(infunc):
def inner_func(*args, **kwargs):
last_log_time = 0
last_exc_message = None
exc_count = 0
while True:
try:
return infunc(*args, **kwargs)
except Exception as exc:
this_exc_message = six.u(str(exc))
if this_exc_message == last_exc_message:
exc_count += 1
else:
exc_count = 1
# Do not log any more frequently than once a minute unless
# the exception message changes
cur_time = int(time.time())
if (cur_time - last_log_time > 60 or
this_exc_message != last_exc_message):
logging.exception(
_('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
last_log_time = cur_time
last_exc_message = this_exc_message
exc_count = 0
# This should be a very rare event. In case it isn't, do
# a sleep.
time.sleep(1)
return inner_func
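# Example (a minimal, runnable sketch of the cleanup-then-reraise pattern):
if __name__ == '__main__':
    try:
        try:
            raise ValueError('boom')
        except Exception:
            with save_and_reraise_exception() as ctxt:
                print('cleaning up partially created resources')
                # setting ctxt.reraise = False here would swallow the error
    except ValueError:
        print('original exception preserved across the cleanup block')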

View File

@ -1,137 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import os
import tempfile
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
_FILE_CACHE = {}
def ensure_tree(path):
"""Create a directory (and any ancestor directories required)
:param path: Directory to create
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
def read_cached_file(filename, force_reload=False):
"""Read from a file if it has been modified.
:param force_reload: Whether to reload the file.
    :returns: A tuple (reloaded, data): a boolean indicating whether the
              file was re-read from disk, and the file's contents.
"""
global _FILE_CACHE
if force_reload and filename in _FILE_CACHE:
del _FILE_CACHE[filename]
reloaded = False
mtime = os.path.getmtime(filename)
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug(_("Reloading cached file %s") % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
reloaded = True
return (reloaded, cache_info['data'])
def delete_if_exists(path, remove=os.unlink):
"""Delete a file, but ignore file not found error.
:param path: File to delete
:param remove: Optional function to remove passed path
"""
try:
remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
@contextlib.contextmanager
def remove_path_on_error(path, remove=delete_if_exists):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
:param path: File to work with
:param remove: Optional function to remove passed path
"""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
remove(path)
def file_open(*args, **kwargs):
"""Open file
see built-in file() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return file(*args, **kwargs)
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
"""Create temporary file or use existing file.
    This utility creates a temporary file with the given content,
    suffix and prefix. If path is not None, it is used as the directory
    for the file; if that directory doesn't exist, it is created.
:param content: content for temporary file.
:param path: same as parameter 'dir' for mkstemp
:param suffix: same as parameter 'suffix' for mkstemp
:param prefix: same as parameter 'prefix' for mkstemp
For example: it can be used in database tests for creating
configuration files.
"""
if path:
ensure_tree(path)
(fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
try:
os.write(fd, content)
finally:
os.close(fd)
return path
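# Example (a minimal, runnable sketch; py2-era, so content is a plain str):
if __name__ == '__main__':
    path = write_to_tempfile('[DEFAULT]\nverbose=True\n', suffix='.conf')
    fresh, data = read_cached_file(path)
    assert fresh and 'verbose' in data
    delete_if_exists(path)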

View File

@ -1,46 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Mirantis, Inc.
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo.config import cfg
import six
class Config(fixtures.Fixture):
"""Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
    test by the reset() method, which is registered via addCleanup().
"""
def __init__(self, conf=cfg.CONF):
self.conf = conf
def setUp(self):
super(Config, self).setUp()
self.addCleanup(self.conf.reset)
def config(self, **kw):
group = kw.pop('group', None)
for k, v in six.iteritems(kw):
self.conf.set_override(k, v, group)
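# Example (a sketch; assumes a testtools-style test case and illustrative
# option names):
#
#     class MyTest(testtools.TestCase):
#         def test_overrides(self):
#             fix = self.useFixture(Config())
#             fix.config(verbose=True)                       # DEFAULT group
#             fix.config(group='database', connection='sqlite://')
#             # overrides are reset automatically after the test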

View File

@ -1,51 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
class PatchObject(fixtures.Fixture):
"""Deal with code around mock."""
def __init__(self, obj, attr, **kwargs):
self.obj = obj
self.attr = attr
self.kwargs = kwargs
def setUp(self):
super(PatchObject, self).setUp()
_p = mock.patch.object(self.obj, self.attr, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
class Patch(fixtures.Fixture):
"""Deal with code around mock.patch."""
def __init__(self, obj, **kwargs):
self.obj = obj
self.kwargs = kwargs
def setUp(self):
super(Patch, self).setUp()
_p = mock.patch(self.obj, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
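# Example (a sketch; the patch targets are illustrative):
#
#     class MyTest(testtools.TestCase):
#         def test_patched(self):
#             self.useFixture(PatchObject(time, 'sleep'))  # silence sleeps
#             fake = self.useFixture(
#                 Patch('os.path.exists', return_value=True)).mock
#             self.assertTrue(os.path.exists('/nonexistent'))
#             fake.assert_called_once_with('/nonexistent')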

View File

@ -1,34 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mox
class MoxStubout(fixtures.Fixture):
"""Deal with code around mox and stubout as a fixture."""
def setUp(self):
super(MoxStubout, self).setUp()
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
self.mox = mox.Mox()
self.stubs = self.mox.stubs
self.addCleanup(self.mox.UnsetStubs)
self.addCleanup(self.mox.VerifyAll)

View File

@ -1,371 +0,0 @@
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from nova.openstack.common.gettextutils import _
"""
import copy
import gettext
import logging
import os
import re
try:
import UserString as _userString
except ImportError:
import collections as _userString
from babel import localedata
import six
_localedir = os.environ.get('nova'.upper() + '_LOCALEDIR')
_t = gettext.translation('nova', localedir=_localedir, fallback=True)
_AVAILABLE_LANGUAGES = {}
USE_LAZY = False
def enable_lazy():
"""Convenience function for configuring _() to use lazy gettext
Call this at the start of execution to enable the gettextutils._
function to use lazy gettext functionality. This is useful if
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
global USE_LAZY
USE_LAZY = True
def _(msg):
if USE_LAZY:
return Message(msg, 'nova')
else:
if six.PY3:
return _t.gettext(msg)
return _t.ugettext(msg)
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
:param domain: the translation domain
:param lazy: indicates whether or not to install the lazy _() function.
The lazy _() introduces a way to do deferred translation
of messages by installing a _ that builds Message objects,
instead of strings, which can then be lazily translated into
any available locale.
"""
if lazy:
# NOTE(mrodden): Lazy gettext functionality.
#
# The following introduces a deferred way to do translations on
# messages in OpenStack. We override the standard _() function
# and % (format string) operation to build Message objects that can
# later be translated when we have more information.
#
# Also included below is an example LocaleHandler that translates
# Messages to an associated locale, effectively allowing many logs,
# each with their own locale.
def _lazy_gettext(msg):
"""Create and return a Message object.
Lazy gettext function for a given domain, it is a factory method
for a project/module to get a lazy gettext function for its own
translation domain (i.e. nova, glance, cinder, etc.)
Message encapsulates a string so that we can translate
it later when needed.
"""
return Message(msg, domain)
from six import moves
moves.builtins.__dict__['_'] = _lazy_gettext
else:
localedir = '%s_LOCALEDIR' % domain.upper()
if six.PY3:
gettext.install(domain,
localedir=os.environ.get(localedir))
else:
gettext.install(domain,
localedir=os.environ.get(localedir),
unicode=True)
class Message(_userString.UserString, object):
"""Class used to encapsulate translatable messages."""
def __init__(self, msg, domain):
# _msg is the gettext msgid and should never change
self._msg = msg
self._left_extra_msg = ''
self._right_extra_msg = ''
self._locale = None
self.params = None
self.domain = domain
@property
def data(self):
# NOTE(mrodden): this should always resolve to a unicode string
# that best represents the state of the message currently
localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
if self.locale:
lang = gettext.translation(self.domain,
localedir=localedir,
languages=[self.locale],
fallback=True)
else:
# use system locale for translations
lang = gettext.translation(self.domain,
localedir=localedir,
fallback=True)
if six.PY3:
ugettext = lang.gettext
else:
ugettext = lang.ugettext
full_msg = (self._left_extra_msg +
ugettext(self._msg) +
self._right_extra_msg)
if self.params is not None:
full_msg = full_msg % self.params
return six.text_type(full_msg)
@property
def locale(self):
return self._locale
@locale.setter
def locale(self, value):
self._locale = value
if not self.params:
return
# This Message object may have been constructed with one or more
# Message objects as substitution parameters, given as a single
# Message, or a tuple or Map containing some, so when setting the
# locale for this Message we need to set it for those Messages too.
if isinstance(self.params, Message):
self.params.locale = value
return
if isinstance(self.params, tuple):
for param in self.params:
if isinstance(param, Message):
param.locale = value
return
if isinstance(self.params, dict):
for param in self.params.values():
if isinstance(param, Message):
param.locale = value
def _save_dictionary_parameter(self, dict_param):
full_msg = self.data
# look for %(blah) fields in string;
# ignore %% and deal with the
# case where % is first character on the line
keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg)
# if we don't find any %(blah) blocks but have a %s
if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
# apparently the full dictionary is the parameter
params = copy.deepcopy(dict_param)
else:
params = {}
for key in keys:
try:
params[key] = copy.deepcopy(dict_param[key])
except TypeError:
# cast uncopyable thing to unicode string
params[key] = six.text_type(dict_param[key])
return params
def _save_parameters(self, other):
# we check for None later to see if
# we actually have parameters to inject,
# so encapsulate if our parameter is actually None
if other is None:
self.params = (other, )
elif isinstance(other, dict):
self.params = self._save_dictionary_parameter(other)
else:
# fallback to casting to unicode,
# this will handle the problematic python code-like
# objects that cannot be deep-copied
try:
self.params = copy.deepcopy(other)
except TypeError:
self.params = six.text_type(other)
return self
# overrides to be more string-like
def __unicode__(self):
return self.data
def __str__(self):
if six.PY3:
return self.__unicode__()
return self.data.encode('utf-8')
def __getstate__(self):
to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
'domain', 'params', '_locale']
new_dict = self.__dict__.fromkeys(to_copy)
for attr in to_copy:
new_dict[attr] = copy.deepcopy(self.__dict__[attr])
return new_dict
def __setstate__(self, state):
for (k, v) in state.items():
setattr(self, k, v)
# operator overloads
def __add__(self, other):
copied = copy.deepcopy(self)
copied._right_extra_msg += other.__str__()
return copied
def __radd__(self, other):
copied = copy.deepcopy(self)
copied._left_extra_msg += other.__str__()
return copied
def __mod__(self, other):
# do a format string to catch and raise
# any possible KeyErrors from missing parameters
self.data % other
copied = copy.deepcopy(self)
return copied._save_parameters(other)
def __mul__(self, other):
return self.data * other
def __rmul__(self, other):
return other * self.data
def __getitem__(self, key):
return self.data[key]
def __getslice__(self, start, end):
return self.data.__getslice__(start, end)
def __getattribute__(self, name):
# NOTE(mrodden): handle lossy operations that we can't deal with yet
# These override the UserString implementation, since UserString
# uses our __class__ attribute to try and build a new message
# after running the inner data string through the operation.
# At that point, we have lost the gettext message id and can just
# safely resolve to a string instead.
ops = ['capitalize', 'center', 'decode', 'encode',
'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
if name in ops:
return getattr(self.data, name)
else:
return _userString.UserString.__getattribute__(self, name)
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = '%s_LOCALEDIR' % domain.upper()
find = lambda x: gettext.find(domain,
localedir=os.environ.get(localedir),
languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
# renamed to locale_identifiers() in >=1.0, the requirements master list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
# this check when the master list updates to >=1.0, and update all projects
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
for i in locale_identifiers:
if find(i) is not None:
language_list.append(i)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list)
def get_localized_message(message, user_locale):
"""Gets a localized version of the given message in the given locale.
If the message is not a Message object the message is returned as-is.
If the locale is None the message is translated to the default locale.
:returns: the translated message in unicode, or the original message if
it could not be translated
"""
translated = message
if isinstance(message, Message):
original_locale = message.locale
message.locale = user_locale
translated = six.text_type(message)
message.locale = original_locale
return translated
class LocaleHandler(logging.Handler):
"""Handler that can have a locale associated to translate Messages.
A quick example of how to utilize the Message class above.
LocaleHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating the internal Message.
"""
def __init__(self, locale, target):
"""Initialize a LocaleHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
logging.Handler.__init__(self)
self.locale = locale
self.target = target
def emit(self, record):
if isinstance(record.msg, Message):
# set the locale and resolve to a string
record.msg.locale = self.locale
self.target.emit(record)

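To make the deferred-translation flow concrete, a small sketch (assuming this gettextutils module is importable from a nova checkout):

from nova.openstack.common import gettextutils
from nova.openstack.common.gettextutils import _

gettextutils.enable_lazy()

# In lazy mode _() returns a Message object rather than a plain string,
# and % substitution is captured for later re-translation.
msg = _("Instance %(id)s not found") % {'id': 'abc-123'}

# The same Message can then be rendered in any available locale.
print(gettextutils.get_localized_message(msg, 'en_US'))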
View File

@@ -1,144 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods to deal with images.
"""
import re
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import strutils
class QemuImgInfo(object):
BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:"
r"\s+(.*?)\)\s*$"), re.I)
TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$")
SIZE_RE = re.compile(r"\(\s*(\d+)\s+bytes\s*\)", re.I)
def __init__(self, cmd_output=None):
details = self._parse(cmd_output or '')
self.image = details.get('image')
self.backing_file = details.get('backing_file')
self.file_format = details.get('file_format')
self.virtual_size = details.get('virtual_size')
self.cluster_size = details.get('cluster_size')
self.disk_size = details.get('disk_size')
self.snapshots = details.get('snapshot_list', [])
self.encryption = details.get('encryption')
def __str__(self):
lines = [
'image: %s' % self.image,
'file_format: %s' % self.file_format,
'virtual_size: %s' % self.virtual_size,
'disk_size: %s' % self.disk_size,
'cluster_size: %s' % self.cluster_size,
'backing_file: %s' % self.backing_file,
]
if self.snapshots:
lines.append("snapshots: %s" % self.snapshots)
return "\n".join(lines)
def _canonicalize(self, field):
# Standardize on underscores/lc/no dash and no spaces
# since qemu seems to have mixed outputs here... and
# this format allows for better integration with python
# - ie for usage in kwargs and such...
field = field.lower().strip()
for c in (" ", "-"):
field = field.replace(c, '_')
return field
def _extract_bytes(self, details):
# Replace it with the byte amount
real_size = self.SIZE_RE.search(details)
if real_size:
details = real_size.group(1)
try:
details = strutils.to_bytes(details)
except TypeError:
pass
return details
def _extract_details(self, root_cmd, root_details, lines_after):
real_details = root_details
if root_cmd == 'backing_file':
# Replace it with the real backing file
backing_match = self.BACKING_FILE_RE.match(root_details)
if backing_match:
real_details = backing_match.group(2).strip()
elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']:
# Replace it with the byte amount (if we can convert it)
real_details = self._extract_bytes(root_details)
elif root_cmd == 'file_format':
real_details = real_details.strip().lower()
elif root_cmd == 'snapshot_list':
# Next line should be a header, starting with 'ID'
if not lines_after or not lines_after[0].startswith("ID"):
msg = _("Snapshot list encountered but no header found!")
raise ValueError(msg)
del lines_after[0]
real_details = []
# This is the sprintf pattern we will try to match
# "%-10s%-20s%7s%20s%15s"
# ID TAG VM SIZE DATE VM CLOCK (current header)
while lines_after:
line = lines_after[0]
line_pieces = line.split()
if len(line_pieces) != 6:
break
# Check against this pattern in the final position
# "%02d:%02d:%02d.%03d"
date_pieces = line_pieces[5].split(":")
if len(date_pieces) != 3:
break
real_details.append({
'id': line_pieces[0],
'tag': line_pieces[1],
'vm_size': line_pieces[2],
'date': line_pieces[3],
'vm_clock': line_pieces[4] + " " + line_pieces[5],
})
del lines_after[0]
return real_details
def _parse(self, cmd_output):
# Analysis done of qemu-img.c to figure out what is going on here
# Find all lines that start with some text, then a ':', then a newline,
# and then handle the results of those 'top level' items in a separate
# function.
#
# TODO(harlowja): newer versions might have a json output format
# we should switch to that whenever possible.
# see: http://bit.ly/XLJXDX
contents = {}
lines = [x for x in cmd_output.splitlines() if x.strip()]
while lines:
line = lines.pop(0)
top_level = self.TOP_LEVEL_RE.match(line)
if top_level:
root = self._canonicalize(top_level.group(1))
if not root:
continue
root_details = top_level.group(2).strip()
details = self._extract_details(root, root_details, lines)
contents[root] = details
return contents

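A quick sketch of the parser in action on canned `qemu-img info` output (the sample text is illustrative):

SAMPLE = """image: disk.qcow2
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 196K
cluster_size: 65536
"""

info = QemuImgInfo(SAMPLE)
print(info.file_format)    # 'qcow2'
print(info.virtual_size)   # 1073741824, taken from the '(... bytes)' field
print(info.backing_file)   # None: no backing file line in the sample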
View File

@@ -1,66 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ValueError, AttributeError):
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
"""Import a class and return an instance of it."""
return import_class(import_str)(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
"""Tries to import object from default namespace.
    Imports a class and returns an instance of it, first by trying
    to find the class in a default namespace, then falling back to
    a full path if not found in the default namespace.
"""
import_value = "%s.%s" % (name_space, import_str)
try:
return import_class(import_value)(*args, **kwargs)
except ImportError:
return import_class(import_str)(*args, **kwargs)
def import_module(import_str):
"""Import a module."""
__import__(import_str)
return sys.modules[import_str]
def try_import(import_str, default=None):
"""Try to import a module and if it fails return default."""
try:
return import_module(import_str)
except ImportError:
return default

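These helpers are easiest to see with stdlib names; a minimal sketch:

# Load a class by dotted path, or instantiate it directly.
OrderedDict = import_class('collections.OrderedDict')
od = import_object('collections.OrderedDict', [('a', 1)])

# Optional-dependency pattern: try_import() returns None (or a supplied
# default) instead of raising ImportError.
simplejson = try_import('simplejson')
json_mod = simplejson or import_module('json')
print(json_mod.dumps({'a': 1}))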
View File

@@ -1,178 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
try:
import xmlrpclib
except ImportError:
# NOTE(jd): xmlrpclib is not shipped with Python 3
xmlrpclib = None
import six
from nova.openstack.common import gettextutils
from nova.openstack.common import importutils
from nova.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in six.iteritems(value))
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)

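A short sketch of dumps() leaning on to_primitive() for non-JSON-native types (assuming this module is importable):

import datetime

payload = {
    'name': u'demo',
    'when': datetime.datetime(2014, 1, 28, 23, 8, 59),
    'tags': set(['a', 'b']),   # sets are not JSON-native
}
# dumps() routes unknown types through to_primitive(): the datetime is
# rendered via timeutils.strtime() and the set becomes a list.
print(dumps(payload))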
View File

@@ -1,45 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Local storage of variables using weak references"""
import threading
import weakref
class WeakLocal(threading.local):
def __getattribute__(self, attr):
rval = super(WeakLocal, self).__getattribute__(attr)
if rval:
# NOTE(mikal): this bit is confusing. What is stored is a weak
# reference, not the value itself. We therefore need to lookup
# the weak reference and return the inner value here.
rval = rval()
return rval
def __setattr__(self, attr, value):
value = weakref.ref(value)
return super(WeakLocal, self).__setattr__(attr, value)
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()
# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()

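The difference between the two stores, as a sketch (CPython's reference counting collects the weak referent as soon as the last strong reference goes away):

class Context(object):
    pass

weak_obj = Context()
store.context = weak_obj       # stored as weakref.ref(weak_obj)
del weak_obj
print(store.context)           # None: the referent was collected

strong_obj = Context()
strong_store.context = strong_obj
del strong_obj
print(strong_store.context)    # still the Context instance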
View File

@@ -1,303 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref
from oslo.config import cfg
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import local
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
default=os.environ.get("NOVA_LOCK_PATH"),
help=('Directory to use for lock files.'))
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
This function yields a `threading.Semaphore` instance (if we don't use
eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
:param lock_path: The lock_path keyword argument is used to specify a
special location for external lock files to live. If nothing is set, then
CONF.lock_path is used as a default.
"""
with _semaphores_lock:
try:
sem = _semaphores[name]
except KeyError:
sem = threading.Semaphore()
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
{'lock': name})
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
raise cfg.RequiredOptError('lock_path')
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
LOG.info(_('Created lock path: %s'), local_lock_path)
def add_prefix(name, prefix):
if not prefix:
return name
sep = '' if prefix.endswith('-') else '-'
return '%s%s%s' % (prefix, sep, name)
# NOTE(mikal): the lock name cannot contain directory
# separators
lock_file_name = add_prefix(name.replace(os.sep, '_'),
lock_file_prefix)
lock_file_path = os.path.join(local_lock_path, lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock as lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
yield lock
finally:
LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
else:
yield sem
finally:
local.strong_store.locks_held.remove(name)
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
try:
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug(_('Got semaphore / lock "%(function)s"'),
{'function': f.__name__})
return f(*args, **kwargs)
finally:
LOG.debug(_('Semaphore / lock released "%(function)s"'),
{'function': f.__name__})
return inner
return wrap
def synchronized_with_prefix(lock_file_prefix):
"""Partial object generator for the synchronization decorator.
Redefine @synchronized in each project like so::
(in nova/utils.py)
from nova.openstack.common import lockutils
synchronized = lockutils.synchronized_with_prefix('nova-')
(in nova/foo.py)
from nova import utils
@utils.synchronized('mylock')
def bar(self, *args):
...
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix.
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
def main(argv):
"""Create a dir for locks and pass it to command from arguments
If you run this:
python -m openstack.common.lockutils python setup.py testr <etc>
a temporary directory will be created for all your locks and passed to all
your tests in an environment variable. The temporary dir will be deleted
afterwards and the return value will be preserved.
"""
lock_dir = tempfile.mkdtemp()
os.environ["NOVA_LOCK_PATH"] = lock_dir
try:
ret_val = subprocess.call(argv[1:])
finally:
shutil.rmtree(lock_dir, ignore_errors=True)
return ret_val
if __name__ == '__main__':
sys.exit(main(sys.argv))

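A hypothetical sketch of the decorator with an external (cross-process) lock; the paths are illustrative:

@synchronized('state-file', lock_file_prefix='demo-', external=True,
              lock_path='/tmp/demo-locks')
def append_record(line):
    # Only one thread -- and, with external=True, only one process --
    # runs this body at a time.
    with open('/tmp/demo-state.txt', 'a') as f:
        f.write(line + '\n')

append_record('hello')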
View File

@@ -1,626 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
r'(<%(key)s>).*?(</%(key)s>)',
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS.append(reg_ex)
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of logging configuration file. It does not '
'disable existing loggers, but just appends specified '
'logging configuration to any other existing logging '
'options. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='syslog facility to receive log lines')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user)s %(tenant)s] '
'%(instance)s%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'keystone=INFO',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'iso8601=WARN',
],
help='list of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
def mask_password(message, secret="***"):
"""Replace password with 'secret' in message.
:param message: The string which includes security information.
:param secret: value with which to replace passwords, defaults to "***".
:returns: The unicode value of message with the password fields masked.
For example:
>>> mask_password("'adminPass' : 'aaaaa'")
"'adminPass' : '***'"
>>> mask_password("'admin_pass' : 'aaaaa'")
"'admin_pass' : '***'"
>>> mask_password('"password" : "aaaaa"')
'"password" : "***"'
>>> mask_password("'original_password' : 'aaaaa'")
"'original_password' : '***'"
>>> mask_password("u'original_password' : u'aaaaa'")
"u'original_password' : u'***'"
"""
message = six.text_type(message)
# NOTE(ldbragst): Check to see if anything in message contains any key
# specified in _SANITIZE_KEYS, if not then just return the message since
# we don't have to mask any passwords.
if not any(key in message for key in _SANITIZE_KEYS):
return message
secret = r'\g<1>' + secret + r'\g<2>'
for pattern in _SANITIZE_PATTERNS:
message = re.sub(pattern, secret, message)
return message
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
# coerce to unicode before they can get
# to the python logging and possibly
# cause string encoding trouble
if not isinstance(msg, six.string_types):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid', None) or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra.update({'instance': instance_extra})
extra.update({"project": self.project})
extra.update({"version": self.version})
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but its still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [itertools.ifilter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {}
if CONF.verbose:
extra['exc_info'] = (exc_type, value, tb)
getLogger(product_name).critical(str(value), **extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except moves.configparser.Error as exc:
raise LogConfigError(log_config_append, str(exc))
def setup(product_name):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf()
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
def _setup_logging_from_conf():
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not CONF.log_file:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
handler = importutils.import_object(
"nova.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
"""
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
        # Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))

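A small sketch pulling the pieces together: register and parse the options, set up handlers, then use the synthesized AUDIT level and the password-masking helper (the project name and arguments are illustrative):

CONF(['--debug'], project='demo')   # parse CLI options
setup('demo')

LOG = getLogger(__name__)
LOG.audit('service starting')       # the synthesized AUDIT level
LOG.info('request %s', mask_password("'admin_pass' : 'secret'"))
# the logged line carries "'admin_pass' : '***'"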
View File

@@ -1,31 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from nova.openstack.common import notifier
from oslo.config import cfg
class PublishErrorsHandler(logging.Handler):
def emit(self, record):
if ('nova.openstack.common.notifier.log_notifier' in
cfg.CONF.notification_driver):
return
notifier.api.notify(None, 'error.publisher',
'error_notification',
notifier.api.ERROR,
dict(error=record.msg))

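A sketch of attaching the handler by hand (it assumes a notification driver other than log_notifier is configured, per the guard in emit()):

import logging

root = logging.getLogger()
root.addHandler(PublishErrorsHandler(logging.ERROR))
# ERROR and above now also go out as 'error_notification' events.
root.error('compute node unreachable')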
View File

@@ -1,147 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from eventlet import event
from eventlet import greenthread
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCallBase(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
self.done = None
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
class FixedIntervalLoopingCall(LoopingCallBase):
"""A fixed interval looping call."""
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
start = timeutils.utcnow()
self.f(*self.args, **self.kw)
end = timeutils.utcnow()
if not self._running:
break
delay = interval - timeutils.delta_seconds(start, end)
if delay <= 0:
LOG.warn(_('task run outlasted interval by %s sec') %
-delay)
greenthread.sleep(delay if delay > 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn_n(_inner)
return self.done
# TODO(mikal): this class name is deprecated in Havana and should be removed
# in the I release
LoopingCall = FixedIntervalLoopingCall
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
The function called should return how long to sleep for before being
called again.
"""
def start(self, initial_delay=None, periodic_interval_max=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
idle = self.f(*self.args, **self.kw)
if not self._running:
break
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug(_('Dynamic looping call sleeping for %.02f '
'seconds'), idle)
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in dynamic looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done

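A sketch of FixedIntervalLoopingCall, assuming an eventlet environment as the module itself does:

counter = {'n': 0}

def tick():
    counter['n'] += 1
    if counter['n'] >= 3:
        # Stop the loop; wait() will return the retvalue.
        raise LoopingCallDone(retvalue='done')

timer = FixedIntervalLoopingCall(tick)
timer.start(interval=0.1)
print(timer.wait())   # cooperatively blocks, then prints 'done'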
View File

@@ -1,97 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Super simple fake memcache client."""
from oslo.config import cfg
from nova.openstack.common import timeutils
memcache_opts = [
cfg.ListOpt('memcached_servers',
default=None,
help='Memcached servers or None for in process cache.'),
]
CONF = cfg.CONF
CONF.register_opts(memcache_opts)
def get_client(memcached_servers=None):
client_cls = Client
if not memcached_servers:
memcached_servers = CONF.memcached_servers
if memcached_servers:
try:
import memcache
client_cls = memcache.Client
except ImportError:
pass
return client_cls(memcached_servers, debug=0)
class Client(object):
"""Replicates a tiny subset of memcached client interface."""
def __init__(self, *args, **kwargs):
"""Ignores the passed in args."""
self.cache = {}
def get(self, key):
"""Retrieves the value for a key or None.
This expunges expired keys during each get.
"""
now = timeutils.utcnow_ts()
for k in self.cache.keys():
(timeout, _value) = self.cache[k]
if timeout and now >= timeout:
del self.cache[k]
return self.cache.get(key, (0, None))[1]
def set(self, key, value, time=0, min_compress_len=0):
"""Sets the value for a key."""
timeout = 0
if time != 0:
timeout = timeutils.utcnow_ts() + time
self.cache[key] = (timeout, value)
return True
def add(self, key, value, time=0, min_compress_len=0):
"""Sets the value for a key if it doesn't exist."""
if self.get(key) is not None:
return False
return self.set(key, value, time, min_compress_len)
def incr(self, key, delta=1):
"""Increments the value for a key."""
value = self.get(key)
if value is None:
return None
new_value = int(value) + delta
self.cache[key] = (self.cache[key][0], str(new_value))
return new_value
def delete(self, key, time=0):
"""Deletes the value associated with a key."""
if key in self.cache:
del self.cache[key]

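With no memcached_servers configured, get_client() hands back the in-process Client; a sketch:

cache = get_client()
cache.set('token', 'abc', time=60)   # expires 60 seconds from now
print(cache.get('token'))            # 'abc'
print(cache.incr('hits'))            # None: incr on a missing key
print(cache.add('token', 'xyz'))     # False: key already present
cache.delete('token')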
View File

@@ -1,45 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Attach open standard audit information to request.environ
AuditMiddleware filter should be placed after Keystone's auth_token middleware
in the pipeline so that it can utilise the information Keystone provides.
"""
from pycadf.audit import api as cadf_api
from nova.openstack.common.middleware import notifier
class AuditMiddleware(notifier.RequestNotifier):
def __init__(self, app, **conf):
super(AuditMiddleware, self).__init__(app, **conf)
self.cadf_audit = cadf_api.OpenStackAuditApi()
@notifier.log_and_ignore_error
def process_request(self, request):
self.cadf_audit.append_audit_event(request)
super(AuditMiddleware, self).process_request(request)
@notifier.log_and_ignore_error
def process_response(self, request, response,
exception=None, traceback=None):
self.cadf_audit.mod_audit_event(request, response)
super(AuditMiddleware, self).process_response(request, response,
exception, traceback)

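A hypothetical wiring sketch; in a real deployment this happens via a paste.deploy pipeline, with the filter placed after auth_token:

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['ok']

# Wrap the application directly; the conf keys mirror what paste.deploy
# would pass through the factory.
audited = AuditMiddleware(app, service_name='demo')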
View File

@@ -1,55 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class(es) for WSGI Middleware."""
import webob.dec
class Middleware(object):
"""Base WSGI middleware wrapper.
These classes are initialized with the application they wrap, which is
called next in the stack. By default the middleware simply calls its
wrapped app; override __call__ to customize that behavior.
"""
@classmethod
def factory(cls, global_conf, **local_conf):
"""Factory method for paste.deploy."""
return cls
def __init__(self, application):
self.application = application
def process_request(self, req):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
@webob.dec.wsgify
def __call__(self, req):
response = self.process_request(req)
if response:
return response
response = req.get_response(self.application)
return self.process_response(response)
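
A minimal subclass sketch (names are illustrative) showing that only the relevant hook needs to be overridden:

from nova.openstack.common.middleware import base

class HeaderStamper(base.Middleware):
    """Stamp every response with a marker header."""

    def process_response(self, response):
        response.headers['X-Handled-By'] = 'gantt'
        return response

# Wrap any WSGI application:
# application = HeaderStamper(application)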


@@ -1,126 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Send notifications on request
"""
import os.path
import sys
import traceback as tb
import webob.dec
from nova.openstack.common import context
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import log as logging
from nova.openstack.common.middleware import base
from nova.openstack.common.notifier import api
LOG = logging.getLogger(__name__)
def log_and_ignore_error(fn):
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e:
LOG.exception(_('An exception occurred processing '
'the API call: %s ') % e)
return wrapped
class RequestNotifier(base.Middleware):
"""Send notification on request."""
@classmethod
def factory(cls, global_conf, **local_conf):
"""Factory method for paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def _factory(app):
return cls(app, **conf)
return _factory
def __init__(self, app, **conf):
self.service_name = conf.get('service_name', None)
self.ignore_req_list = [x.upper().strip() for x in
conf.get('ignore_req_list', '').split(',')]
super(RequestNotifier, self).__init__(app)
@staticmethod
def environ_to_dict(environ):
"""Following PEP 333, server variables are lower case, so don't
include them.
"""
return dict((k, v) for k, v in environ.iteritems()
if k.isupper())
@log_and_ignore_error
def process_request(self, request):
request.environ['HTTP_X_SERVICE_NAME'] = \
self.service_name or request.host
payload = {
'request': self.environ_to_dict(request.environ),
}
api.notify(context.get_admin_context(),
api.publisher_id(os.path.basename(sys.argv[0])),
'http.request',
api.INFO,
payload)
@log_and_ignore_error
def process_response(self, request, response,
exception=None, traceback=None):
payload = {
'request': self.environ_to_dict(request.environ),
}
if response:
payload['response'] = {
'status': response.status,
'headers': response.headers,
}
if exception:
payload['exception'] = {
'value': repr(exception),
'traceback': tb.format_tb(traceback)
}
api.notify(context.get_admin_context(),
api.publisher_id(os.path.basename(sys.argv[0])),
'http.response',
api.INFO,
payload)
@webob.dec.wsgify
def __call__(self, req):
if req.method in self.ignore_req_list:
return req.get_response(self.application)
else:
self.process_request(req)
try:
response = req.get_response(self.application)
except Exception:
type, value, traceback = sys.exc_info()
self.process_response(req, None, value, traceback)
raise
else:
self.process_response(req, response)
return response
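
A short sketch of building this middleware through its paste.deploy factory; the option values are illustrative and 'application' stands for any WSGI app:

from nova.openstack.common.middleware import notifier

factory = notifier.RequestNotifier.factory(
    {}, service_name='gantt', ignore_req_list='GET, HEAD')
wrapped = factory(application)  # GET and HEAD requests bypass notification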


@@ -1,81 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Network-related utilities and helper functions.
"""
import urlparse
def parse_host_port(address, default_port=None):
"""Interpret a string as a host:port pair.
An IPv6 address MUST be escaped if accompanied by a port,
because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
means both [2001:db8:85a3::8a2e:370:7334] and
[2001:db8:85a3::8a2e:370]:7334.
>>> parse_host_port('server01:80')
('server01', 80)
>>> parse_host_port('server01')
('server01', None)
>>> parse_host_port('server01', default_port=1234)
('server01', 1234)
>>> parse_host_port('[::1]:80')
('::1', 80)
>>> parse_host_port('[::1]')
('::1', None)
>>> parse_host_port('[::1]', default_port=1234)
('::1', 1234)
>>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234)
('2001:db8:85a3::8a2e:370:7334', 1234)
"""
if address[0] == '[':
# Escaped ipv6
_host, _port = address[1:].split(']')
host = _host
if ':' in _port:
port = _port.split(':')[1]
else:
port = default_port
else:
if address.count(':') == 1:
host, port = address.split(':')
else:
# 0 means ipv4, >1 means ipv6.
# We prohibit unescaped ipv6 addresses with port.
host = address
port = default_port
return (host, None if port is None else int(port))
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL using urlparse.urlsplit(), splitting query and fragments.
This function papers over Python issue9374 when needed.
The parameters are the same as urlparse.urlsplit.
"""
scheme, netloc, path, query, fragment = urlparse.urlsplit(
url, scheme, allow_fragments)
if allow_fragments and '#' in path:
path, fragment = path.split('#', 1)
if '?' in path:
path, query = path.split('?', 1)
return urlparse.SplitResult(scheme, netloc, path, query, fragment)
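
A doctest-style illustration of the paper-over, assuming a Python build affected by issue9374 (where the unpatched urlparse.urlsplit leaves the query and fragment inside the path for unknown schemes):

>>> urlsplit('svn+ssh://example.com/repo?rev=42#tip')
SplitResult(scheme='svn+ssh', netloc='example.com', path='/repo', query='rev=42', fragment='tip')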


@@ -1,14 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


@@ -1,173 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import uuid
from oslo.config import cfg
from nova.openstack.common import context
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
notifier_opts = [
cfg.MultiStrOpt('notification_driver',
default=[],
help='Driver or drivers to handle sending notifications'),
cfg.StrOpt('default_notification_level',
default='INFO',
help='Default notification level for outgoing notifications'),
cfg.StrOpt('default_publisher_id',
default=None,
help='Default publisher_id for outgoing notifications'),
]
CONF = cfg.CONF
CONF.register_opts(notifier_opts)
WARN = 'WARN'
INFO = 'INFO'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
DEBUG = 'DEBUG'
log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
class BadPriorityException(Exception):
pass
def notify_decorator(name, fn):
"""Decorator for notify which is used from utils.monkey_patch().
:param name: name of the function
:param fn: the function object
:returns: function -- decorated function
"""
def wrapped_func(*args, **kwarg):
body = {}
body['args'] = []
body['kwarg'] = {}
for arg in args:
body['args'].append(arg)
for key in kwarg:
body['kwarg'][key] = kwarg[key]
ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
notify(ctxt,
CONF.default_publisher_id or socket.gethostname(),
name,
CONF.default_notification_level,
body)
return fn(*args, **kwarg)
return wrapped_func
def publisher_id(service, host=None):
if not host:
try:
host = CONF.host
except AttributeError:
host = CONF.default_publisher_id or socket.gethostname()
return "%s.%s" % (service, host)
def notify(context, publisher_id, event_type, priority, payload):
"""Sends a notification using the specified driver
:param publisher_id: the source worker_type.host of the message
:param event_type: the literal type of event (ex. Instance Creation)
:param priority: patterned after the enumeration of Python logging
levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
:param payload: A python dictionary of attributes
Outgoing message format includes the above parameters, and appends the
following:
message_id
a UUID representing the id for this notification
timestamp
the GMT timestamp the notification was sent at
The composite message will be constructed as a dictionary of the above
attributes, which will then be sent via the transport mechanism defined
by the driver.
Message example::
{'message_id': str(uuid.uuid4()),
'publisher_id': 'compute.host1',
'timestamp': timeutils.utcnow(),
'priority': 'WARN',
'event_type': 'compute.create_instance',
'payload': {'instance_id': 12, ... }}
"""
if priority not in log_levels:
raise BadPriorityException(
_('%s not in valid priorities') % priority)
# Ensure everything is JSON serializable.
payload = jsonutils.to_primitive(payload, convert_instances=True)
msg = dict(message_id=str(uuid.uuid4()),
publisher_id=publisher_id,
event_type=event_type,
priority=priority,
payload=payload,
timestamp=str(timeutils.utcnow()))
for driver in _get_drivers():
try:
driver.notify(context, msg)
except Exception as e:
LOG.exception(_("Problem '%(e)s' attempting to "
"send to notification system. "
"Payload=%(payload)s")
% dict(e=e, payload=payload))
_drivers = None
def _get_drivers():
"""Instantiate, cache, and return drivers based on the CONF."""
global _drivers
if _drivers is None:
_drivers = {}
for notification_driver in CONF.notification_driver:
try:
driver = importutils.import_module(notification_driver)
_drivers[notification_driver] = driver
except ImportError:
LOG.exception(_("Failed to load notifier %s. "
"These notifications will not be sent.") %
notification_driver)
return _drivers.values()
def _reset_drivers():
"""Used by unit tests to reset the drivers."""
global _drivers
_drivers = None
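
A minimal sending sketch, assuming the nova tree is importable and at least one driver is configured; the event type and payload are illustrative:

from nova.openstack.common import context
from nova.openstack.common.notifier import api as notifier_api

notifier_api.notify(context.get_admin_context(),
                    notifier_api.publisher_id('scheduler'),
                    'scheduler.run_instance.start',
                    notifier_api.INFO,
                    {'instance_id': 12})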


@@ -1,37 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
CONF = cfg.CONF
def notify(_context, message):
"""Notifies the recipient of the desired event given the model.
Log notifications using OpenStack's default logging system.
"""
priority = message.get('priority',
CONF.default_notification_level)
priority = priority.lower()
logger = logging.getLogger(
'nova.openstack.common.notification.%s' %
message['event_type'])
getattr(logger, priority)(jsonutils.dumps(message))
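
Since drivers are loaded by module path (see _get_drivers() in the notifier API above), enabling this one is purely configuration; a hedged sketch:

from oslo.config import cfg

# Illustrative override; normally this is set in the service's .conf file.
cfg.CONF.set_override('notification_driver',
                      ['nova.openstack.common.notifier.log_notifier'])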


@@ -1,19 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def notify(_context, message):
"""Notifies the recipient of the desired event given the model."""
pass


@@ -1,29 +0,0 @@
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import rpc_notifier
LOG = logging.getLogger(__name__)
def notify(context, message):
"""Deprecated in Grizzly. Please use rpc_notifier instead."""
LOG.deprecated(_("The rabbit_notifier is now deprecated."
" Please use rpc_notifier instead."))
rpc_notifier.notify(context, message)


@@ -1,46 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.openstack.common import context as req_context
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
LOG = logging.getLogger(__name__)
notification_topic_opt = cfg.ListOpt(
'notification_topics', default=['notifications', ],
help='AMQP topic used for OpenStack notifications')
CONF = cfg.CONF
CONF.register_opt(notification_topic_opt)
def notify(context, message):
"""Sends a notification via RPC."""
if not context:
context = req_context.get_admin_context()
priority = message.get('priority',
CONF.default_notification_level)
priority = priority.lower()
for topic in CONF.notification_topics:
topic = '%s.%s' % (topic, priority)
try:
rpc.notify(context, topic, message)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"), locals())


@@ -1,52 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''Messaging-based notification driver, with message envelopes.'''
from oslo.config import cfg
from nova.openstack.common import context as req_context
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
LOG = logging.getLogger(__name__)
notification_topic_opt = cfg.ListOpt(
'topics', default=['notifications', ],
help='AMQP topic(s) used for OpenStack notifications')
opt_group = cfg.OptGroup(name='rpc_notifier2',
title='Options for rpc_notifier2')
CONF = cfg.CONF
CONF.register_group(opt_group)
CONF.register_opt(notification_topic_opt, opt_group)
def notify(context, message):
"""Sends a notification via RPC."""
if not context:
context = req_context.get_admin_context()
priority = message.get('priority',
CONF.default_notification_level)
priority = priority.lower()
for topic in CONF.rpc_notifier2.topics:
topic = '%s.%s' % (topic, priority)
try:
rpc.notify(context, topic, message, envelope=True)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"), locals())


@@ -1,22 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
NOTIFICATIONS = []
def notify(_context, message):
"""Test notifier, stores notifications in memory for unittests."""
NOTIFICATIONS.append(message)


@@ -1,188 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
periodic_opts = [
cfg.BoolOpt('run_external_periodic_tasks',
default=True,
help=('Some periodic tasks can be run in a separate process. '
'Should we run them here?')),
]
CONF = cfg.CONF
CONF.register_opts(periodic_opts)
LOG = logging.getLogger(__name__)
DEFAULT_INTERVAL = 60.0
class InvalidPeriodicTaskArg(Exception):
message = _("Unexpected argument for periodic task creation: %(arg)s.")
def periodic_task(*args, **kwargs):
"""Decorator to indicate that a method is a periodic task.
This decorator can be used in two ways:
1. Without arguments '@periodic_task', this will be run on every cycle
of the periodic scheduler.
2. With arguments:
@periodic_task(spacing=N [, run_immediately=[True|False]])
this will be run approximately every N seconds. If this number is
negative the periodic task will be disabled. If the run_immediately
argument is provided and has a value of 'True', the first run of the
task will be shortly after the task scheduler starts. If
run_immediately is omitted or set to 'False', the first time the
task runs will be approximately N seconds after the task scheduler
starts.
"""
def decorator(f):
# Test for old style invocation
if 'ticks_between_runs' in kwargs:
raise InvalidPeriodicTaskArg(arg='ticks_between_runs')
# Control if run at all
f._periodic_task = True
f._periodic_external_ok = kwargs.pop('external_process_ok', False)
if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
f._periodic_enabled = False
else:
f._periodic_enabled = kwargs.pop('enabled', True)
# Control frequency
f._periodic_spacing = kwargs.pop('spacing', 0)
f._periodic_immediate = kwargs.pop('run_immediately', False)
if f._periodic_immediate:
f._periodic_last_run = None
else:
f._periodic_last_run = timeutils.utcnow()
return f
# NOTE(sirp): The `if` is necessary to allow the decorator to be used with
# and without parens.
#
# In the 'with-parens' case (with kwargs present), this function needs to
# return a decorator function since the interpreter will invoke it like:
#
# periodic_task(*args, **kwargs)(f)
#
# In the 'without-parens' case, the original function will be passed
# in as the first argument, like:
#
# periodic_task(f)
if kwargs:
return decorator
else:
return decorator(args[0])
class _PeriodicTasksMeta(type):
def __init__(cls, names, bases, dict_):
"""Metaclass that allows us to collect decorated periodic tasks."""
super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
# NOTE(sirp): if the attribute is not present then we must be the base
# class, so go ahead and initialize it. If the attribute is present,
# then we're a subclass, so make a copy of it so we don't step on our
# parent's toes.
try:
cls._periodic_tasks = cls._periodic_tasks[:]
except AttributeError:
cls._periodic_tasks = []
try:
cls._periodic_last_run = cls._periodic_last_run.copy()
except AttributeError:
cls._periodic_last_run = {}
try:
cls._periodic_spacing = cls._periodic_spacing.copy()
except AttributeError:
cls._periodic_spacing = {}
for value in cls.__dict__.values():
if getattr(value, '_periodic_task', False):
task = value
name = task.__name__
if task._periodic_spacing < 0:
LOG.info(_('Skipping periodic task %(task)s because '
'its interval is negative'),
{'task': name})
continue
if not task._periodic_enabled:
LOG.info(_('Skipping periodic task %(task)s because '
'it is disabled'),
{'task': name})
continue
# A periodic spacing of zero indicates that this task should
# be run every pass
if task._periodic_spacing == 0:
task._periodic_spacing = None
cls._periodic_tasks.append((name, task))
cls._periodic_spacing[name] = task._periodic_spacing
cls._periodic_last_run[name] = task._periodic_last_run
class PeriodicTasks(object):
__metaclass__ = _PeriodicTasksMeta
def run_periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
idle_for = DEFAULT_INTERVAL
for task_name, task in self._periodic_tasks:
full_task_name = '.'.join([self.__class__.__name__, task_name])
now = timeutils.utcnow()
spacing = self._periodic_spacing[task_name]
last_run = self._periodic_last_run[task_name]
# If a periodic task is _nearly_ due, then we'll run it early
if spacing is not None and last_run is not None:
due = last_run + datetime.timedelta(seconds=spacing)
if not timeutils.is_soon(due, 0.2):
idle_for = min(idle_for, timeutils.delta_seconds(now, due))
continue
if spacing is not None:
idle_for = min(idle_for, spacing)
LOG.debug(_("Running periodic task %(full_task_name)s"), locals())
self._periodic_last_run[task_name] = timeutils.utcnow()
try:
task(self, context)
except Exception as e:
if raise_on_error:
raise
LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
locals())
time.sleep(0)
return idle_for
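
A usage sketch of the two decorator forms described in the docstring above; the manager and task names are illustrative:

from nova.openstack.common import periodic_task

class ExampleManager(periodic_task.PeriodicTasks):

    @periodic_task.periodic_task
    def every_cycle(self, context):
        pass  # runs on every pass of the scheduler

    @periodic_task.periodic_task(spacing=600, run_immediately=True)
    def every_ten_minutes(self, context):
        pass  # first run shortly after startup, then every ~600s

# A service loop would call, roughly once per cycle:
# idle_for = ExampleManager().run_periodic_tasks(context)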


@@ -1,779 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import re
import urllib
import urllib2
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
_rules = None
_checks = {}
class Rules(dict):
"""
A store for rules. Handles the default_rule setting directly.
"""
@classmethod
def load_json(cls, data, default_rule=None):
"""
Allow loading of JSON rule data.
"""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule or self.default_rule not in self:
raise KeyError(key)
return self[self.default_rule]
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
# Really have to figure out a way to deprecate this
def set_rules(rules):
"""Set the rules in use for policy checks."""
global _rules
_rules = rules
# Ditto
def reset():
"""Clear the rules used for policy checks."""
global _rules
_rules = None
def check(rule, target, creds, exc=None, *args, **kwargs):
"""
Checks authorization of a rule against the target and credentials.
:param rule: The rule to evaluate.
:param target: As much information about the object being operated
on as possible, as a dictionary.
:param creds: As much information about the user performing the
action as possible, as a dictionary.
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to check() (both
positional and keyword arguments) will be passed to
the exception class. If exc is not provided, returns
False.
:return: Returns False if the policy does not allow the action and
exc is not provided; otherwise, returns a value that
evaluates to True. Note: for rules using the "case"
expression, this True value will be the specified string
from the expression.
"""
# Allow the rule to be a Check tree
if isinstance(rule, BaseCheck):
result = rule(target, creds)
elif not _rules:
# No rules to reference means we're going to fail closed
result = False
else:
try:
# Evaluate the rule
result = _rules[rule](target, creds)
except KeyError:
# If the rule doesn't exist, fail closed
result = False
# If it is False, raise the exception if requested
if exc and result is False:
raise exc(*args, **kwargs)
return result
class BaseCheck(object):
"""
Abstract base class for Check classes.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __str__(self):
"""
Retrieve a string representation of the Check tree rooted at
this node.
"""
pass
@abc.abstractmethod
def __call__(self, target, cred):
"""
Perform the check. Returns False to reject the access or a
true value (not necessarily True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""
A policy check that always returns False (disallow).
"""
def __str__(self):
"""Return a string representation of this check."""
return "!"
def __call__(self, target, cred):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""
A policy check that always returns True (allow).
"""
def __str__(self):
"""Return a string representation of this check."""
return "@"
def __call__(self, target, cred):
"""Check the policy."""
return True
class Check(BaseCheck):
"""
A base class to allow for user-defined policy checks.
"""
def __init__(self, kind, match):
"""
:param kind: The kind of the check, i.e., the field before the
':'.
:param match: The match of the check, i.e., the field after
the ':'.
"""
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return "%s:%s" % (self.kind, self.match)
class NotCheck(BaseCheck):
"""
A policy check that inverts the result of another policy check.
Implements the "not" operator.
"""
def __init__(self, rule):
"""
Initialize the 'not' check.
:param rule: The rule to negate. Must be a Check.
"""
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return "not %s" % self.rule
def __call__(self, target, cred):
"""
Check the policy. Returns the logical inverse of the wrapped
check.
"""
return not self.rule(target, cred)
class AndCheck(BaseCheck):
"""
A policy check that requires that a list of other checks all
return True. Implements the "and" operator.
"""
def __init__(self, rules):
"""
Initialize the 'and' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' and '.join(str(r) for r in self.rules)
def __call__(self, target, cred):
"""
Check the policy. Requires that all rules accept in order to
return True.
"""
for rule in self.rules:
if not rule(target, cred):
return False
return True
def add_check(self, rule):
"""
Allows addition of another rule to the list of rules that will
be tested. Returns the AndCheck object for convenience.
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""
A policy check that requires that at least one of a list of other
checks returns True. Implements the "or" operator.
"""
def __init__(self, rules):
"""
Initialize the 'or' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' or '.join(str(r) for r in self.rules)
def __call__(self, target, cred):
"""
Check the policy. Requires that at least one rule accept in
order to return True.
"""
for rule in self.rules:
if rule(target, cred):
return True
return False
def add_check(self, rule):
"""
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def _parse_check(rule):
"""
Parse a single base check rule into an appropriate Check object.
"""
# Handle the special checks
if rule == '!':
return FalseCheck()
elif rule == '@':
return TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_("Failed to understand rule %(rule)s") % locals())
# If the rule is invalid, we'll fail closed
return FalseCheck()
# Find what implements the check
if kind in _checks:
return _checks[kind](kind, match)
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_("No handler for matches of kind %s") % kind)
return FalseCheck()
def _parse_list_rule(rule):
"""
Provided for backwards compatibility. Translates the old
list-of-lists syntax into a tree of Check objects.
"""
# Empty rule defaults to True
if not rule:
return TrueCheck()
# Outer list is joined by "or"; inner list by "and"
or_list = []
for inner_rule in rule:
# Elide empty inner lists
if not inner_rule:
continue
# Handle bare strings
if isinstance(inner_rule, basestring):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
and_list = [_parse_check(r) for r in inner_rule]
# Append the appropriate check to the or_list
if len(and_list) == 1:
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
# If we have only one check, omit the "or"
if len(or_list) == 0:
return FalseCheck()
elif len(or_list) == 1:
return or_list[0]
return OrCheck(or_list)
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""
Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
class ParseStateMeta(type):
"""
Metaclass for the ParseState class. Facilitates identifying
reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""
Create the class. Injects the 'reducers' list, a list of
tuples matching token sequences to the names of the
corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
"""
Decorator for reduction methods. Arguments are a sequence of
tokens, in order, which should trigger running this reduction
method.
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
class ParseState(object):
"""
Implement the core of parsing the policy language. Uses a greedy
reduction algorithm to reduce a sequence of tokens into a single
terminal, the value of which will be the root of the Check tree.
Note: error reporting is rather lacking. The best we can get with
this parser formulation is an overall "parse failed" error.
Fortunately, the policy language is simple enough that this
shouldn't be that big a problem.
"""
__metaclass__ = ParseStateMeta
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""
Perform a greedy reduction of the token stream. If a reducer
method matches, it will be executed, then the reduce() method
will be called recursively to search for any more possible
reductions.
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state. Calls reduce()."""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
@property
def result(self):
"""
Obtain the final result of the parse. Raises ValueError if
the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError("Could not parse rule")
return self.values[0]
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expressions into a 'check' token."""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""
Create an 'and_expr' from two checks joined by the 'and'
operator.
"""
return [('and_expr', AndCheck([check1, check2]))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""
Extend an 'and_expr' by adding one more check.
"""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""
Create an 'or_expr' from two checks joined by the 'or'
operator.
"""
return [('or_expr', OrCheck([check1, check2]))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""
Extend an 'or_expr' by adding one more check.
"""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of another check."""
return [('check', NotCheck(check))]
def _parse_text_rule(rule):
"""
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return TrueCheck()
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_("Failed to understand rule %(rule)r") % locals())
# Fail closed
return FalseCheck()
def parse_rule(rule):
"""
Parses a policy rule into a tree of Check objects.
"""
# If the rule is a string, it's in the policy language
if isinstance(rule, basestring):
return _parse_text_rule(rule)
return _parse_list_rule(rule)
def register(name, func=None):
"""
Register a function or Check class as a policy check.
:param name: Gives the name of the check type, e.g., 'rule',
'role', etc. If name is None, a default check type
will be registered.
:param func: If given, provides the function or class to register.
If not given, returns a function taking one argument
to specify the function or class to register,
allowing use as a decorator.
"""
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
_checks[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds):
"""
Recursively checks credentials based on the defined rules.
"""
try:
return _rules[self.match](target, creds)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
def __call__(self, target, creds):
"""
Check http: rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly 'True'.
"""
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urllib.urlencode(data)
f = urllib2.urlopen(url, post_data)
return f.read() == "True"
@register(None)
class GenericCheck(Check):
def __call__(self, target, creds):
"""
Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
"""
# TODO(termie): do dict inspection via dot syntax
match = self.match % target
if self.kind in creds:
return match == unicode(creds[self.kind])
return False
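
Tying the pieces together, a minimal end-to-end sketch, assuming this module is importable as nova.openstack.common.policy; the rule and credentials are illustrative:

from nova.openstack.common import policy

rules = policy.Rules.load_json(
    '{"admin_or_owner": "role:admin or project_id:%(project_id)s"}')
policy.set_rules(rules)

# Passes via the GenericCheck branch: the substituted project_id matches.
assert policy.check('admin_or_owner',
                    {'project_id': 'p1'},
                    {'roles': ['member'], 'project_id': 'p1'})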


@@ -1,251 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import logging as stdlib_logging
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
def __init__(self, message=None):
super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = "Unexpected error while running command."
if exit_code is None:
exit_code = '-'
message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
% (description, cmd, exit_code, stdout, stderr))
super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
def __init__(self, message=None):
super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
# Python ignores SIGPIPE (sets it to SIG_IGN) by default. This is
# usually not what non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
Allows optional retry.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
:param process_input: Send to opened process.
:type process_input: string
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`ProcessExecutionError` unless
program exits with one of these codes.
:type check_exit_code: boolean, int, or [int]
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix to commands called with
run_as_root=True
:type root_helper: string
:param shell: whether or not there should be a shell used to
execute this command. Defaults to false.
:type shell: boolean
:param loglevel: log level for execute commands.
:type loglevel: int. (Should be stdlib_logging.DEBUG or
stdlib_logging.INFO)
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', False)
loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
if kwargs:
raise UnknownArgumentError(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
if not root_helper:
raise NoRootWrapSpecified(
message=('Command requested root, but did not specify a root '
'helper.'))
cmd = shlex.split(root_helper) + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
if _returncode:
LOG.log(loglevel, _('Result was %s') % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except ProcessExecutionError:
if not attempts:
raise
else:
LOG.log(loglevel, _('%r failed. Retrying.'), cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
def trycmd(*args, **kwargs):
"""A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
command can be considered to have failed.
:param discard_warnings: True | False. Defaults to False. If set to True,
    then for succeeding commands, stderr is cleared.
"""
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = execute(*args, **kwargs)
failed = False
except ProcessExecutionError as exn:
out, err = '', str(exn)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
err = ''
return out, err
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug(_('Running cmd (SSH): %s'), cmd)
if addl_env:
raise InvalidArgumentError(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
raise InvalidArgumentError(_('process_input not supported over SSH'))
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
if check_exit_code and exit_status != 0:
raise ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
return (stdout, stderr)
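
A hedged usage sketch of the two helpers; the commands are arbitrary:

from nova.openstack.common import processutils

# Retries once, raising ProcessExecutionError if both attempts fail.
stdout, stderr = processutils.execute('ls', '-l', '/tmp', attempts=2)

# trycmd() converts a failure into a non-empty err instead of raising.
out, err = processutils.trycmd('ls', '/nonexistent-path')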


@@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


@@ -1,138 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Root wrapper for OpenStack services
Filters which commands a service is allowed to run as another user.
To use this with nova, you should set the following in
nova.conf:
rootwrap_config=/etc/nova/rootwrap.conf
You also need to let the nova user run nova-rootwrap
as root in sudoers:
nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap
/etc/nova/rootwrap.conf *
Service packaging should deploy .filters files only on nodes where
they are needed, to avoid allowing more than is necessary.
"""
from __future__ import print_function
import ConfigParser
import logging
import os
import pwd
import signal
import subprocess
import sys
RC_UNAUTHORIZED = 99
RC_NOCOMMAND = 98
RC_BADCONFIG = 97
RC_NOEXECFOUND = 96
def _subprocess_setup():
# Python ignores SIGPIPE (sets it to SIG_IGN) by default. This is
# usually not what non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def _exit_error(execname, message, errorcode, log=True):
print("%s: %s" % (execname, message), file=sys.stderr)
if log:
logging.error(message)
sys.exit(errorcode)
def _getlogin():
try:
return os.getlogin()
except OSError:
return (os.getenv('USER') or
os.getenv('USERNAME') or
os.getenv('LOGNAME'))
def main():
# Split arguments, require at least a command
execname = sys.argv.pop(0)
if len(sys.argv) < 2:
_exit_error(execname, "No command specified", RC_NOCOMMAND, log=False)
configfile = sys.argv.pop(0)
userargs = sys.argv[:]
# Add ../ to sys.path to allow running from branch
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname),
os.pardir, os.pardir))
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
from nova.openstack.common.rootwrap import wrapper
# Load configuration
try:
rawconfig = ConfigParser.RawConfigParser()
rawconfig.read(configfile)
config = wrapper.RootwrapConfig(rawconfig)
except ValueError as exc:
msg = "Incorrect value in %s: %s" % (configfile, exc.message)
_exit_error(execname, msg, RC_BADCONFIG, log=False)
except ConfigParser.Error:
_exit_error(execname, "Incorrect configuration file: %s" % configfile,
RC_BADCONFIG, log=False)
if config.use_syslog:
wrapper.setup_syslog(execname,
config.syslog_log_facility,
config.syslog_log_level)
# Execute command if it matches any of the loaded filters
filters = wrapper.load_filters(config.filters_path)
try:
filtermatch = wrapper.match_filter(filters, userargs,
exec_dirs=config.exec_dirs)
if filtermatch:
command = filtermatch.get_command(userargs,
exec_dirs=config.exec_dirs)
if config.use_syslog:
logging.info("(%s > %s) Executing %s (filter match = %s)" % (
_getlogin(), pwd.getpwuid(os.getuid())[0],
command, filtermatch.name))
obj = subprocess.Popen(command,
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
preexec_fn=_subprocess_setup,
env=filtermatch.get_environment(userargs))
obj.wait()
sys.exit(obj.returncode)
except wrapper.FilterMatchNotExecutable as exc:
msg = ("Executable not found: %s (filter match = %s)"
% (exc.match.exec_path, exc.match.name))
_exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog)
except wrapper.NoFilterMatched:
msg = ("Unauthorized command: %s (no filter matched)"
% ' '.join(userargs))
_exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog)
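
For reference, a minimal rootwrap.conf matching the options this entry point reads via wrapper.RootwrapConfig (the exact key names are an assumption inferred from the attributes used above; all paths are illustrative):

[DEFAULT]
filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
use_syslog=False
syslog_log_facility=syslog
syslog_log_level=ERROR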


@@ -1,318 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
class CommandFilter(object):
"""Command filter only checking that the 1st argument matches exec_path."""
def __init__(self, exec_path, run_as, *args):
self.name = ''
self.exec_path = exec_path
self.run_as = run_as
self.args = args
self.real_exec = None
def get_exec(self, exec_dirs=[]):
"""Returns existing executable, or empty string if none found."""
if self.real_exec is not None:
return self.real_exec
self.real_exec = ""
if os.path.isabs(self.exec_path):
if os.access(self.exec_path, os.X_OK):
self.real_exec = self.exec_path
else:
for binary_path in exec_dirs:
expanded_path = os.path.join(binary_path, self.exec_path)
if os.access(expanded_path, os.X_OK):
self.real_exec = expanded_path
break
return self.real_exec
def match(self, userargs):
"""Only check that the first argument (command) matches exec_path."""
return userargs and os.path.basename(self.exec_path) == userargs[0]
def get_command(self, userargs, exec_dirs=[]):
"""Returns command to execute (with sudo -u if run_as != root)."""
to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
if (self.run_as != 'root'):
# Used to run commands at lesser privileges
return ['sudo', '-u', self.run_as, to_exec] + userargs[1:]
return [to_exec] + userargs[1:]
def get_environment(self, userargs):
"""Returns specific environment to set, None if none."""
return None
class RegExpFilter(CommandFilter):
"""Command filter doing regexp matching for every argument."""
def match(self, userargs):
# Early skip if command or number of args don't match
if (not userargs or len(self.args) != len(userargs)):
# DENY: argument numbers don't match
return False
# Compare each arg (anchoring pattern explicitly at end of string)
for (pattern, arg) in zip(self.args, userargs):
try:
if not re.match(pattern + '$', arg):
break
except re.error:
# DENY: Badly-formed filter
return False
else:
# ALLOW: All arguments matched
return True
# DENY: Some arguments did not match
return False
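# Illustrative use of RegExpFilter (hypothetical patterns): every
# argument, including the command itself, must match its pattern, and
# each pattern is anchored at the end of the string by match().
#
#     f = RegExpFilter('/sbin/kpartx', 'root', 'kpartx', '-[ald]', '.+')
#     f.match(['kpartx', '-a', '/dev/loop0'])   # True: all args match
#     f.match(['kpartx', '-x', '/dev/loop0'])   # False: flag not allowed
#     f.match(['kpartx', '-a'])                 # False: wrong arg count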
class PathFilter(CommandFilter):
"""Command filter checking that path arguments are within given dirs
One can specify the following constraints for command arguments:
1) pass - pass an argument as is to the resulting command
2) some_str - check if an argument is equal to the given string
3) abs path - check if a path argument is within the given base dir
A typical rootwrapper filter entry looks like this:
# cmdname: filter name, raw command, user, arg_i_constraint [, ...]
chown: PathFilter, /bin/chown, root, nova, /var/lib/images
"""
def match(self, userargs):
if not userargs or len(userargs) < 2:
return False
command, arguments = userargs[0], userargs[1:]
equal_args_num = len(self.args) == len(arguments)
exec_is_valid = super(PathFilter, self).match(userargs)
args_equal_or_pass = all(
arg == 'pass' or arg == value
for arg, value in zip(self.args, arguments)
if not os.path.isabs(arg) # arguments not specifying abs paths
)
paths_are_within_base_dirs = all(
os.path.commonprefix([arg, os.path.realpath(value)]) == arg
for arg, value in zip(self.args, arguments)
if os.path.isabs(arg) # arguments specifying abs paths
)
return (equal_args_num and
exec_is_valid and
args_equal_or_pass and
paths_are_within_base_dirs)
def get_command(self, userargs, exec_dirs=[]):
command, arguments = userargs[0], userargs[1:]
# convert path values to canonical ones; copy other args as is
args = [os.path.realpath(value) if os.path.isabs(arg) else value
for arg, value in zip(self.args, arguments)]
return super(PathFilter, self).get_command([command] + args,
exec_dirs)
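# Illustrative behaviour of the sample chown entry above (assuming the
# path arguments resolve to themselves via os.path.realpath):
#
#     f = PathFilter('/bin/chown', 'root', 'nova', '/var/lib/images')
#     f.match(['chown', 'nova', '/var/lib/images/disk'])   # True
#     f.match(['chown', 'nova', '/etc/passwd'])            # False: path
#     f.match(['chown', 'root', '/var/lib/images/disk'])   # False: user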
class KillFilter(CommandFilter):
"""Specific filter for the kill calls.
1st argument is the user to run /bin/kill under
2nd argument is the location of the affected executable
if the argument is not absolute, it is checked against $PATH
Subsequent arguments list the accepted signals (if any)
This filter relies on /proc to accurately determine affected
executable, so it will only work on procfs-capable systems (not OSX).
"""
def __init__(self, *args):
super(KillFilter, self).__init__("/bin/kill", *args)
def match(self, userargs):
if not userargs or userargs[0] != "kill":
return False
args = list(userargs)
if len(args) == 3:
# A specific signal is requested
signal = args.pop(1)
if signal not in self.args[1:]:
# Requested signal not in accepted list
return False
else:
if len(args) != 2:
# Incorrect number of arguments
return False
if len(self.args) > 1:
# No signal requested, but filter requires specific signal
return False
try:
command = os.readlink("/proc/%d/exe" % int(args[1]))
except (ValueError, OSError):
# Incorrect PID
return False
        # NOTE(yufang521247): /proc/PID/exe may have '\0' on the
        # end, because python doesn't stop at '\0' when reading
        # the target path.
command = command.partition('\0')[0]
# NOTE(dprince): /proc/PID/exe may have ' (deleted)' on
# the end if an executable is updated or deleted
if command.endswith(" (deleted)"):
command = command[:-len(" (deleted)")]
kill_command = self.args[0]
if os.path.isabs(kill_command):
return kill_command == command
return (os.path.isabs(command) and
kill_command == os.path.basename(command) and
os.path.dirname(command) in os.environ.get('PATH', ''
).split(':'))
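# Illustrative KillFilter entry (hypothetical): allow sending -9 or -HUP
# to processes running /usr/sbin/dnsmasq. Whether match() returns True
# also depends on what /proc/<pid>/exe resolves to at runtime.
#
#     f = KillFilter('root', '/usr/sbin/dnsmasq', '-9', '-HUP')
#     f.match(['kill', '-9', '1234'])     # True if PID 1234 is dnsmasq
#     f.match(['kill', '-TERM', '1234'])  # False: signal not accepted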
class ReadFileFilter(CommandFilter):
"""Specific filter for the utils.read_file_as_root call."""
def __init__(self, file_path, *args):
self.file_path = file_path
super(ReadFileFilter, self).__init__("/bin/cat", "root", *args)
def match(self, userargs):
return (userargs == ['cat', self.file_path])
class IpFilter(CommandFilter):
"""Specific filter for the ip utility to that does not match exec."""
def match(self, userargs):
if userargs[0] == 'ip':
if userargs[1] == 'netns':
return (userargs[2] in ('list', 'add', 'delete'))
else:
return True
class EnvFilter(CommandFilter):
"""Specific filter for the env utility.
Behaves like CommandFilter, except that it handles
leading env A=B.. strings appropriately.
"""
def _extract_env(self, arglist):
"""Extract all leading NAME=VALUE arguments from arglist."""
envs = set()
for arg in arglist:
if '=' not in arg:
break
envs.add(arg.partition('=')[0])
return envs
def __init__(self, exec_path, run_as, *args):
super(EnvFilter, self).__init__(exec_path, run_as, *args)
env_list = self._extract_env(self.args)
# Set exec_path to X when args are in the form of
# env A=a B=b C=c X Y Z
if "env" in exec_path and len(env_list) < len(self.args):
self.exec_path = self.args[len(env_list)]
def match(self, userargs):
# ignore leading 'env'
if userargs[0] == 'env':
userargs.pop(0)
# require one additional argument after configured ones
if len(userargs) < len(self.args):
return False
# extract all env args
user_envs = self._extract_env(userargs)
filter_envs = self._extract_env(self.args)
user_command = userargs[len(user_envs):len(user_envs) + 1]
# match first non-env argument with CommandFilter
return (super(EnvFilter, self).match(user_command)
and len(filter_envs) and user_envs == filter_envs)
def exec_args(self, userargs):
args = userargs[:]
# ignore leading 'env'
if args[0] == 'env':
args.pop(0)
# Throw away leading NAME=VALUE arguments
while args and '=' in args[0]:
args.pop(0)
return args
def get_command(self, userargs, exec_dirs=[]):
to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
return [to_exec] + self.exec_args(userargs)[1:]
def get_environment(self, userargs):
env = os.environ.copy()
# ignore leading 'env'
if userargs[0] == 'env':
userargs.pop(0)
# Handle leading NAME=VALUE pairs
for a in userargs:
env_name, equals, env_value = a.partition('=')
if not equals:
break
if env_name and env_value:
env[env_name] = env_value
return env
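# Illustrative EnvFilter definition (hypothetical): the filter consumes
# the leading env NAME=VALUE pairs, then matches the first real command.
#
#     f = EnvFilter('env', 'root', 'CONFIG_FILE=', 'NETWORK_ID=', 'dnsmasq')
#     f.match(['env', 'CONFIG_FILE=/a', 'NETWORK_ID=2', 'dnsmasq'])  # True
#     f.match(['env', 'CONFIG_FILE=/a', 'dnsmasq'])  # False: env vars differ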
class ChainingFilter(CommandFilter):
def exec_args(self, userargs):
return []
class IpNetnsExecFilter(ChainingFilter):
"""Specific filter for the ip utility to that does match exec."""
def match(self, userargs):
# Network namespaces currently require root
# require <ns> argument
if self.run_as != "root" or len(userargs) < 4:
return False
return (userargs[:3] == ['ip', 'netns', 'exec'])
def exec_args(self, userargs):
args = userargs[4:]
if args:
args[0] = os.path.basename(args[0])
return args

View File

@ -1,165 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import logging.handlers
import os
from six import moves
from nova.openstack.common.rootwrap import filters
class NoFilterMatched(Exception):
"""This exception is raised when no filter matched."""
pass
class FilterMatchNotExecutable(Exception):
"""Raised when a filter matched but no executable was found."""
def __init__(self, match=None, **kwargs):
self.match = match
class RootwrapConfig(object):
def __init__(self, config):
# filters_path
self.filters_path = config.get("DEFAULT", "filters_path").split(",")
# exec_dirs
if config.has_option("DEFAULT", "exec_dirs"):
self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",")
else:
self.exec_dirs = []
# Use system PATH if exec_dirs is not specified
if "PATH" in os.environ:
self.exec_dirs = os.environ['PATH'].split(':')
# syslog_log_facility
if config.has_option("DEFAULT", "syslog_log_facility"):
v = config.get("DEFAULT", "syslog_log_facility")
facility_names = logging.handlers.SysLogHandler.facility_names
self.syslog_log_facility = getattr(logging.handlers.SysLogHandler,
v, None)
if self.syslog_log_facility is None and v in facility_names:
self.syslog_log_facility = facility_names.get(v)
if self.syslog_log_facility is None:
raise ValueError('Unexpected syslog_log_facility: %s' % v)
else:
default_facility = logging.handlers.SysLogHandler.LOG_SYSLOG
self.syslog_log_facility = default_facility
# syslog_log_level
if config.has_option("DEFAULT", "syslog_log_level"):
v = config.get("DEFAULT", "syslog_log_level")
self.syslog_log_level = logging.getLevelName(v.upper())
if (self.syslog_log_level == "Level %s" % v.upper()):
                raise ValueError('Unexpected syslog_log_level: %s' % v)
else:
self.syslog_log_level = logging.ERROR
# use_syslog
if config.has_option("DEFAULT", "use_syslog"):
self.use_syslog = config.getboolean("DEFAULT", "use_syslog")
else:
self.use_syslog = False
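# A rootwrap.conf exercising the options parsed above might look like
# this (illustrative values):
#
#     [DEFAULT]
#     filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
#     exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
#     use_syslog=False
#     syslog_log_facility=syslog
#     syslog_log_level=ERROR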
def setup_syslog(execname, facility, level):
rootwrap_logger = logging.getLogger()
rootwrap_logger.setLevel(level)
handler = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
handler.setFormatter(logging.Formatter(
os.path.basename(execname) + ': %(message)s'))
rootwrap_logger.addHandler(handler)
def build_filter(class_name, *args):
"""Returns a filter object of class class_name."""
if not hasattr(filters, class_name):
logging.warning("Skipping unknown filter class (%s) specified "
"in filter definitions" % class_name)
return None
filterclass = getattr(filters, class_name)
return filterclass(*args)
def load_filters(filters_path):
"""Load filters from a list of directories."""
filterlist = []
for filterdir in filters_path:
if not os.path.isdir(filterdir):
continue
for filterfile in filter(lambda f: not f.startswith('.'),
os.listdir(filterdir)):
filterconfig = moves.configparser.RawConfigParser()
filterconfig.read(os.path.join(filterdir, filterfile))
for (name, value) in filterconfig.items("Filters"):
                filterdefinition = [s.strip() for s in value.split(',')]
newfilter = build_filter(*filterdefinition)
if newfilter is None:
continue
newfilter.name = name
filterlist.append(newfilter)
return filterlist
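# A filter definitions file consumed by load_filters() holds one
# "name: FilterClass, args..." entry per line (hypothetical entries):
#
#     [Filters]
#     cat: ReadFileFilter, /etc/hosts
#     kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP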
def match_filter(filter_list, userargs, exec_dirs=[]):
"""Checks user command and arguments through command filters.
Returns the first matching filter.
Raises NoFilterMatched if no filter matched.
Raises FilterMatchNotExecutable if no executable was found for the
best filter match.
"""
first_not_executable_filter = None
for f in filter_list:
if f.match(userargs):
if isinstance(f, filters.ChainingFilter):
                # This command calls exec; verify that the remaining
                # args match another, non-chaining filter.
def non_chain_filter(fltr):
return (fltr.run_as == f.run_as
and not isinstance(fltr, filters.ChainingFilter))
leaf_filters = [fltr for fltr in filter_list
if non_chain_filter(fltr)]
args = f.exec_args(userargs)
if (not args or not match_filter(leaf_filters,
args, exec_dirs=exec_dirs)):
continue
# Try other filters if executable is absent
if not f.get_exec(exec_dirs=exec_dirs):
if not first_not_executable_filter:
first_not_executable_filter = f
continue
# Otherwise return matching filter for execution
return f
if first_not_executable_filter:
# A filter matched, but no executable was found for it
raise FilterMatchNotExecutable(match=first_not_executable_filter)
# No filter matched
raise NoFilterMatched()
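# Typical use, as in the rootwrap entry point earlier in this commit
# (sketch):
#
#     filters = load_filters(config.filters_path)
#     match = match_filter(filters, ['cat', '/etc/hosts'],
#                          exec_dirs=config.exec_dirs)
#     command = match.get_command(['cat', '/etc/hosts'],
#                                 exec_dirs=config.exec_dirs)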

View File

@ -1,306 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A remote procedure call (rpc) abstraction.
For some wrappers that add message versioning to rpc, see:
rpc.dispatcher
rpc.proxy
"""
import inspect
from oslo.config import cfg
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import importutils
from nova.openstack.common import local
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
rpc_opts = [
cfg.StrOpt('rpc_backend',
default='%s.impl_kombu' % __package__,
help="The messaging module to use, defaults to kombu."),
cfg.IntOpt('rpc_thread_pool_size',
default=64,
help='Size of RPC thread pool'),
cfg.IntOpt('rpc_conn_pool_size',
default=30,
help='Size of RPC connection pool'),
cfg.IntOpt('rpc_response_timeout',
default=60,
help='Seconds to wait for a response from call or multicall'),
cfg.IntOpt('rpc_cast_timeout',
default=30,
help='Seconds to wait before a cast expires (TTL). '
'Only supported by impl_zmq.'),
cfg.ListOpt('allowed_rpc_exception_modules',
default=['nova.exception',
'cinder.exception',
'exceptions',
],
                help='Modules of exceptions that are permitted to be '
                     'recreated upon receiving exception data from an rpc '
                     'call.'),
cfg.BoolOpt('fake_rabbit',
default=False,
help='If passed, use a fake RabbitMQ provider'),
cfg.StrOpt('control_exchange',
default='openstack',
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]
CONF = cfg.CONF
CONF.register_opts(rpc_opts)
def set_defaults(control_exchange):
cfg.set_defaults(rpc_opts,
control_exchange=control_exchange)
def create_connection(new=True):
"""Create a connection to the message bus used for rpc.
For some example usage of creating a connection and some consumers on that
connection, see nova.service.
:param new: Whether or not to create a new connection. A new connection
will be created by default. If new is False, the
implementation is free to return an existing connection from a
pool.
:returns: An instance of openstack.common.rpc.common.Connection
"""
return _get_impl().create_connection(CONF, new=new)
def _check_for_lock():
if not CONF.debug:
return None
if ((hasattr(local.strong_store, 'locks_held')
and local.strong_store.locks_held)):
stack = ' :: '.join([frame[3] for frame in inspect.stack()])
        LOG.warn(_('An RPC is being made while holding a lock. The locks '
'currently held are %(locks)s. This is probably a bug. '
'Please report it. Include the following: [%(stack)s].'),
{'locks': local.strong_store.locks_held,
'stack': stack})
return True
return False
def call(context, topic, msg, timeout=None, check_for_lock=False):
"""Invoke a remote method that returns something.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
    :param check_for_lock: if True, a warning is emitted if an RPC call is made
with a lock held.
:returns: A dict from the remote method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
if check_for_lock:
_check_for_lock()
return _get_impl().call(CONF, context, topic, msg, timeout)
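# Example invocation (illustrative topic and method names):
#
#     result = call(ctxt, 'compute.host1',
#                   {'method': 'get_host_uptime', 'args': {}},
#                   timeout=30)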
def cast(context, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast(CONF, context, topic, msg)
def fanout_cast(context, topic, msg):
"""Broadcast a remote method invocation with no return.
This method will get invoked on all consumers that were set up with this
topic name and fanout=True.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=True.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast(CONF, context, topic, msg)
def multicall(context, topic, msg, timeout=None, check_for_lock=False):
"""Invoke a remote method and get back an iterator.
    In this case, the remote method will be returning multiple values in
    separate messages, so the return values can be processed as they come
    in via an iterator.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
    :param check_for_lock: if True, a warning is emitted if an RPC call is made
with a lock held.
:returns: An iterator. The iterator will yield a tuple (N, X) where N is
an index that starts at 0 and increases by one for each value
returned and X is the Nth value that was returned by the remote
method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
if check_for_lock:
_check_for_lock()
return _get_impl().multicall(CONF, context, topic, msg, timeout)
def notify(context, topic, msg, envelope=False):
"""Send notification event.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the notification to.
:param msg: This is a dict of content of event.
:param envelope: Set to True to enable message envelope for notifications.
:returns: None
"""
return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
def cleanup():
"""Clean up resoruces in use by implementation.
Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function
would get called before an application using this API exits to allow
connections to get torn down cleanly.
:returns: None
"""
return _get_impl().cleanup()
def cast_to_server(context, server_params, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast_to_server(CONF, context, server_params, topic,
msg)
def fanout_cast_to_server(context, server_params, topic, msg):
"""Broadcast to a remote method invocation with no return.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast_to_server(CONF, context, server_params,
topic, msg)
def queue_get_for(context, topic, host):
"""Get a queue name for a given topic + host.
This function only works if this naming convention is followed on the
consumer side, as well. For example, in nova, every instance of the
nova-foo service calls create_consumer() for two topics:
foo
foo.<host>
Messages sent to the 'foo' topic are distributed to exactly one instance of
the nova-foo service. The services are chosen in a round-robin fashion.
Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
<host>.
"""
return '%s.%s' % (topic, host) if host else topic
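# For example:
#
#     queue_get_for(ctxt, 'compute', 'host1')  # -> 'compute.host1'
#     queue_get_for(ctxt, 'compute', None)     # -> 'compute'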
_RPCIMPL = None
def _get_impl():
"""Delay import of rpc_backend until configuration is loaded."""
global _RPCIMPL
if _RPCIMPL is None:
try:
_RPCIMPL = importutils.import_module(CONF.rpc_backend)
except ImportError:
# For backwards compatibility with older oslo.config.
impl = CONF.rpc_backend.replace('nova.rpc',
'nova.openstack.common.rpc')
_RPCIMPL = importutils.import_module(impl)
return _RPCIMPL

View File

@ -1,636 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implementations based on
AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
from oslo.config import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
amqp_opts = [
cfg.BoolOpt('amqp_durable_queues',
default=False,
deprecated_name='rabbit_durable_queues',
deprecated_group='DEFAULT',
help='Use durable queues in amqp.'),
cfg.BoolOpt('amqp_auto_delete',
default=False,
help='Auto-delete queues in amqp.'),
]
cfg.CONF.register_opts(amqp_opts)
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, conf, connection_cls, *args, **kwargs):
self.connection_cls = connection_cls
self.conf = conf
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
self.reply_proxy = None
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug(_('Pool creating new connection'))
return self.connection_cls(self.conf)
def empty(self):
while self.free_items:
self.get().close()
# Force a new connection pool to be created.
# Note that this was added due to failing unit test cases. The issue
# is the above "while loop" gets all the cached connections from the
# pool and closes them, but never returns them to the pool, a pool
# leak. The unit tests hang waiting for an item to be returned to the
# pool. The unit tests get here via the tearDown() method. In the run
# time code, it gets here via cleanup() and only appears in service.py
# just before doing a sys.exit(), so cleanup() only happens once and
# the leakage is not a problem.
self.connection_cls.pool = None
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
with _pool_create_sem:
# Make sure only one thread tries to create the connection pool.
if not connection_cls.pool:
connection_cls.pool = Pool(conf, connection_cls)
return connection_cls.pool
class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the create_connection() caller.
This is essentially a wrapper around Connection that supports 'with'.
It can also return a new Connection, or one from a pool.
The function will also catch when an instance of this class is to be
deleted. With that we can return Connections to the pool on exceptions
and so forth without making the caller be responsible for catching them.
If possible the function makes sure to return a connection to the pool.
"""
def __init__(self, conf, connection_pool, pooled=True, server_params=None):
"""Create a new connection, or get one from the pool."""
self.connection = None
self.conf = conf
self.connection_pool = connection_pool
if pooled:
self.connection = connection_pool.get()
else:
self.connection = connection_pool.connection_cls(
conf,
server_params=server_params)
self.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self."""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
self.connection.reset()
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def create_consumer(self, topic, proxy, fanout=False):
self.connection.create_consumer(topic, proxy, fanout)
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
ack_on_error=True):
self.connection.join_consumer_pool(callback,
pool_name,
topic,
exchange_name,
ack_on_error)
def consume_in_thread(self):
self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance."""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
class ReplyProxy(ConnectionContext):
"""Connection class for RPC replies / callbacks."""
def __init__(self, conf, connection_pool):
self._call_waiters = {}
self._num_call_waiters = 0
        self._num_call_waiters_wrn_threshold = 10
self._reply_q = 'reply_' + uuid.uuid4().hex
super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
self.declare_direct_consumer(self._reply_q, self._process_data)
self.consume_in_thread()
def _process_data(self, message_data):
msg_id = message_data.pop('_msg_id', None)
waiter = self._call_waiters.get(msg_id)
if not waiter:
LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
', message : %(data)s'), {'msg_id': msg_id,
'data': message_data})
LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
else:
waiter.put(message_data)
def add_call_waiter(self, waiter, msg_id):
self._num_call_waiters += 1
        if self._num_call_waiters > self._num_call_waiters_wrn_threshold:
            LOG.warn(_('Number of call waiters is greater than warning '
                       'threshold: %d. There could be a MulticallProxyWaiter '
                       'leak.') % self._num_call_waiters_wrn_threshold)
            self._num_call_waiters_wrn_threshold *= 2
self._call_waiters[msg_id] = waiter
def del_call_waiter(self, msg_id):
self._num_call_waiters -= 1
del self._call_waiters[msg_id]
def get_reply_q(self):
return self._reply_q
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
failure=None, ending=False, log_failure=True):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
"""
with ConnectionContext(conf, connection_pool) as conn:
if failure:
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
msg = {'result': reply, 'failure': failure}
if ending:
msg['ending'] = True
_add_unique_id(msg)
# If a reply_q exists, add the msg_id to the reply and pass the
# reply_q to direct_send() to use it as the response queue.
# Otherwise use the msg_id for backward compatibilty.
if reply_q:
msg['_msg_id'] = msg_id
conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
else:
conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
self.reply_q = kwargs.pop('reply_q', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
values['reply_q'] = self.reply_q
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None, log_failure=True):
if self.msg_id:
msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
reply, failure, ending, log_failure)
if ending:
self.msg_id = None
def unpack_context(conf, msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
# NOTE(vish): Some versions of python don't like unicode keys
# in kwargs.
key = str(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
return ctx
def pack_context(msg, context):
"""Pack context into msg.
Values for message keys need to be less than 255 chars, so we pull
context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same
for args at some point.
"""
if isinstance(context, dict):
context_d = dict([('_context_%s' % key, value)
for (key, value) in context.iteritems()])
else:
context_d = dict([('_context_%s' % key, value)
for (key, value) in context.to_dict().iteritems()])
msg.update(context_d)
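# For example, packing a plain dict context (illustrative values):
#
#     msg = {'method': 'ping', 'args': {}}
#     pack_context(msg, {'user_id': 'u1', 'project_id': 'p1'})
#     # msg now also carries '_context_user_id' and '_context_project_id'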
class _MsgIdCache(object):
"""This class checks any duplicate messages."""
    # NOTE: This value could be made a configuration item, but it rarely
    # needs to change, so it is kept static for now.
DUP_MSG_CHECK_SIZE = 16
def __init__(self, **kwargs):
self.prev_msgids = collections.deque([],
maxlen=self.DUP_MSG_CHECK_SIZE)
def check_duplicate_message(self, message_data):
"""AMQP consumers may read same message twice when exceptions occur
before ack is returned. This method prevents doing it.
"""
if UNIQUE_ID in message_data:
msg_id = message_data[UNIQUE_ID]
if msg_id not in self.prev_msgids:
self.prev_msgids.append(msg_id)
else:
raise rpc_common.DuplicateMessageError(msg_id=msg_id)
def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id})
LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
class _ThreadPoolWithWait(object):
"""Base class for a delayed invocation manager.
Used by the Connection class to start up green threads
to handle incoming messages.
"""
def __init__(self, conf, connection_pool):
self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
self.connection_pool = connection_pool
self.conf = conf
def wait(self):
"""Wait for all callback threads to exit."""
self.pool.waitall()
class CallbackWrapper(_ThreadPoolWithWait):
"""Wraps a straight callback.
Allows it to be invoked in a green thread.
"""
def __init__(self, conf, callback, connection_pool,
wait_for_consumers=False):
"""Initiates CallbackWrapper object.
:param conf: cfg.CONF instance
:param callback: a callable (probably a function)
:param connection_pool: connection pool as returned by
get_connection_pool()
:param wait_for_consumers: wait for all green threads to
complete and raise the last
caught exception, if any.
"""
super(CallbackWrapper, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.callback = callback
self.wait_for_consumers = wait_for_consumers
self.exc_info = None
def _wrap(self, message_data, **kwargs):
"""Wrap the callback invocation to catch exceptions.
"""
try:
self.callback(message_data, **kwargs)
except Exception:
self.exc_info = sys.exc_info()
def __call__(self, message_data):
self.exc_info = None
self.pool.spawn_n(self._wrap, message_data)
if self.wait_for_consumers:
self.pool.waitall()
if self.exc_info:
raise self.exc_info[1], None, self.exc_info[2]
class ProxyCallback(_ThreadPoolWithWait):
"""Calls methods on a proxy object based on method and args."""
def __init__(self, conf, proxy, connection_pool):
super(ProxyCallback, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.proxy = proxy
self.msg_id_cache = _MsgIdCache()
def __call__(self, message_data):
"""Consumer callback to call a method on a proxy object.
Parses the message for validity and fires off a thread to call the
proxy object method.
Message data should be a dictionary with two keys:
method: string representing the method to call
args: dictionary of arg: value
Example: {'method': 'echo', 'args': {'value': 42}}
"""
# It is important to clear the context here, because at this point
# the previous context is stored in local.store.context
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
self.msg_id_cache.check_duplicate_message(message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
args = message_data.get('args', {})
version = message_data.get('version')
namespace = message_data.get('namespace')
if not method:
LOG.warn(_('no method for message: %s') % message_data)
ctxt.reply(_('No method for message: %s') % message_data,
connection_pool=self.connection_pool)
return
self.pool.spawn_n(self._process_data, ctxt, version, method,
namespace, args)
def _process_data(self, ctxt, version, method, namespace, args):
"""Process a message in a new thread.
If the proxy object we have has a dispatch method
(see rpc.dispatcher.RpcDispatcher), pass it the version,
method, and args and let it dispatch as appropriate. If not, use
the old behavior of magically calling the specified method on the
proxy we have here.
"""
ctxt.update_store()
try:
rval = self.proxy.dispatch(ctxt, version, method, namespace,
**args)
# Check if the result was a generator
if inspect.isgenerator(rval):
for x in rval:
ctxt.reply(x, None, connection_pool=self.connection_pool)
else:
ctxt.reply(rval, None, connection_pool=self.connection_pool)
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
except rpc_common.ClientException as e:
LOG.debug(_('Expected exception during message handling (%s)') %
e._exc_info[1])
ctxt.reply(None, e._exc_info,
connection_pool=self.connection_pool,
log_failure=False)
except Exception:
# sys.exc_info() is deleted by LOG.exception().
exc_info = sys.exc_info()
LOG.error(_('Exception during message handling'),
exc_info=exc_info)
ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
class MulticallProxyWaiter(object):
def __init__(self, conf, msg_id, timeout, connection_pool):
self._msg_id = msg_id
self._timeout = timeout or conf.rpc_response_timeout
self._reply_proxy = connection_pool.reply_proxy
self._done = False
self._got_ending = False
self._conf = conf
self._dataqueue = queue.LightQueue()
# Add this caller to the reply proxy's call_waiters
self._reply_proxy.add_call_waiter(self, self._msg_id)
self.msg_id_cache = _MsgIdCache()
def put(self, data):
self._dataqueue.put(data)
def done(self):
if self._done:
return
self._done = True
# Remove this caller from reply proxy's call_waiters
self._reply_proxy.del_call_waiter(self._msg_id)
def _process_data(self, data):
result = None
self.msg_id_cache.check_duplicate_message(data)
if data['failure']:
failure = data['failure']
result = rpc_common.deserialize_remote_exception(self._conf,
failure)
elif data.get('ending', False):
self._got_ending = True
else:
result = data['result']
return result
def __iter__(self):
"""Return a result until we get a reply with an 'ending' flag."""
if self._done:
raise StopIteration
while True:
try:
data = self._dataqueue.get(timeout=self._timeout)
result = self._process_data(data)
except queue.Empty:
self.done()
raise rpc_common.Timeout()
except Exception:
with excutils.save_and_reraise_exception():
self.done()
if self._got_ending:
self.done()
raise StopIteration
if isinstance(result, Exception):
self.done()
raise result
yield result
def create_connection(conf, new, connection_pool):
"""Create a connection."""
return ConnectionContext(conf, connection_pool, pooled=not new)
_reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
LOG.debug(_('Making synchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
_add_unique_id(msg)
pack_context(msg, context)
with _reply_proxy_create_sem:
if not connection_pool.reply_proxy:
connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
return wait_msg
def call(conf, context, topic, msg, timeout, connection_pool):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout, connection_pool)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
"""Sends a message on a topic to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast_to_server(conf, context, server_params, topic, msg,
connection_pool):
"""Sends a message on a fanout exchange to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def notify(conf, context, topic, msg, connection_pool, envelope):
"""Sends a notification event on a topic."""
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
dict(event_type=msg.get('event_type'),
topic=topic))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
if envelope:
msg = rpc_common.serialize_msg(msg)
conn.notify_send(topic, msg)
def cleanup(connection_pool):
if connection_pool:
connection_pool.empty()
def get_control_exchange(conf):
return conf.control_exchange

View File

@ -1,521 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import sys
import traceback
from oslo.config import cfg
import six
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import local
from nova.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
'''RPC Envelope Version.
This version number applies to the top level structure of messages sent out.
It does *not* apply to the message payload, which must be versioned
independently. For example, when using rpc APIs, a version number is applied
for changes to the API being exposed over rpc. This version number is handled
in the rpc proxy and dispatcher modules.
This version number applies to the message envelope that is used in the
serialization done inside the rpc layer. See serialize_msg() and
deserialize_msg().
The current message format (version 2.0) is very simple. It is:
{
'oslo.version': <RPC Envelope Version as a String>,
'oslo.message': <Application Message Payload, JSON encoded>
}
Message format version '1.0' is just considered to be the messages we sent
without a message envelope.
So, the current message envelope just includes the envelope version. It may
eventually contain additional information, such as a signature for the message
payload.
We will JSON encode the application message payload. The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
_RPC_ENVELOPE_VERSION = '2.0'
_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'
_REMOTE_POSTFIX = '_Remote'
class RPCException(Exception):
msg_fmt = _("An unknown RPC related exception occurred.")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
# at least get the core message out if something happened
message = self.msg_fmt
super(RPCException, self).__init__(message)
class RemoteError(RPCException):
"""Signifies that a remote class has raised an exception.
Contains a string representation of the type of the original exception,
the value of the original exception, and the traceback. These are
sent to the parent as a joined string so printing the exception
contains all of the relevant info.
"""
msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
def __init__(self, exc_type=None, value=None, traceback=None):
self.exc_type = exc_type
self.value = value
self.traceback = traceback
super(RemoteError, self).__init__(exc_type=exc_type,
value=value,
traceback=traceback)
class Timeout(RPCException):
"""Signifies that a timeout has occurred.
This exception is raised if the rpc_response_timeout is reached while
waiting for a response from the remote side.
"""
msg_fmt = _('Timeout while waiting on RPC response - '
'topic: "%(topic)s", RPC method: "%(method)s" '
'info: "%(info)s"')
def __init__(self, info=None, topic=None, method=None):
"""Initiates Timeout object.
:param info: Extra info to convey to the user
:param topic: The topic that the rpc call was sent to
:param rpc_method_name: The name of the rpc method being
called
"""
self.info = info
self.topic = topic
self.method = method
super(Timeout, self).__init__(
None,
info=info or _('<unknown>'),
topic=topic or _('<unknown>'),
method=method or _('<unknown>'))
class DuplicateMessageError(RPCException):
msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
class InvalidRPCConnectionReuse(RPCException):
msg_fmt = _("Invalid reuse of an RPC connection.")
class UnsupportedRpcVersion(RPCException):
msg_fmt = _("Specified RPC version, %(version)s, not supported by "
"this endpoint.")
class UnsupportedRpcEnvelopeVersion(RPCException):
msg_fmt = _("Specified RPC envelope version, %(version)s, "
"not supported by this endpoint.")
class RpcVersionCapError(RPCException):
msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
class Connection(object):
"""A connection, returned by rpc.create_connection().
This class represents a connection to the message bus used for rpc.
An instance of this class should never be created by users of the rpc API.
Use rpc.create_connection() instead.
"""
def close(self):
"""Close the connection.
This method must be called when the connection will no longer be used.
        It will ensure that any resources associated with the connection, such
        as a network connection, are cleaned up.
"""
raise NotImplementedError()
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer on this connection.
A consumer is associated with a message queue on the backend message
bus. The consumer will read messages from the queue, unpack them, and
dispatch them to the proxy object. The contents of the message pulled
off of the queue will determine which method gets called on the proxy
object.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic. For example, all instances of nova-compute consume
from a queue called "compute". In that case, the
messages will get distributed amongst the consumers in a
round-robin fashion if fanout=False. If fanout=True,
every consumer associated with this topic will get a
copy of every message.
:param proxy: The object that will handle all incoming messages.
:param fanout: Whether or not this is a fanout topic. See the
documentation for the topic parameter for some
additional comments on this.
"""
raise NotImplementedError()
def create_worker(self, topic, proxy, pool_name):
"""Create a worker on this connection.
A worker is like a regular consumer of messages directed to a
topic, except that it is part of a set of such consumers (the
"pool") which may run in parallel. Every pool of workers will
receive a given message, but only one worker in the pool will
be asked to process it. Load is distributed across the members
of the pool in round-robin fashion.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic.
:param proxy: The object that will handle all incoming messages.
:param pool_name: String containing the name of the pool of workers
"""
raise NotImplementedError()
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
"""Register as a member of a group of consumers.
Uses given topic from the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
:param callback: Callable to be invoked for each message.
:type callback: callable accepting one argument
:param pool_name: The name of the consumer pool.
:type pool_name: str
:param topic: The routing topic for desired messages.
:type topic: str
:param exchange_name: The name of the message exchange where
the client should attach. Defaults to
the configured exchange.
:type exchange_name: str
"""
raise NotImplementedError()
def consume_in_thread(self):
"""Spawn a thread to handle incoming messages.
Spawn a thread that will be responsible for handling all incoming
messages for consumers that were set up on this connection.
Message dispatching inside of this is expected to be implemented in a
non-blocking manner. An example implementation would be having this
thread pull messages in for all of the consumers, but utilize a thread
pool for dispatching the messages to the proxy objects.
"""
raise NotImplementedError()
def _safe_log(log_func, msg, msg_data):
"""Sanitizes the msg_data field before logging."""
SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']
def _fix_passwords(d):
"""Sanitizes the password fields in the dictionary."""
for k in d.iterkeys():
if k.lower().find('password') != -1:
d[k] = '<SANITIZED>'
elif k.lower() in SANITIZE:
d[k] = '<SANITIZED>'
elif isinstance(d[k], dict):
_fix_passwords(d[k])
return d
return log_func(msg, _fix_passwords(copy.deepcopy(msg_data)))
def serialize_remote_exception(failure_info, log_failure=True):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_("Returning exception %s to caller"),
six.text_type(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Let's turn it back into the original exception type.
cls_name = str(failure.__class__.__name__)
mod_name = str(failure.__class__.__module__)
if (cls_name.endswith(_REMOTE_POSTFIX) and
mod_name.endswith(_REMOTE_POSTFIX)):
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
data = {
'class': cls_name,
'module': mod_name,
'message': six.text_type(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data
def deserialize_remote_exception(conf, data):
failure = jsonutils.loads(str(data))
trace = failure.get('tb', [])
message = failure.get('message', "") + "\n" + "\n".join(trace)
name = failure.get('class')
module = failure.get('module')
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
# order to prevent arbitrary code execution.
if module not in conf.allowed_rpc_exception_modules:
return RemoteError(name, failure.get('message'), trace)
try:
mod = importutils.import_module(module)
klass = getattr(mod, name)
if not issubclass(klass, Exception):
raise TypeError("Can only deserialize Exceptions")
failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
except (AttributeError, TypeError, ImportError):
return RemoteError(name, failure.get('message'), trace)
ex_type = type(failure)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
{'__str__': str_override, '__unicode__': str_override})
new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
try:
# NOTE(ameade): Dynamically create a new exception type and swap it in
# as the new type for the exception. This only works on user defined
# Exceptions and not core python exceptions. This is important because
# we cannot necessarily change an exception message so we must override
# the __str__ method.
failure.__class__ = new_ex_type
except TypeError:
# NOTE(ameade): If a core exception then just add the traceback to the
# first exception argument.
failure.args = (message,) + failure.args[1:]
return failure
class CommonRpcContext(object):
def __init__(self, **kwargs):
self.values = kwargs
def __getattr__(self, key):
try:
return self.values[key]
except KeyError:
raise AttributeError(key)
def to_dict(self):
return copy.deepcopy(self.values)
@classmethod
def from_dict(cls, values):
return cls(**values)
def deepcopy(self):
return self.from_dict(self.to_dict())
def update_store(self):
local.store.context = self
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
# TODO(russellb) This method is a bit of a nova-ism. It makes
# some assumptions about the data in the request context sent
# across rpc, while the rest of this class does not. We could get
# rid of this if we changed the nova code that uses this to
# convert the RpcContext back to its native RequestContext doing
# something like nova.context.RequestContext.from_dict(ctxt.to_dict())
context = self.deepcopy()
context.values['is_admin'] = True
context.values.setdefault('roles', [])
if 'admin' not in context.values['roles']:
context.values['roles'].append('admin')
if read_deleted is not None:
context.values['read_deleted'] = read_deleted
return context
class ClientException(Exception):
"""Encapsulates actual exception expected to be hit by a RPC proxy object.
Merely instantiating it records the current exception information, which
will be passed back to the RPC client without exceptional logging.
"""
def __init__(self):
self._exc_info = sys.exc_info()
def catch_client_exception(exceptions, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if type(e) in exceptions:
raise ClientException()
else:
raise
def client_exceptions(*exceptions):
"""Decorator for manager methods that raise expected exceptions.
Marking a Manager method with this decorator allows the declaration
of expected exceptions that the RPC layer should not consider fatal,
and not log as if they were generated in a real error scenario. Note
that this will cause listed exceptions to be wrapped in a
ClientException, which is used internally by the RPC layer.
"""
def outer(func):
def inner(*args, **kwargs):
return catch_client_exception(exceptions, func, *args, **kwargs)
return inner
return outer
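# Illustrative use on a manager method (hypothetical exception type):
#
#     @client_exceptions(exception.InstanceNotFound)
#     def get_instance(self, context, instance_id):
#         ...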
def version_is_compatible(imp_version, version):
"""Determine whether versions are compatible.
:param imp_version: The version implemented
:param version: The version requested by an incoming message.
"""
version_parts = version.split('.')
imp_version_parts = imp_version.split('.')
try:
rev = version_parts[2]
except IndexError:
rev = 0
try:
imp_rev = imp_version_parts[2]
except IndexError:
imp_rev = 0
if int(version_parts[0]) != int(imp_version_parts[0]): # Major
return False
if int(version_parts[1]) > int(imp_version_parts[1]): # Minor
return False
if (int(version_parts[1]) == int(imp_version_parts[1]) and
int(rev) > int(imp_rev)): # Revision
return False
return True
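# For example:
#
#     version_is_compatible('1.3', '1.1')  # True: same major, older minor
#     version_is_compatible('1.3', '1.4')  # False: minor too new
#     version_is_compatible('2.0', '1.3')  # False: major differs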
def serialize_msg(raw_msg):
# NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
# information about this format.
msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
_MESSAGE_KEY: jsonutils.dumps(raw_msg)}
return msg
def deserialize_msg(msg):
# NOTE(russellb): Hang on to your hats, this road is about to
# get a little bumpy.
#
# Robustness Principle:
# "Be strict in what you send, liberal in what you accept."
#
# At this point we have to do a bit of guessing about what it
# is we just received. Here is the set of possibilities:
#
# 1) We received a dict. This could be 2 things:
#
# a) Inspect it to see if it looks like a standard message envelope.
# If so, great!
#
# b) If it doesn't look like a standard message envelope, it could either
# be a notification, or a message from before we added a message
# envelope (referred to as version 1.0).
# Just return the message as-is.
#
# 2) It's any other non-dict type. Just return it and hope for the best.
# This case covers return values from rpc.call() from before message
# envelopes were used. (messages to call a method were always a dict)
if not isinstance(msg, dict):
# See #2 above.
return msg
base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
if not all(map(lambda key: key in msg, base_envelope_keys)):
# See #1.b above.
return msg
# At this point we think we have the message envelope
# format we were expecting. (#1.a above)
if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])
raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])
return raw_msg
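# Round-trip example:
#
#     env = serialize_msg({'method': 'ping', 'args': {}})
#     # env == {'oslo.version': '2.0', 'oslo.message': <JSON string>}
#     deserialize_msg(env)  # -> {'method': 'ping', 'args': {}}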

View File

@ -1,178 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Code for rpc message dispatching.
Messages that come in have a version number associated with them. RPC API
version numbers are in the form:
Major.Minor
For a given message with version X.Y, the receiver must be marked as able to
handle messages of version A.B, where:
A = X
B >= Y
The Major version number would be incremented for an almost completely new API.
The Minor version number would be incremented for backwards compatible changes
to an existing API. A backwards compatible change could be something like
adding a new method, adding an argument to an existing method (but not
requiring it), or changing the type for an existing argument (but still
handling the old type as well).
The conversion over to a versioned API must be done on both the client side and
server side of the API at the same time. However, as the code stands today,
there can be both versioned and unversioned APIs implemented in the same code
base.
EXAMPLES
========
Nova was the first project to use versioned rpc APIs. Consider the compute rpc
API as an example. The client side is in nova/compute/rpcapi.py and the server
side is in nova/compute/manager.py.
Example 1) Adding a new method.
-------------------------------
Adding a new method is a backwards compatible change. It should be added to
nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to
X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should
have a specific version specified to indicate the minimum API version that must
be implemented for the method to be supported. For example::
def get_host_uptime(self, ctxt, host):
topic = _compute_topic(self.topic, ctxt, host, None)
return self.call(ctxt, self.make_msg('get_host_uptime'), topic,
version='1.1')
In this case, version '1.1' is the first version that supported the
get_host_uptime() method.
Example 2) Adding a new parameter.
----------------------------------
Adding a new parameter to an rpc method can be made backwards compatible. The
RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped.
The implementation of the method must not expect the parameter to be present::
def some_remote_method(self, arg1, arg2, newarg=None):
# The code needs to deal with newarg=None for cases
# where an older client sends a message without it.
pass
On the client side, the same changes should be made as in example 1. The
minimum version that supports the new parameter should be specified.
"""
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common.rpc import serializer as rpc_serializer
class RpcDispatcher(object):
"""Dispatch rpc messages according to the requested API version.
This class can be used as the top level 'manager' for a service. It
contains a list of underlying managers that have an API_VERSION attribute.
"""
def __init__(self, callbacks, serializer=None):
"""Initialize the rpc dispatcher.
:param callbacks: List of proxy objects that are an instance
of a class with rpc methods exposed. Each proxy
object should have an RPC_API_VERSION attribute.
:param serializer: The Serializer object that will be used to
deserialize arguments before the method call and
to serialize the result after it returns.
"""
self.callbacks = callbacks
if serializer is None:
serializer = rpc_serializer.NoOpSerializer()
self.serializer = serializer
super(RpcDispatcher, self).__init__()
def _deserialize_args(self, context, kwargs):
"""Helper method called to deserialize args before dispatch.
This calls our serializer on each argument, returning a new set of
args that have been deserialized.
:param context: The request context
:param kwargs: The arguments to be deserialized
:returns: A new set of deserialized args
"""
new_kwargs = dict()
for argname, arg in kwargs.iteritems():
new_kwargs[argname] = self.serializer.deserialize_entity(context,
arg)
return new_kwargs
def dispatch(self, ctxt, version, method, namespace, **kwargs):
"""Dispatch a message based on a requested version.
:param ctxt: The request context
:param version: The requested API version from the incoming message
:param method: The method requested to be called by the incoming
message.
:param namespace: The namespace for the requested method. If None,
the dispatcher will look for a method on a callback
object with no namespace set.
:param kwargs: A dict of keyword arguments to be passed to the method.
:returns: Whatever is returned by the underlying method that gets
called.
"""
if not version:
version = '1.0'
had_compatible = False
for proxyobj in self.callbacks:
# Check for namespace compatibility
try:
cb_namespace = proxyobj.RPC_API_NAMESPACE
except AttributeError:
cb_namespace = None
if namespace != cb_namespace:
continue
# Check for version compatibility
try:
rpc_api_version = proxyobj.RPC_API_VERSION
except AttributeError:
rpc_api_version = '1.0'
is_compatible = rpc_common.version_is_compatible(rpc_api_version,
version)
had_compatible = had_compatible or is_compatible
if not hasattr(proxyobj, method):
continue
if is_compatible:
kwargs = self._deserialize_args(ctxt, kwargs)
result = getattr(proxyobj, method)(ctxt, **kwargs)
return self.serializer.serialize_entity(ctxt, result)
if had_compatible:
raise AttributeError("No such RPC function '%s'" % method)
else:
raise rpc_common.UnsupportedRpcVersion(version=version)
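# Minimal usage sketch for RpcDispatcher (ExampleManager is hypothetical and
# ctxt stands for an existing request context):
#
#     class ExampleManager(object):
#         RPC_API_VERSION = '1.1'
#
#         def ping(self, ctxt, payload=None):
#             return payload
#
#     dispatcher = RpcDispatcher([ExampleManager()])
#     dispatcher.dispatch(ctxt, '1.0', 'ping', None, payload='pong')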

@@ -1,195 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake RPC implementation which calls proxy methods directly with no
queues. Casts will block, but this is very useful for tests.
"""
import inspect
# NOTE(russellb): We specifically want to use json, not our own jsonutils.
# jsonutils has some extra logic to automatically convert objects to primitive
# types so that they can be serialized. We want to catch all cases where
# non-primitive types make it into this code and treat it as an error.
import json
import time
import eventlet
from nova.openstack.common.rpc import common as rpc_common
CONSUMERS = {}
class RpcContext(rpc_common.CommonRpcContext):
def __init__(self, **kwargs):
super(RpcContext, self).__init__(**kwargs)
self._response = []
self._done = False
def deepcopy(self):
values = self.to_dict()
new_inst = self.__class__(**values)
new_inst._response = self._response
new_inst._done = self._done
return new_inst
def reply(self, reply=None, failure=None, ending=False):
if ending:
self._done = True
if not self._done:
self._response.append((reply, failure))
class Consumer(object):
def __init__(self, topic, proxy):
self.topic = topic
self.proxy = proxy
def call(self, context, version, method, namespace, args, timeout):
done = eventlet.event.Event()
def _inner():
ctxt = RpcContext.from_dict(context.to_dict())
try:
rval = self.proxy.dispatch(context, version, method,
namespace, **args)
res = []
# Caller might have called ctxt.reply() manually
for (reply, failure) in ctxt._response:
if failure:
raise failure[0], failure[1], failure[2]
res.append(reply)
                # If an 'ending' reply was never sent, the function itself
                # may still have data to return.
if not ctxt._done:
if inspect.isgenerator(rval):
for val in rval:
res.append(val)
else:
res.append(rval)
done.send(res)
except rpc_common.ClientException as e:
done.send_exception(e._exc_info[1])
except Exception as e:
done.send_exception(e)
thread = eventlet.greenthread.spawn(_inner)
if timeout:
start_time = time.time()
while not done.ready():
eventlet.greenthread.sleep(1)
cur_time = time.time()
if (cur_time - start_time) > timeout:
thread.kill()
raise rpc_common.Timeout()
return done.wait()
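    # Note: when a timeout is given, the loop above polls the greenthread
    # once per second, so timeouts in this fake driver are only accurate to
    # roughly one-second granularity.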
class Connection(object):
"""Connection object."""
def __init__(self):
self.consumers = []
def create_consumer(self, topic, proxy, fanout=False):
consumer = Consumer(topic, proxy)
self.consumers.append(consumer)
if topic not in CONSUMERS:
CONSUMERS[topic] = []
CONSUMERS[topic].append(consumer)
def close(self):
for consumer in self.consumers:
CONSUMERS[consumer.topic].remove(consumer)
self.consumers = []
def consume_in_thread(self):
pass
def create_connection(conf, new=True):
"""Create a connection."""
return Connection()
def check_serialize(msg):
"""Make sure a message intended for rpc can be serialized."""
json.dumps(msg)
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
check_serialize(msg)
method = msg.get('method')
if not method:
return
args = msg.get('args', {})
version = msg.get('version', None)
namespace = msg.get('namespace', None)
try:
consumer = CONSUMERS[topic][0]
except (KeyError, IndexError):
raise rpc_common.Timeout("No consumers available")
else:
return consumer.call(context, version, method, namespace, args,
timeout)
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
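# Illustrative test usage (a sketch; the topic is arbitrary, the proxy passed
# to create_consumer() just needs a dispatch() method such as an
# RpcDispatcher, and ctxt must provide to_dict(), e.g. an rpc context):
#
#     conn = create_connection(conf)
#     conn.create_consumer('test-topic', some_dispatcher)
#     call(conf, ctxt, 'test-topic', {'method': 'ping', 'args': {}})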
def cast(conf, context, topic, msg):
check_serialize(msg)
try:
call(conf, context, topic, msg)
except Exception:
pass
def notify(conf, context, topic, msg, envelope):
check_serialize(msg)
def cleanup():
pass
def fanout_cast(conf, context, topic, msg):
"""Cast to all consumers of a topic."""
check_serialize(msg)
method = msg.get('method')
if not method:
return
args = msg.get('args', {})
version = msg.get('version', None)
namespace = msg.get('namespace', None)
for consumer in CONSUMERS.get(topic, []):
try:
consumer.call(context, version, method, namespace, args, None)
except Exception:
pass

@@ -1,856 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import socket
import ssl
import time
import uuid
import eventlet
import greenlet
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from oslo.config import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import network_utils
from nova.openstack.common.rpc import amqp as rpc_amqp
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import sslutils
kombu_opts = [
cfg.StrOpt('kombu_ssl_version',
default='',
help='SSL version to use (valid only if SSL enabled). '
                    'Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may '
'be available on some distributions'
),
cfg.StrOpt('kombu_ssl_keyfile',
default='',
help='SSL key file (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_certfile',
default='',
help='SSL cert file (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_ca_certs',
default='',
help=('SSL certification authority file '
'(valid only if SSL enabled)')),
cfg.StrOpt('rabbit_host',
default='localhost',
help='The RabbitMQ broker address where a single node is used'),
cfg.IntOpt('rabbit_port',
default=5672,
help='The RabbitMQ broker port where a single node is used'),
cfg.ListOpt('rabbit_hosts',
default=['$rabbit_host:$rabbit_port'],
help='RabbitMQ HA cluster host:port pairs'),
cfg.BoolOpt('rabbit_use_ssl',
default=False,
help='connect over SSL for RabbitMQ'),
cfg.StrOpt('rabbit_userid',
default='guest',
help='the RabbitMQ userid'),
cfg.StrOpt('rabbit_password',
default='guest',
help='the RabbitMQ password',
secret=True),
cfg.StrOpt('rabbit_virtual_host',
default='/',
help='the RabbitMQ virtual host'),
cfg.IntOpt('rabbit_retry_interval',
default=1,
help='how frequently to retry connecting with RabbitMQ'),
cfg.IntOpt('rabbit_retry_backoff',
default=2,
help='how long to backoff for between retries when connecting '
'to RabbitMQ'),
cfg.IntOpt('rabbit_max_retries',
default=0,
help='maximum retries with trying to connect to RabbitMQ '
'(the default of 0 implies an infinite retry count)'),
cfg.BoolOpt('rabbit_ha_queues',
default=False,
                help='Use H/A queues in RabbitMQ (x-ha-policy: all). '
                     'You need to wipe the RabbitMQ database when '
                     'changing this option.'),
]
cfg.CONF.register_opts(kombu_opts)
LOG = rpc_common.LOG
def _get_queue_arguments(conf):
"""Construct the arguments for declaring a queue.
If the rabbit_ha_queues option is set, we declare a mirrored queue
as described here:
http://www.rabbitmq.com/ha.html
Setting x-ha-policy to all means that the queue will be mirrored
to all nodes in the cluster.
"""
return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
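# For example, with rabbit_ha_queues=True every queue is declared with
# {'x-ha-policy': 'all'}, which RabbitMQ interprets as "mirror this queue
# to every node in the cluster".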
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, channel, callback, tag, **kwargs):
"""Declare a queue on an amqp channel.
'channel' is the amqp channel to use
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
queue name, exchange name, and other kombu options are
passed in here as a dictionary.
"""
self.callback = callback
self.tag = str(tag)
self.kwargs = kwargs
self.queue = None
self.ack_on_error = kwargs.get('ack_on_error', True)
self.reconnect(channel)
def reconnect(self, channel):
"""Re-declare the queue after a rabbit reconnect."""
self.channel = channel
self.kwargs['channel'] = channel
self.queue = kombu.entity.Queue(**self.kwargs)
self.queue.declare()
def _callback_handler(self, message, callback):
"""Call callback with deserialized message.
Messages that are processed without exception are ack'ed.
If the message processing generates an exception, it will be
ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
"""
try:
msg = rpc_common.deserialize_msg(message.payload)
callback(msg)
except Exception:
if self.ack_on_error:
LOG.exception(_("Failed to process message"
" ... skipping it."))
message.ack()
else:
LOG.exception(_("Failed to process message"
" ... will requeue."))
message.requeue()
else:
message.ack()
def consume(self, *args, **kwargs):
"""Actually declare the consumer on the amqp channel. This will
start the flow of messages from the queue. Using the
Connection.iterconsume() iterator will process the messages,
calling the appropriate callback.
If a callback is specified in kwargs, use that. Otherwise,
use the callback passed during __init__()
        If kwargs['nowait'] is True, the consumer is declared without
        waiting for a confirmation from the broker.
"""
options = {'consumer_tag': self.tag}
options['nowait'] = kwargs.get('nowait', False)
callback = kwargs.get('callback', self.callback)
if not callback:
raise ValueError("No callback defined")
def _callback(raw_message):
message = self.channel.message_to_python(raw_message)
self._callback_handler(message, callback)
self.queue.consume(*args, callback=_callback, **options)
def cancel(self):
"""Cancel the consuming from the queue, if it has started."""
try:
self.queue.cancel(self.tag)
except KeyError as e:
            # NOTE(comstud): Kludge to get around an amqplib bug
if str(e) != "u'%s'" % self.tag:
raise
self.queue = None
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'."""
def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
"""Init a 'direct' queue.
'channel' is the amqp channel to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
Other kombu options may be passed
"""
# Default options
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=msg_id,
type='direct',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(DirectConsumer, self).__init__(channel,
callback,
tag,
name=msg_id,
exchange=exchange,
routing_key=msg_id,
**options)
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'."""
def __init__(self, conf, channel, topic, callback, tag, name=None,
exchange_name=None, **kwargs):
"""Init a 'topic' queue.
:param channel: the amqp channel to use
:param topic: the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param tag: a unique ID for the consumer on the channel
:param name: optional queue name, defaults to topic
:paramtype name: str
Other kombu options may be passed as keyword arguments
"""
# Default options
options = {'durable': conf.amqp_durable_queues,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': conf.amqp_auto_delete,
'exclusive': False}
options.update(kwargs)
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
exchange = kombu.entity.Exchange(name=exchange_name,
type='topic',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(TopicConsumer, self).__init__(channel,
callback,
tag,
name=name or topic,
exchange=exchange,
routing_key=topic,
**options)
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'."""
def __init__(self, conf, channel, topic, callback, tag, **kwargs):
"""Init a 'fanout' queue.
'channel' is the amqp channel to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
Other kombu options may be passed
"""
unique = uuid.uuid4().hex
exchange_name = '%s_fanout' % topic
queue_name = '%s_fanout_%s' % (topic, unique)
# Default options
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(FanoutConsumer, self).__init__(channel, callback, tag,
name=queue_name,
exchange=exchange,
routing_key=topic,
**options)
class Publisher(object):
"""Base Publisher class."""
def __init__(self, channel, exchange_name, routing_key, **kwargs):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.exchange_name = exchange_name
self.routing_key = routing_key
self.kwargs = kwargs
self.reconnect(channel)
def reconnect(self, channel):
"""Re-establish the Producer after a rabbit reconnection."""
self.exchange = kombu.entity.Exchange(name=self.exchange_name,
**self.kwargs)
self.producer = kombu.messaging.Producer(exchange=self.exchange,
channel=channel,
routing_key=self.routing_key)
def send(self, msg, timeout=None):
"""Send a message."""
if timeout:
#
# AMQP TTL is in milliseconds when set in the header.
#
self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
else:
self.producer.publish(msg)
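    # Illustrative: publisher.send({'method': 'ping'}, timeout=30) publishes
    # the message with headers={'ttl': 30000}, i.e. a 30 second expiry.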
class DirectPublisher(Publisher):
"""Publisher class for 'direct'."""
def __init__(self, conf, channel, msg_id, **kwargs):
"""init a 'direct' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': False,
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
type='direct', **options)
class TopicPublisher(Publisher):
"""Publisher class for 'topic'."""
def __init__(self, conf, channel, topic, **kwargs):
"""init a 'topic' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': conf.amqp_durable_queues,
'auto_delete': conf.amqp_auto_delete,
'exclusive': False}
options.update(kwargs)
exchange_name = rpc_amqp.get_control_exchange(conf)
super(TopicPublisher, self).__init__(channel,
exchange_name,
topic,
type='topic',
**options)
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'."""
def __init__(self, conf, channel, topic, **kwargs):
"""init a 'fanout' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': False,
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
None, type='fanout', **options)
class NotifyPublisher(TopicPublisher):
"""Publisher class for 'notify'."""
def __init__(self, conf, channel, topic, **kwargs):
self.durable = kwargs.pop('durable', conf.amqp_durable_queues)
self.queue_arguments = _get_queue_arguments(conf)
super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
def reconnect(self, channel):
super(NotifyPublisher, self).reconnect(channel)
# NOTE(jerdfelt): Normally the consumer would create the queue, but
# we do this to ensure that messages don't get dropped if the
# consumer is started after we do
queue = kombu.entity.Queue(channel=channel,
exchange=self.exchange,
durable=self.durable,
name=self.routing_key,
routing_key=self.routing_key,
queue_arguments=self.queue_arguments)
queue.declare()
class Connection(object):
"""Connection object."""
pool = None
def __init__(self, conf, server_params=None):
self.consumers = []
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf
self.max_retries = self.conf.rabbit_max_retries
# Try forever?
if self.max_retries <= 0:
self.max_retries = None
self.interval_start = self.conf.rabbit_retry_interval
self.interval_stepping = self.conf.rabbit_retry_backoff
# max retry-interval = 30 seconds
self.interval_max = 30
self.memory_transport = False
if server_params is None:
server_params = {}
# Keys to translate from server_params to kombu params
server_params_to_kombu_params = {'username': 'userid'}
ssl_params = self._fetch_ssl_params()
params_list = []
for adr in self.conf.rabbit_hosts:
hostname, port = network_utils.parse_host_port(
adr, default_port=self.conf.rabbit_port)
params = {
'hostname': hostname,
'port': port,
'userid': self.conf.rabbit_userid,
'password': self.conf.rabbit_password,
'virtual_host': self.conf.rabbit_virtual_host,
}
for sp_key, value in server_params.iteritems():
p_key = server_params_to_kombu_params.get(sp_key, sp_key)
params[p_key] = value
if self.conf.fake_rabbit:
params['transport'] = 'memory'
if self.conf.rabbit_use_ssl:
params['ssl'] = ssl_params
params_list.append(params)
self.params_list = params_list
self.memory_transport = self.conf.fake_rabbit
self.connection = None
self.reconnect()
def _fetch_ssl_params(self):
"""Handles fetching what ssl params should be used for the connection
(if any).
"""
ssl_params = dict()
# http://docs.python.org/library/ssl.html - ssl.wrap_socket
if self.conf.kombu_ssl_version:
ssl_params['ssl_version'] = sslutils.validate_ssl_version(
self.conf.kombu_ssl_version)
if self.conf.kombu_ssl_keyfile:
ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
if self.conf.kombu_ssl_certfile:
ssl_params['certfile'] = self.conf.kombu_ssl_certfile
if self.conf.kombu_ssl_ca_certs:
ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
# We might want to allow variations in the
# future with this?
ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
# Return the extended behavior or just have the default behavior
return ssl_params or True
def _connect(self, params):
"""Connect to rabbit. Re-establish any queues that may have
been declared before if we are reconnecting. Exceptions should
be handled by the caller.
"""
if self.connection:
LOG.info(_("Reconnecting to AMQP server on "
"%(hostname)s:%(port)d") % params)
try:
self.connection.release()
except self.connection_errors:
pass
# Setting this in case the next statement fails, though
# it shouldn't be doing any network operations, yet.
self.connection = None
self.connection = kombu.connection.BrokerConnection(**params)
self.connection_errors = self.connection.connection_errors
if self.memory_transport:
# Kludge to speed up tests.
self.connection.transport.polling_interval = 0.0
self.consumer_num = itertools.count(1)
self.connection.connect()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
if self.memory_transport:
self.channel._new_queue('ae.undeliver')
for consumer in self.consumers:
consumer.reconnect(self.channel)
LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
params)
def reconnect(self):
"""Handles reconnecting and re-establishing queues.
Will retry up to self.max_retries number of times.
self.max_retries = 0 means to retry forever.
Sleep between tries, starting at self.interval_start
seconds, backing off self.interval_stepping number of seconds
each attempt.
"""
attempt = 0
while True:
params = self.params_list[attempt % len(self.params_list)]
attempt += 1
try:
self._connect(params)
return
except (IOError, self.connection_errors) as e:
pass
except Exception as e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for
# a protocol response. (See paste link in LP888621)
# So, we check all exceptions for 'timeout' in them
# and try to reconnect in this case.
if 'timeout' not in str(e):
raise
log_info = {}
log_info['err_str'] = str(e)
log_info['max_retries'] = self.max_retries
log_info.update(params)
if self.max_retries and attempt == self.max_retries:
msg = _('Unable to connect to AMQP server on '
'%(hostname)s:%(port)d after %(max_retries)d '
'tries: %(err_str)s') % log_info
LOG.error(msg)
raise rpc_common.RPCException(msg)
if attempt == 1:
sleep_time = self.interval_start or 1
elif attempt > 1:
sleep_time += self.interval_stepping
if self.interval_max:
sleep_time = min(sleep_time, self.interval_max)
log_info['sleep_time'] = sleep_time
LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
'unreachable: %(err_str)s. Trying again in '
'%(sleep_time)d seconds.') % log_info)
time.sleep(sleep_time)
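    # With the default options (rabbit_retry_interval=1,
    # rabbit_retry_backoff=2, interval_max capped at 30), the sleep schedule
    # between attempts is 1, 3, 5, 7, ... seconds, never more than 30.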
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (self.connection_errors, socket.timeout, IOError) as e:
if error_callback:
error_callback(e)
except Exception as e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for
# a protocol response. (See paste link in LP888621)
# So, we check all exceptions for 'timeout' in them
# and try to reconnect in this case.
if 'timeout' not in str(e):
raise
if error_callback:
error_callback(e)
self.reconnect()
def get_channel(self):
"""Convenience call for bin/clear_rabbit_queues."""
return self.channel
def close(self):
"""Close/release this connection."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.connection.release()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.channel.close()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
if self.memory_transport:
self.channel._new_queue('ae.undeliver')
self.consumers = []
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.channel, topic, callback,
self.consumer_num.next())
self.consumers.append(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers."""
info = {'do_consume': True}
def _error_callback(exc):
if isinstance(exc, socket.timeout):
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
info['do_consume'] = True
def _consume():
if info['do_consume']:
queues_head = self.consumers[:-1] # not fanout.
queues_tail = self.consumers[-1] # fanout
for queue in queues_head:
queue.consume(nowait=True)
queues_tail.consume(nowait=False)
info['do_consume'] = False
return self.connection.drain_events(timeout=timeout)
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread."""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()
def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
"""Send to a publisher based on the publisher class."""
def _error_callback(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publish():
publisher = cls(self.conf, self.channel, topic, **kwargs)
publisher.send(msg, timeout)
self.ensure(_error_callback, _publish)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None, ack_on_error=True):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
ack_on_error=ack_on_error,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer."""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message."""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message."""
self.publisher_send(TopicPublisher, topic, msg, timeout)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message."""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic."""
self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)
def consume(self, limit=None):
"""Consume from all queues/consumers."""
it = self.iterconsume(limit=limit)
while True:
try:
it.next()
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread."""
@excutils.forever_retry_uncaught_exceptions
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
if fanout:
self.declare_fanout_consumer(topic, proxy_cb)
else:
self.declare_topic_consumer(topic, proxy_cb)
def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
self.declare_topic_consumer(topic, proxy_cb, pool_name)
def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None, ack_on_error=True):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
        A message will be delivered to multiple pools if more than
        one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
wait_for_consumers=not ack_on_error
)
self.proxy_callbacks.append(callback_wrapper)
self.declare_topic_consumer(
queue_name=pool_name,
topic=topic,
exchange_name=exchange_name,
callback=callback_wrapper,
ack_on_error=ack_on_error,
)
def create_connection(conf, new=True):
"""Create a connection."""
return rpc_amqp.create_connection(
conf, new,
rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection),
envelope)
def cleanup():
return rpc_amqp.cleanup(Connection.pool)

@@ -1,822 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import time
import eventlet
import greenlet
from oslo.config import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import amqp as rpc_amqp
from nova.openstack.common.rpc import common as rpc_common
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
LOG = logging.getLogger(__name__)
qpid_opts = [
cfg.StrOpt('qpid_hostname',
default='localhost',
help='Qpid broker hostname'),
cfg.IntOpt('qpid_port',
default=5672,
help='Qpid broker port'),
cfg.ListOpt('qpid_hosts',
default=['$qpid_hostname:$qpid_port'],
help='Qpid HA cluster host:port pairs'),
cfg.StrOpt('qpid_username',
default='',
help='Username for qpid connection'),
cfg.StrOpt('qpid_password',
default='',
help='Password for qpid connection',
secret=True),
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
cfg.IntOpt('qpid_heartbeat',
default=60,
help='Seconds between connection keepalive heartbeats'),
cfg.StrOpt('qpid_protocol',
default='tcp',
help="Transport to use, either 'tcp' or 'ssl'"),
cfg.BoolOpt('qpid_tcp_nodelay',
default=True,
help='Disable Nagle algorithm'),
# NOTE(russellb) If any additional versions are added (beyond 1 and 2),
# this file could probably use some additional refactoring so that the
# differences between each version are split into different classes.
cfg.IntOpt('qpid_topology_version',
default=1,
help="The qpid topology version to use. Version 1 is what "
"was originally used by impl_qpid. Version 2 includes "
"some backwards-incompatible changes that allow broker "
"federation to work. Users should update to version 2 "
"when they are able to take everything down, as it "
"requires a clean break."),
]
cfg.CONF.register_opts(qpid_opts)
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
def raise_invalid_topology_version(conf):
msg = (_("Invalid value for qpid_topology_version: %d") %
conf.qpid_topology_version)
LOG.error(msg)
raise Exception(msg)
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, conf, session, callback, node_name, node_opts,
link_name, link_opts):
"""Declare a queue on an amqp session.
'session' is the amqp session to use
'callback' is the callback to call when messages are received
'node_name' is the first part of the Qpid address string, before ';'
'node_opts' will be applied to the "x-declare" section of "node"
in the address string.
'link_name' goes into the "name" field of the "link" in the address
string
'link_opts' will be applied to the "x-declare" section of "link"
in the address string.
"""
self.callback = callback
self.receiver = None
self.session = None
if conf.qpid_topology_version == 1:
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": True,
"auto-delete": True,
},
},
"link": {
"durable": True,
"x-declare": {
"durable": False,
"auto-delete": True,
"exclusive": False,
},
},
}
addr_opts["node"]["x-declare"].update(node_opts)
elif conf.qpid_topology_version == 2:
addr_opts = {
"link": {
"x-declare": {
"auto-delete": True,
"exclusive": False,
},
},
}
else:
            raise_invalid_topology_version(conf)
addr_opts["link"]["x-declare"].update(link_opts)
if link_name:
addr_opts["link"]["name"] = link_name
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.connect(session)
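    # For topology version 1, the resulting address string looks roughly
    # like this (illustrative, with the JSON options abbreviated):
    #
    #     'exchange/topic ; {"create": "always", "node": {...}, "link": {...}}'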
def connect(self, session):
"""Declare the reciever on connect."""
self._declare_receiver(session)
def reconnect(self, session):
"""Re-declare the receiver after a qpid reconnect."""
self._declare_receiver(session)
def _declare_receiver(self, session):
self.session = session
self.receiver = session.receiver(self.address)
self.receiver.capacity = 1
def _unpack_json_msg(self, msg):
"""Load the JSON data in msg if msg.content_type indicates that it
is necessary. Put the loaded data back into msg.content and
update msg.content_type appropriately.
A Qpid Message containing a dict will have a content_type of
'amqp/map', whereas one containing a string that needs to be converted
back from JSON will have a content_type of JSON_CONTENT_TYPE.
:param msg: a Qpid Message object
:returns: None
"""
if msg.content_type == JSON_CONTENT_TYPE:
msg.content = jsonutils.loads(msg.content)
msg.content_type = 'amqp/map'
def consume(self):
"""Fetch the message and pass it to the callback object."""
message = self.receiver.fetch()
try:
self._unpack_json_msg(message)
msg = rpc_common.deserialize_msg(message.content)
self.callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
# TODO(sandy): Need support for optional ack_on_error.
self.session.acknowledge(message)
def get_receiver(self):
return self.receiver
def get_node_name(self):
return self.address.split(';')[0]
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'."""
def __init__(self, conf, session, msg_id, callback):
"""Init a 'direct' queue.
'session' is the amqp session to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
"""
link_opts = {
"auto-delete": conf.amqp_auto_delete,
"exclusive": True,
"durable": conf.amqp_durable_queues,
}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (msg_id, msg_id)
node_opts = {"type": "direct"}
link_name = msg_id
elif conf.qpid_topology_version == 2:
node_name = "amq.direct/%s" % msg_id
node_opts = {}
link_name = None
else:
            raise_invalid_topology_version(conf)
super(DirectConsumer, self).__init__(conf, session, callback,
node_name, node_opts, link_name,
link_opts)
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'."""
def __init__(self, conf, session, topic, callback, name=None,
exchange_name=None):
"""Init a 'topic' queue.
:param session: the amqp session to use
        :param topic: the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param name: optional queue name, defaults to topic
"""
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
link_opts = {
"auto-delete": conf.amqp_auto_delete,
"durable": conf.amqp_durable_queues,
}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
            raise_invalid_topology_version(conf)
super(TopicConsumer, self).__init__(conf, session, callback, node_name,
{}, name or topic, link_opts)
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'."""
def __init__(self, conf, session, topic, callback):
"""Init a 'fanout' queue.
'session' is the amqp session to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
"""
self.conf = conf
link_opts = {"exclusive": True}
if conf.qpid_topology_version == 1:
node_name = "%s_fanout" % topic
node_opts = {"durable": False, "type": "fanout"}
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/fanout/%s" % topic
node_opts = {}
else:
            raise_invalid_topology_version(conf)
super(FanoutConsumer, self).__init__(conf, session, callback,
node_name, node_opts, None,
link_opts)
class Publisher(object):
"""Base Publisher class."""
def __init__(self, conf, session, node_name, node_opts=None):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.sender = None
self.session = session
if conf.qpid_topology_version == 1:
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": False,
# auto-delete isn't implemented for exchanges in qpid,
# but put in here anyway
"auto-delete": True,
},
},
}
if node_opts:
addr_opts["node"]["x-declare"].update(node_opts)
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
elif conf.qpid_topology_version == 2:
self.address = node_name
else:
            raise_invalid_topology_version(conf)
self.reconnect(session)
def reconnect(self, session):
"""Re-establish the Sender after a reconnection."""
self.sender = session.sender(self.address)
def _pack_json_msg(self, msg):
"""Qpid cannot serialize dicts containing strings longer than 65535
characters. This function dumps the message content to a JSON
string, which Qpid is able to handle.
:param msg: May be either a Qpid Message object or a bare dict.
:returns: A Qpid Message with its content field JSON encoded.
"""
try:
msg.content = jsonutils.dumps(msg.content)
except AttributeError:
# Need to have a Qpid message so we can set the content_type.
msg = qpid_messaging.Message(jsonutils.dumps(msg))
msg.content_type = JSON_CONTENT_TYPE
return msg
def send(self, msg):
"""Send a message."""
try:
# Check if Qpid can encode the message
check_msg = msg
if not hasattr(check_msg, 'content_type'):
check_msg = qpid_messaging.Message(msg)
content_type = check_msg.content_type
enc, dec = qpid_messaging.message.get_codec(content_type)
enc(check_msg.content)
except qpid_codec.CodecException:
# This means the message couldn't be serialized as a dict.
msg = self._pack_json_msg(msg)
self.sender.send(msg)
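    # Sketch of the effect: content qpid's codec can encode (e.g. a plain
    # dict) is sent as-is; anything the codec rejects is re-encoded by
    # _pack_json_msg() as a JSON string with content_type
    # 'application/json; charset=utf8' and unpacked again on the consumer
    # side by _unpack_json_msg().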
class DirectPublisher(Publisher):
"""Publisher class for 'direct'."""
def __init__(self, conf, session, msg_id):
"""Init a 'direct' publisher."""
if conf.qpid_topology_version == 1:
node_name = msg_id
node_opts = {"type": "direct"}
elif conf.qpid_topology_version == 2:
node_name = "amq.direct/%s" % msg_id
node_opts = {}
else:
            raise_invalid_topology_version(conf)
super(DirectPublisher, self).__init__(conf, session, node_name,
node_opts)
class TopicPublisher(Publisher):
"""Publisher class for 'topic'."""
def __init__(self, conf, session, topic):
"""init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
            raise_invalid_topology_version(conf)
super(TopicPublisher, self).__init__(conf, session, node_name)
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'."""
def __init__(self, conf, session, topic):
"""init a 'fanout' publisher.
"""
if conf.qpid_topology_version == 1:
node_name = "%s_fanout" % topic
node_opts = {"type": "fanout"}
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/fanout/%s" % topic
node_opts = {}
else:
            raise_invalid_topology_version(conf)
super(FanoutPublisher, self).__init__(conf, session, node_name,
node_opts)
class NotifyPublisher(Publisher):
"""Publisher class for notifications."""
def __init__(self, conf, session, topic):
"""init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
node_opts = {"durable": True}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
            raise_invalid_topology_version(conf)
super(NotifyPublisher, self).__init__(conf, session, node_name,
node_opts)
class Connection(object):
"""Connection object."""
pool = None
def __init__(self, conf, server_params=None):
if not qpid_messaging:
raise ImportError("Failed to import qpid.messaging")
self.session = None
self.consumers = {}
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf
if server_params and 'hostname' in server_params:
# NOTE(russellb) This enables support for cast_to_server.
server_params['qpid_hosts'] = [
'%s:%d' % (server_params['hostname'],
server_params.get('port', 5672))
]
params = {
'qpid_hosts': self.conf.qpid_hosts,
'username': self.conf.qpid_username,
'password': self.conf.qpid_password,
}
params.update(server_params or {})
self.brokers = params['qpid_hosts']
self.username = params['username']
self.password = params['password']
self.connection_create(self.brokers[0])
self.reconnect()
def connection_create(self, broker):
# Create the connection - this does not open the connection
self.connection = qpid_messaging.Connection(broker)
# Check if flags are set and if so set them for the connection
# before we call open
self.connection.username = self.username
self.connection.password = self.password
self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
# Reconnection is done by self.reconnect()
self.connection.reconnect = False
self.connection.heartbeat = self.conf.qpid_heartbeat
self.connection.transport = self.conf.qpid_protocol
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
def _register_consumer(self, consumer):
self.consumers[str(consumer.get_receiver())] = consumer
def _lookup_consumer(self, receiver):
return self.consumers[str(receiver)]
def reconnect(self):
"""Handles reconnecting and re-establishing sessions and queues."""
attempt = 0
delay = 1
while True:
# Close the session if necessary
if self.connection.opened():
try:
self.connection.close()
except qpid_exceptions.ConnectionError:
pass
broker = self.brokers[attempt % len(self.brokers)]
attempt += 1
try:
self.connection_create(broker)
self.connection.open()
except qpid_exceptions.ConnectionError as e:
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg)
time.sleep(delay)
delay = min(2 * delay, 60)
else:
LOG.info(_('Connected to AMQP server on %s'), broker)
break
self.session = self.connection.session()
if self.consumers:
consumers = self.consumers
self.consumers = {}
for consumer in consumers.itervalues():
consumer.reconnect(self.session)
self._register_consumer(consumer)
LOG.debug(_("Re-established AMQP queues"))
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (qpid_exceptions.Empty,
qpid_exceptions.ConnectionError) as e:
if error_callback:
error_callback(e)
self.reconnect()
def close(self):
"""Close/release this connection."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
try:
self.connection.close()
except Exception:
# NOTE(dripton) Logging exceptions that happen during cleanup just
# causes confusion; there's really nothing useful we can do with
# them.
pass
self.connection = None
def reset(self):
"""Reset a connection so it can be used again."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.session.close()
self.session = self.connection.session()
self.consumers = {}
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.session, topic, callback)
self._register_consumer(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers."""
def _error_callback(exc):
if isinstance(exc, qpid_exceptions.Empty):
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
def _consume():
nxt_receiver = self.session.next_receiver(timeout=timeout)
try:
self._lookup_consumer(nxt_receiver).consume()
except Exception:
LOG.exception(_("Error processing message. Skipping it."))
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread."""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()
def publisher_send(self, cls, topic, msg):
"""Send to a publisher based on the publisher class."""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publisher_send():
publisher = cls(self.conf, self.session, topic)
publisher.send(msg)
return self.ensure(_connect_error, _publisher_send)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer."""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message."""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message."""
#
# We want to create a message with attributes, e.g. a TTL. We
# don't really need to keep 'msg' in its JSON format any longer
# so let's create an actual qpid message here and get some
# value-add on the go.
#
# WARNING: Request timeout happens to be in the same units as
# qpid's TTL (seconds). If this changes in the future, then this
# will need to be altered accordingly.
#
qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
self.publisher_send(TopicPublisher, topic, qpid_message)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message."""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic."""
self.publisher_send(NotifyPublisher, topic, msg)
def consume(self, limit=None):
"""Consume from all queues/consumers."""
it = self.iterconsume(limit=limit)
while True:
try:
it.next()
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread."""
@excutils.forever_retry_uncaught_exceptions
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
if fanout:
consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
else:
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)
self._register_consumer(consumer)
return consumer
def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
name=pool_name)
self._register_consumer(consumer)
return consumer
def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None, ack_on_error=True):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
        A message will be delivered to multiple pools if more than
        one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
wait_for_consumers=not ack_on_error
)
self.proxy_callbacks.append(callback_wrapper)
consumer = TopicConsumer(conf=self.conf,
session=self.session,
topic=topic,
callback=callback_wrapper,
name=pool_name,
exchange_name=exchange_name)
self._register_consumer(consumer)
return consumer
def create_connection(conf, new=True):
"""Create a connection."""
return rpc_amqp.create_connection(
conf, new,
rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection),
envelope)
def cleanup():
return rpc_amqp.cleanup(Connection.pool)

@@ -1,818 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import re
import socket
import sys
import types
import uuid
import eventlet
import greenlet
from oslo.config import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq')
# Module-level aliases, kept for convenience; they are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('nova.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
CONF = cfg.CONF
CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memoized matchmaker object
def _serialize(data):
"""Serialization wrapper.
We prefer using JSON, but it cannot encode all types.
    Raises an error if a developer passes us bad data.
"""
try:
return jsonutils.dumps(data, ensure_ascii=True)
except TypeError:
with excutils.save_and_reraise_exception():
LOG.error(_("JSON serialization failed."))
def _deserialize(data):
"""Deserialization wrapper."""
LOG.debug(_("Deserializing: %s"), data)
return jsonutils.loads(data)
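# A quick round-trip sanity check of the two wrappers above; the payload is
# illustrative.
payload = {'method': 'ping', 'args': {'host': 'node1'}}
assert _deserialize(_serialize(payload)) == payload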
class ZmqSocket(object):
"""A tiny wrapper around ZeroMQ.
Simplifies the send/recv protocol and connection management.
Can be used as a Context (supports the 'with' statement).
"""
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
self.sock = _get_ctxt().socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
        # Flags used to raise failures on sending/receiving with the
        # wrong socket type.
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
self.can_sub = zmq_type in (zmq.SUB, )
# Support list, str, & None for subscribe arg (cast to list)
do_sub = {
list: subscribe,
str: [subscribe],
type(None): []
}[type(subscribe)]
for f in do_sub:
self.subscribe(f)
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
LOG.debug(_("-> bind: %(bind)s"), str_data)
try:
if bind:
self.sock.bind(addr)
else:
self.sock.connect(addr)
except Exception:
raise RPCException(_("Could not open socket."))
def socket_s(self):
"""Get socket type as string."""
t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
'DEALER')
return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
def subscribe(self, msg_filter):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug(_("Subscribing to %s"), msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
except Exception:
return
self.subscriptions.append(msg_filter)
def unsubscribe(self, msg_filter):
"""Unsubscribe."""
if msg_filter not in self.subscriptions:
return
self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
self.subscriptions.remove(msg_filter)
def close(self):
if self.sock is None or self.sock.closed:
return
# We must unsubscribe, or we'll leak descriptors.
if self.subscriptions:
for f in self.subscriptions:
try:
self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
except Exception:
pass
self.subscriptions = []
try:
# Default is to linger
self.sock.close()
except Exception:
# While this is a bad thing to happen,
# it would be much worse if some of the code calling this
            # were to fail. For now, let's log, and later evaluate
# if we can safely raise here.
LOG.error("ZeroMQ socket could not be closed.")
self.sock = None
def recv(self, **kwargs):
if not self.can_recv:
raise RPCException(_("You cannot recv on this socket."))
return self.sock.recv_multipart(**kwargs)
def send(self, data, **kwargs):
if not self.can_send:
raise RPCException(_("You cannot send on this socket."))
self.sock.send_multipart(data, **kwargs)
class ZmqClient(object):
"""Client for ZMQ sockets."""
def __init__(self, addr):
self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)
def cast(self, msg_id, topic, data, envelope):
msg_id = msg_id or 0
if not envelope:
self.outq.send(map(bytes,
(msg_id, topic, 'cast', _serialize(data))))
return
rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
self.outq.send(map(bytes,
(msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
def close(self):
self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.replies = []
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['replies'] = self.replies
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False):
if ending:
return
self.replies.append(reply)
@classmethod
def marshal(self, ctx):
ctx_data = ctx.to_dict()
return _serialize(ctx_data)
@classmethod
def unmarshal(self, data):
return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
"""Used by ConsumerBase as a private context for - methods."""
def __init__(self, proxy):
self.proxy = proxy
self.msg_waiter = None
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
try:
result = proxy.dispatch(
ctx, data['version'], data['method'],
data.get('namespace'), **data['args'])
return ConsumerBase.normalize_reply(result, ctx.replies)
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException as e:
LOG.debug(_("Expected exception during message handling (%s)") %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
def reply(self, ctx, proxy,
msg_id=None, context=None, topic=None, msg=None):
"""Reply to a casted call."""
# NOTE(ewindisch): context kwarg exists for Grizzly compat.
# this may be able to be removed earlier than
# 'I' if ConsumerBase.process were refactored.
if type(msg) is list:
payload = msg[-1]
else:
payload = msg
response = ConsumerBase.normalize_reply(
self._get_response(ctx, proxy, topic, payload),
ctx.replies)
LOG.debug(_("Sending reply"))
_multi_send(_cast, ctx, topic, {
'method': '-process_reply',
'args': {
'msg_id': msg_id, # Include for Folsom compat.
'response': response
}
}, _msg_id=msg_id)
class ConsumerBase(object):
"""Base Consumer."""
def __init__(self):
self.private_ctx = InternalContext(None)
@classmethod
def normalize_reply(self, result, replies):
#TODO(ewindisch): re-evaluate and document this method.
if isinstance(result, types.GeneratorType):
return list(result)
elif replies:
return replies
else:
return [result]
def process(self, proxy, ctx, data):
data.setdefault('version', None)
data.setdefault('args', {})
        # Methods starting with '-' are
        # processed internally (not a valid public method name).
method = data.get('method')
if not method:
LOG.error(_("RPC message did not include method."))
return
# Internal method
# uses internal context for safety.
if method == '-reply':
self.private_ctx.reply(ctx, proxy, **data['args'])
return
proxy.dispatch(ctx, data['version'],
data['method'], data.get('namespace'), **data['args'])
class ZmqBaseReactor(ConsumerBase):
"""A consumer class implementing a centralized casting broker (PULL-PUSH).
Used for RoundRobin requests.
"""
def __init__(self, conf):
super(ZmqBaseReactor, self).__init__()
self.proxies = {}
self.threads = []
self.sockets = []
self.subscribe = {}
self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
def register(self, proxy, in_addr, zmq_type_in,
in_bind=True, subscribe=None):
LOG.info(_("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype")
# Items push in.
inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
subscribe=subscribe)
self.proxies[inq] = proxy
self.sockets.append(inq)
LOG.info(_("In reactor registered"))
def consume_in_thread(self):
@excutils.forever_retry_uncaught_exceptions
def _consume(sock):
LOG.info(_("Consuming socket"))
while True:
self.consume(sock)
for k in self.proxies.keys():
self.threads.append(
self.pool.spawn(_consume, k)
)
def wait(self):
for t in self.threads:
t.wait()
def close(self):
for s in self.sockets:
s.close()
for t in self.threads:
t.kill()
class ZmqProxy(ZmqBaseReactor):
"""A consumer class implementing a topic-based proxy.
Forwards to IPC sockets.
"""
def __init__(self, conf):
super(ZmqProxy, self).__init__(conf)
pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))
self.topic_proxy = {}
def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir
data = sock.recv(copy=False)
topic = data[1].bytes
if topic.startswith('fanout~'):
sock_type = zmq.PUB
topic = topic.split('.', 1)[0]
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
else:
sock_type = zmq.PUSH
if topic not in self.topic_proxy:
def publisher(waiter):
LOG.info(_("Creating proxy for topic: %s"), topic)
try:
# The topic is received over the network,
# don't trust this input.
if self.badchars.search(topic) is not None:
emsg = _("Topic contained dangerous characters.")
LOG.warn(emsg)
raise RPCException(emsg)
out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
(ipc_dir, topic),
sock_type, bind=True)
except RPCException:
waiter.send_exception(*sys.exc_info())
return
self.topic_proxy[topic] = eventlet.queue.LightQueue(
CONF.rpc_zmq_topic_backlog)
self.sockets.append(out_sock)
# It takes some time for a pub socket to open,
# before we can have any faith in doing a send() to it.
if sock_type == zmq.PUB:
eventlet.sleep(.5)
waiter.send(True)
while(True):
data = self.topic_proxy[topic].get()
out_sock.send(data, copy=False)
wait_sock_creation = eventlet.event.Event()
eventlet.spawn(publisher, wait_sock_creation)
try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_("Topic socket file creation failed."))
return
try:
self.topic_proxy[topic].put_nowait(data)
except eventlet.queue.Full:
LOG.error(_("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service."""
ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None)
try:
os.makedirs(ipc_dir)
except os.error:
if not os.path.isdir(ipc_dir):
with excutils.save_and_reraise_exception():
LOG.error(_("Required IPC directory does not exist at"
" %s") % (ipc_dir, ))
try:
self.register(consumption_proxy,
consume_in,
zmq.PULL)
except zmq.ZMQError:
if os.access(ipc_dir, os.X_OK):
with excutils.save_and_reraise_exception():
LOG.error(_("Permission denied to IPC directory at"
" %s") % (ipc_dir, ))
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv):
"""Unflattens the RPC envelope.
Takes a list and returns a dictionary.
i.e. [1,2,3,4] => {1: 2, 3: 4}
"""
i = iter(packenv)
h = {}
try:
while True:
k = i.next()
h[k] = i.next()
except StopIteration:
return h
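# A sanity-check sketch of the pairing behavior; the envelope contents are
# illustrative.
packed = ['oslo.version', '2.0', 'oslo.message', '{}']
assert unflatten_envelope(packed) == {'oslo.version': '2.0',
                                      'oslo.message': '{}'}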
class ZmqReactor(ZmqBaseReactor):
"""A consumer class implementing a consumer for messages.
Can also be used as a 1:1 proxy
"""
def __init__(self, conf):
super(ZmqReactor, self).__init__(conf)
def consume(self, sock):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
proxy = self.proxies[sock]
if data[2] == 'cast': # Legacy protocol
packenv = data[3]
ctx, msg = _deserialize(packenv)
request = rpc_common.deserialize_msg(msg)
ctx = RpcContext.unmarshal(ctx)
elif data[2] == 'impl_zmq_v2':
packenv = data[4:]
msg = unflatten_envelope(packenv)
request = rpc_common.deserialize_msg(msg)
# Unmarshal only after verifying the message.
ctx = RpcContext.unmarshal(data[3])
else:
LOG.error(_("ZMQ Envelope version unsupported or unknown."))
return
self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
self.topics = []
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
# Register with matchmaker.
_get_matchmaker().register(topic, CONF.rpc_zmq_host)
# Subscription scenarios
if fanout:
sock_type = zmq.SUB
subscribe = ('', fanout)[type(fanout) == str]
topic = 'fanout~' + topic.split('.', 1)[0]
else:
sock_type = zmq.PULL
subscribe = None
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
if topic in self.topics:
LOG.info(_("Skipping topic registration. Already registered."))
return
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug(_("Consumer is a zmq.%s"),
['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type,
subscribe=subscribe, in_bind=False)
self.topics.append(topic)
def close(self):
_get_matchmaker().stop_heartbeat()
for topic in self.topics:
_get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
self.reactor.close()
self.topics = []
def wait(self):
self.reactor.wait()
def consume_in_thread(self):
_get_matchmaker().start_heartbeat()
self.reactor.consume_in_thread()
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
_msg_id=None):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
with Timeout(timeout_cast, exception=rpc_common.Timeout):
try:
conn = ZmqClient(addr)
# assumes cast can't return an exception
conn.cast(_msg_id, topic, payload, envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
if 'conn' in vars():
conn.close()
def _call(addr, context, topic, msg, timeout=None,
envelope=False):
# timeout_response is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug(_("Creating payload"))
# Curry the original request into a reply method.
mcontext = RpcContext.marshal(context)
payload = {
'method': '-reply',
'args': {
'msg_id': msg_id,
'topic': reply_topic,
# TODO(ewindisch): safe to remove mcontext in I.
'msg': [mcontext, msg]
}
}
LOG.debug(_("Creating queue socket for reply waiter"))
# Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
"ipc://%s/zmq_topic_zmq_replies.%s" %
(CONF.rpc_zmq_ipc_dir,
CONF.rpc_zmq_host),
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug(_("Sending cast"))
_cast(addr, context, topic, payload, envelope)
LOG.debug(_("Cast sent; Waiting reply"))
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg)
LOG.debug(_("Unpacking response"))
if msg[2] == 'cast': # Legacy version
raw_msg = _deserialize(msg[-1])[-1]
elif msg[2] == 'impl_zmq_v2':
rpc_envelope = unflatten_envelope(msg[4:])
raw_msg = rpc_common.deserialize_msg(rpc_envelope)
else:
raise rpc_common.UnsupportedRpcEnvelopeVersion(
_("Unsupported or unknown ZMQ envelope returned."))
responses = raw_msg['args']['response']
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
except (IndexError, KeyError):
raise RPCException(_("RPC Message Invalid."))
finally:
if 'msg_waiter' in vars():
msg_waiter.close()
# It seems we don't need to do all of the following,
# but perhaps it would be useful for multicall?
# One effect of this is that we're checking all
# responses for Exceptions.
for resp in responses:
if isinstance(resp, types.DictType) and 'exc' in resp:
raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None,
envelope=False, _msg_id=None):
"""Wraps the sending of messages.
Dispatches to the matchmaker and sends message to all relevant hosts.
"""
conf = CONF
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = _get_matchmaker().queues(topic)
LOG.debug(_("Sending message(s) to: %s"), queues)
# Don't stack if we have no matchmaker results
if not queues:
LOG.warn(_("No matchmaker results. Not casting."))
# While not strictly a timeout, callers know how to handle
# this exception and a timeout isn't too big a lie.
raise rpc_common.Timeout(_("No match from matchmaker."))
# This supports brokerless fanout (addresses > 1)
for queue in queues:
(_topic, ip_addr) = queue
_addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
_topic, msg, timeout, envelope,
_msg_id)
return
return method(_addr, context, _topic, msg, timeout,
envelope)
def create_connection(conf, new=True):
return Connection(conf)
def multicall(conf, *args, **kwargs):
"""Multiple calls."""
return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
"""Send a message, expect a response."""
data = _multi_send(_call, *args, **kwargs)
return data[-1]
def cast(conf, *args, **kwargs):
"""Send a message expecting no reply."""
_multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
"""Send a message to all listening and expect no reply."""
    # NOTE(ewindisch): fanout~ is used because it avoids splitting on '.'
# and acts as a non-subtle hint to the matchmaker and ZmqProxy.
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
def notify(conf, context, topic, msg, envelope):
"""Send notification event.
Notifications are sent to topic-priority.
This differs from the AMQP drivers which send to topic.priority.
"""
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic = topic.replace('.', '-')
cast(conf, context, topic, msg, envelope=envelope)
def cleanup():
"""Clean up resources in use by implementation."""
global ZMQ_CTX
if ZMQ_CTX:
ZMQ_CTX.term()
ZMQ_CTX = None
global matchmaker
matchmaker = None
def _get_ctxt():
if not zmq:
raise ImportError("Failed to import eventlet.green.zmq")
global ZMQ_CTX
if not ZMQ_CTX:
ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
return ZMQ_CTX
def _get_matchmaker(*args, **kwargs):
global matchmaker
if not matchmaker:
mm = CONF.rpc_zmq_matchmaker
if mm.endswith('matchmaker.MatchMakerRing'):
            mm = mm.replace('matchmaker', 'matchmaker_ring')
LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
' %(new)s instead') % dict(
orig=CONF.rpc_zmq_matchmaker, new=mm))
matchmaker = importutils.import_object(mm, *args, **kwargs)
return matchmaker


@@ -1,324 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""
import contextlib
import eventlet
from oslo.config import cfg
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import log as logging
matchmaker_opts = [
cfg.IntOpt('matchmaker_heartbeat_freq',
default=300,
help='Heartbeat frequency'),
cfg.IntOpt('matchmaker_heartbeat_ttl',
default=600,
help='Heartbeat time-to-live.'),
]
CONF = cfg.CONF
CONF.register_opts(matchmaker_opts)
LOG = logging.getLogger(__name__)
contextmanager = contextlib.contextmanager
class MatchMakerException(Exception):
"""Signified a match could not be found."""
message = _("Match not found by MatchMaker.")
class Exchange(object):
"""Implements lookups.
Subclass this to support hashtables, dns, etc.
"""
def __init__(self):
pass
def run(self, key):
raise NotImplementedError()
class Binding(object):
"""A binding on which to perform a lookup."""
def __init__(self):
pass
def test(self, key):
raise NotImplementedError()
class MatchMakerBase(object):
"""Match Maker Base Class.
Build off HeartbeatMatchMakerBase if building a heartbeat-capable
MatchMaker.
"""
def __init__(self):
# Array of tuples. Index [2] toggles negation, [3] is last-if-true
self.bindings = []
self.no_heartbeat_msg = _('Matchmaker does not implement '
'registration or heartbeat.')
def register(self, key, host):
"""Register a host on a backend.
Heartbeats, if applicable, may keepalive registration.
"""
pass
def ack_alive(self, key, host):
"""Acknowledge that a key.host is alive.
Used internally for updating heartbeats, but may also be used
        publicly to acknowledge a system is alive (i.e. rpc message
successfully sent to host)
"""
pass
def is_alive(self, topic, host):
"""Checks if a host is alive."""
pass
def expire(self, topic, host):
"""Explicitly expire a host's registration."""
pass
def send_heartbeats(self):
"""Send all heartbeats.
Use start_heartbeat to spawn a heartbeat greenthread,
which loops this method.
"""
pass
def unregister(self, key, host):
"""Unregister a topic."""
pass
def start_heartbeat(self):
"""Spawn heartbeat greenthread."""
pass
def stop_heartbeat(self):
"""Destroys the heartbeat greenthread."""
pass
def add_binding(self, binding, rule, last=True):
self.bindings.append((binding, rule, False, last))
#NOTE(ewindisch): kept the following method in case we implement the
# underlying support.
#def add_negate_binding(self, binding, rule, last=True):
# self.bindings.append((binding, rule, True, last))
def queues(self, key):
workers = []
# bit is for negate bindings - if we choose to implement it.
# last stops processing rules if this matches.
for (binding, exchange, bit, last) in self.bindings:
if binding.test(key):
workers.extend(exchange.run(key))
# Support last.
if last:
return workers
return workers
class HeartbeatMatchMakerBase(MatchMakerBase):
"""Base for a heart-beat capable MatchMaker.
Provides common methods for registering, unregistering, and maintaining
heartbeats.
"""
def __init__(self):
self.hosts = set()
self._heart = None
self.host_topic = {}
super(HeartbeatMatchMakerBase, self).__init__()
def send_heartbeats(self):
"""Send all heartbeats.
Use start_heartbeat to spawn a heartbeat greenthread,
which loops this method.
"""
for key, host in self.host_topic:
self.ack_alive(key, host)
def ack_alive(self, key, host):
"""Acknowledge that a host.topic is alive.
Used internally for updating heartbeats, but may also be used
        publicly to acknowledge a system is alive (i.e. rpc message
successfully sent to host)
"""
raise NotImplementedError("Must implement ack_alive")
def backend_register(self, key, host):
"""Implements registration logic.
Called by register(self,key,host)
"""
raise NotImplementedError("Must implement backend_register")
def backend_unregister(self, key, key_host):
"""Implements de-registration logic.
Called by unregister(self,key,host)
"""
raise NotImplementedError("Must implement backend_unregister")
def register(self, key, host):
"""Register a host on a backend.
Heartbeats, if applicable, may keepalive registration.
"""
self.hosts.add(host)
self.host_topic[(key, host)] = host
key_host = '.'.join((key, host))
self.backend_register(key, key_host)
self.ack_alive(key, host)
def unregister(self, key, host):
"""Unregister a topic."""
if (key, host) in self.host_topic:
del self.host_topic[(key, host)]
self.hosts.discard(host)
self.backend_unregister(key, '.'.join((key, host)))
LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
{'key': key, 'host': host})
def start_heartbeat(self):
"""Implementation of MatchMakerBase.start_heartbeat.
Launches greenthread looping send_heartbeats(),
yielding for CONF.matchmaker_heartbeat_freq seconds
between iterations.
"""
if not self.hosts:
raise MatchMakerException(
_("Register before starting heartbeat."))
def do_heartbeat():
while True:
self.send_heartbeats()
eventlet.sleep(CONF.matchmaker_heartbeat_freq)
self._heart = eventlet.spawn(do_heartbeat)
def stop_heartbeat(self):
"""Destroys the heartbeat greenthread."""
if self._heart:
self._heart.kill()
class DirectBinding(Binding):
"""Specifies a host in the key via a '.' character.
Although dots are used in the key, the behavior here is
that it maps directly to a host, thus direct.
"""
def test(self, key):
return '.' in key
class TopicBinding(Binding):
"""Where a 'bare' key without dots.
AMQP generally considers topic exchanges to be those *with* dots,
but we deviate here in terminology as the behavior here matches
that of a topic exchange (whereas where there are dots, behavior
matches that of a direct exchange.
"""
def test(self, key):
return '.' not in key
class FanoutBinding(Binding):
"""Match on fanout keys, where key starts with 'fanout.' string."""
def test(self, key):
return key.startswith('fanout~')
class StubExchange(Exchange):
"""Exchange that does nothing."""
def run(self, key):
return [(key, None)]
class LocalhostExchange(Exchange):
"""Exchange where all direct topics are local."""
def __init__(self, host='localhost'):
self.host = host
super(Exchange, self).__init__()
def run(self, key):
return [('.'.join((key.split('.')[0], self.host)), self.host)]
class DirectExchange(Exchange):
"""Exchange where all topic keys are split, sending to second half.
i.e. "compute.host" sends a message to "compute.host" running on "host"
"""
def __init__(self):
super(Exchange, self).__init__()
def run(self, key):
e = key.split('.', 1)[1]
return [(key, e)]
class MatchMakerLocalhost(MatchMakerBase):
"""Match Maker where all bare topics resolve to localhost.
Useful for testing.
"""
def __init__(self, host='localhost'):
super(MatchMakerLocalhost, self).__init__()
self.add_binding(FanoutBinding(), LocalhostExchange(host))
self.add_binding(DirectBinding(), DirectExchange())
self.add_binding(TopicBinding(), LocalhostExchange(host))
class MatchMakerStub(MatchMakerBase):
"""Match Maker where topics are untouched.
Useful for testing, or for AMQP/brokered queues.
Will not work where knowledge of hosts is known (i.e. zeromq)
"""
def __init__(self):
super(MatchMakerStub, self).__init__()
self.add_binding(FanoutBinding(), StubExchange())
self.add_binding(DirectBinding(), StubExchange())
self.add_binding(TopicBinding(), StubExchange())
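# A small sketch of how MatchMakerLocalhost resolves keys; the dotted host
# name is illustrative. Bare topics resolve to localhost, while dotted keys
# map directly to the host after the first dot.
mm = MatchMakerLocalhost()
assert mm.queues('compute') == [('compute.localhost', 'localhost')]
assert mm.queues('compute.host1') == [('compute.host1', 'host1')]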


@@ -1,145 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""
from oslo.config import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import matchmaker as mm_common
redis = importutils.try_import('redis')
matchmaker_redis_opts = [
cfg.StrOpt('host',
default='127.0.0.1',
help='Host to locate redis'),
cfg.IntOpt('port',
default=6379,
help='Use this port to connect to redis host.'),
cfg.StrOpt('password',
default=None,
help='Password for Redis server. (optional)'),
]
CONF = cfg.CONF
opt_group = cfg.OptGroup(name='matchmaker_redis',
title='Options for Redis-based MatchMaker')
CONF.register_group(opt_group)
CONF.register_opts(matchmaker_redis_opts, opt_group)
LOG = logging.getLogger(__name__)
class RedisExchange(mm_common.Exchange):
def __init__(self, matchmaker):
self.matchmaker = matchmaker
self.redis = matchmaker.redis
super(RedisExchange, self).__init__()
class RedisTopicExchange(RedisExchange):
"""Exchange where all topic keys are split, sending to second half.
i.e. "compute.host" sends a message to "compute" running on "host"
"""
def run(self, topic):
while True:
member_name = self.redis.srandmember(topic)
if not member_name:
# If this happens, there are no
# longer any members.
break
if not self.matchmaker.is_alive(topic, member_name):
continue
host = member_name.split('.', 1)[1]
return [(member_name, host)]
return []
class RedisFanoutExchange(RedisExchange):
"""Return a list of all hosts."""
def run(self, topic):
topic = topic.split('~', 1)[1]
hosts = self.redis.smembers(topic)
good_hosts = filter(
lambda host: self.matchmaker.is_alive(topic, host), hosts)
return [(x, x.split('.', 1)[1]) for x in good_hosts]
class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
"""MatchMaker registering and looking-up hosts with a Redis server."""
def __init__(self):
super(MatchMakerRedis, self).__init__()
if not redis:
raise ImportError("Failed to import module redis.")
self.redis = redis.StrictRedis(
host=CONF.matchmaker_redis.host,
port=CONF.matchmaker_redis.port,
password=CONF.matchmaker_redis.password)
self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self))
self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange())
self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self))
def ack_alive(self, key, host):
topic = "%s.%s" % (key, host)
if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl):
# If we could not update the expiration, the key
# might have been pruned. Re-register, creating a new
# key in Redis.
            self.register(key, host)
def is_alive(self, topic, host):
if self.redis.ttl(host) == -1:
self.expire(topic, host)
return False
return True
def expire(self, topic, host):
with self.redis.pipeline() as pipe:
pipe.multi()
pipe.delete(host)
pipe.srem(topic, host)
pipe.execute()
def backend_register(self, key, key_host):
with self.redis.pipeline() as pipe:
pipe.multi()
pipe.sadd(key, key_host)
# No value is needed, we just
# care if it exists. Sets aren't viable
# because only keys can expire.
pipe.set(key_host, '')
pipe.execute()
def backend_unregister(self, key, key_host):
with self.redis.pipeline() as pipe:
pipe.multi()
pipe.srem(key, key_host)
pipe.delete(key_host)
pipe.execute()
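# A rough sketch, expressed with the redis-py client directly, of what the
# register path leaves behind; host and topic names are illustrative and a
# local Redis on the default port is assumed.
import redis
r = redis.StrictRedis(host='127.0.0.1', port=6379)
r.sadd('compute', 'compute.host1')  # topic set of registered key.host members
r.set('compute.host1', '')          # liveness key, refreshed via expire()
print(r.srandmember('compute'))     # what RedisTopicExchange samples from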


@@ -1,108 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011-2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""
import itertools
import json
from oslo.config import cfg
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import matchmaker as mm
matchmaker_opts = [
# Matchmaker ring file
cfg.StrOpt('ringfile',
deprecated_name='matchmaker_ringfile',
deprecated_group='DEFAULT',
default='/etc/oslo/matchmaker_ring.json',
help='Matchmaker ring file (JSON)'),
]
CONF = cfg.CONF
CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
LOG = logging.getLogger(__name__)
class RingExchange(mm.Exchange):
"""Match Maker where hosts are loaded from a static JSON formatted file.
    __init__ takes an optional ring dictionary argument; otherwise it
    loads the ringfile from CONF.matchmaker_ring.ringfile.
"""
def __init__(self, ring=None):
super(RingExchange, self).__init__()
if ring:
self.ring = ring
else:
fh = open(CONF.matchmaker_ring.ringfile, 'r')
self.ring = json.load(fh)
fh.close()
self.ring0 = {}
for k in self.ring.keys():
self.ring0[k] = itertools.cycle(self.ring[k])
def _ring_has(self, key):
return key in self.ring0
class RoundRobinRingExchange(RingExchange):
"""A Topic Exchange based on a hashmap."""
def __init__(self, ring=None):
super(RoundRobinRingExchange, self).__init__(ring)
def run(self, key):
if not self._ring_has(key):
LOG.warn(
_("No key defining hosts for topic '%s', "
"see ringfile") % (key, )
)
return []
host = next(self.ring0[key])
return [(key + '.' + host, host)]
class FanoutRingExchange(RingExchange):
"""Fanout Exchange based on a hashmap."""
def __init__(self, ring=None):
super(FanoutRingExchange, self).__init__(ring)
def run(self, key):
# Assume starts with "fanout~", strip it for lookup.
nkey = key.split('fanout~')[1:][0]
if not self._ring_has(nkey):
LOG.warn(
_("No key defining hosts for topic '%s', "
"see ringfile") % (nkey, )
)
return []
return map(lambda x: (key + '.' + x, x), self.ring[nkey])
class MatchMakerRing(mm.MatchMakerBase):
"""Match Maker where hosts are loaded from a static hashmap."""
def __init__(self, ring=None):
super(MatchMakerRing, self).__init__()
self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring))
self.add_binding(mm.DirectBinding(), mm.DirectExchange())
self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring))
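# For illustration, a minimal ring (topics and hosts are hypothetical); the
# same mapping would live in the JSON ringfile on disk.
ring = {"compute": ["host1", "host2"], "scheduler": ["host1"]}
mm = MatchMakerRing(ring)
print(mm.queues('compute'))  # round-robins, e.g. [('compute.host1', 'host1')]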


@@ -1,226 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A helper class for proxy objects to remote APIs.
For more information about rpc API version numbers, see:
rpc/dispatcher.py
"""
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common.rpc import serializer as rpc_serializer
class RpcProxy(object):
"""A helper class for rpc clients.
This class is a wrapper around the RPC client API. It allows you to
specify the topic and API version in a single place. This is intended to
be used as a base class for a class that implements the client side of an
rpc API.
"""
    # The default namespace, which can be overridden in a subclass.
RPC_API_NAMESPACE = None
def __init__(self, topic, default_version, version_cap=None,
serializer=None):
"""Initialize an RpcProxy.
:param topic: The topic to use for all messages.
:param default_version: The default API version to request in all
outgoing messages. This can be overridden on a per-message
basis.
:param version_cap: Optionally cap the maximum version used for sent
messages.
        :param serializer: Optionally (de-)serialize entities with a
provided helper.
"""
self.topic = topic
self.default_version = default_version
self.version_cap = version_cap
if serializer is None:
serializer = rpc_serializer.NoOpSerializer()
self.serializer = serializer
super(RpcProxy, self).__init__()
def _set_version(self, msg, vers):
"""Helper method to set the version in a message.
:param msg: The message having a version added to it.
:param vers: The version number to add to the message.
"""
v = vers if vers else self.default_version
if (self.version_cap and not
rpc_common.version_is_compatible(self.version_cap, v)):
raise rpc_common.RpcVersionCapError(version_cap=self.version_cap)
msg['version'] = v
def _get_topic(self, topic):
"""Return the topic to use for a message."""
return topic if topic else self.topic
def can_send_version(self, version):
"""Check to see if a version is compatible with the version cap."""
return (not self.version_cap or
rpc_common.version_is_compatible(self.version_cap, version))
@staticmethod
def make_namespaced_msg(method, namespace, **kwargs):
return {'method': method, 'namespace': namespace, 'args': kwargs}
def make_msg(self, method, **kwargs):
return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
**kwargs)
def _serialize_msg_args(self, context, kwargs):
"""Helper method called to serialize message arguments.
This calls our serializer on each argument, returning a new
set of args that have been serialized.
:param context: The request context
:param kwargs: The arguments to serialize
:returns: A new set of serialized arguments
"""
new_kwargs = dict()
for argname, arg in kwargs.iteritems():
new_kwargs[argname] = self.serializer.serialize_entity(context,
arg)
return new_kwargs
def call(self, context, msg, topic=None, version=None, timeout=None):
"""rpc.call() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:param timeout: (Optional) A timeout to use when waiting for the
response. If no timeout is specified, a default timeout will be
used that is usually sufficient.
:returns: The return value from the remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
real_topic = self._get_topic(topic)
try:
result = rpc.call(context, real_topic, msg, timeout)
return self.serializer.deserialize_entity(context, result)
except rpc.common.Timeout as exc:
raise rpc.common.Timeout(
exc.info, real_topic, msg.get('method'))
def multicall(self, context, msg, topic=None, version=None, timeout=None):
"""rpc.multicall() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:param timeout: (Optional) A timeout to use when waiting for the
response. If no timeout is specified, a default timeout will be
used that is usually sufficient.
:returns: An iterator that lets you process each of the returned values
from the remote method as they arrive.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
real_topic = self._get_topic(topic)
try:
result = rpc.multicall(context, real_topic, msg, timeout)
return self.serializer.deserialize_entity(context, result)
except rpc.common.Timeout as exc:
raise rpc.common.Timeout(
exc.info, real_topic, msg.get('method'))
def cast(self, context, msg, topic=None, version=None):
"""rpc.cast() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.cast() does not wait on any return value from the
remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.cast(context, self._get_topic(topic), msg)
def fanout_cast(self, context, msg, topic=None, version=None):
"""rpc.fanout_cast() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.fanout_cast() does not wait on any return value
from the remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.fanout_cast(context, self._get_topic(topic), msg)
def cast_to_server(self, context, server_params, msg, topic=None,
version=None):
"""rpc.cast_to_server() a remote method.
:param context: The request context
:param server_params: Server parameters. See rpc.cast_to_server() for
details.
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.cast_to_server() does not wait on any
return values.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)
def fanout_cast_to_server(self, context, server_params, msg, topic=None,
version=None):
"""rpc.fanout_cast_to_server() a remote method.
:param context: The request context
:param server_params: Server parameters. See rpc.cast_to_server() for
details.
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.fanout_cast_to_server() does not wait on any
return values.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.fanout_cast_to_server(context, server_params,
self._get_topic(topic), msg)
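# A hedged sketch of the intended subclassing pattern; the class, topic, and
# method names are invented for illustration.
class ExampleAPI(RpcProxy):
    BASE_RPC_API_VERSION = '1.0'
    def __init__(self):
        super(ExampleAPI, self).__init__(
            topic='example', default_version=self.BASE_RPC_API_VERSION)
    def ping(self, context, arg):
        return self.call(context, self.make_msg('ping', arg=arg))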


@@ -1,52 +0,0 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides the definition of an RPC serialization handler"""
import abc
class Serializer(object):
"""Generic (de-)serialization definition base class."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def serialize_entity(self, context, entity):
"""Serialize something to primitive form.
:param context: Security context
:param entity: Entity to be serialized
:returns: Serialized form of entity
"""
pass
@abc.abstractmethod
def deserialize_entity(self, context, entity):
"""Deserialize something from primitive form.
:param context: Security context
:param entity: Primitive to be deserialized
:returns: Deserialized form of entity
"""
pass
class NoOpSerializer(Serializer):
"""A serializer that does nothing."""
def serialize_entity(self, context, entity):
return entity
def deserialize_entity(self, context, entity):
return entity
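# A minimal sketch of a concrete Serializer; the JSON round-trip behavior is
# illustrative and not taken from this module.
import json
class PrimitiveSerializer(Serializer):
    """Hypothetical serializer flattening entities to JSON-safe primitives."""
    def serialize_entity(self, context, entity):
        return json.loads(json.dumps(entity))
    def deserialize_entity(self, context, entity):
        return entity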


@@ -1,78 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
from nova.openstack.common import service
LOG = logging.getLogger(__name__)
class Service(service.Service):
"""Service object for binaries running on hosts.
A service enables rpc by listening to queues based on topic and host.
"""
def __init__(self, host, topic, manager=None, serializer=None):
super(Service, self).__init__()
self.host = host
self.topic = topic
self.serializer = serializer
if manager is None:
self.manager = self
else:
self.manager = manager
def start(self):
super(Service, self).start()
self.conn = rpc.create_connection(new=True)
LOG.debug(_("Creating Consumer connection for Service %s") %
self.topic)
dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
self.serializer)
# Share this same connection for these Consumers
self.conn.create_consumer(self.topic, dispatcher, fanout=False)
node_topic = '%s.%s' % (self.topic, self.host)
self.conn.create_consumer(node_topic, dispatcher, fanout=False)
self.conn.create_consumer(self.topic, dispatcher, fanout=True)
# Hook to allow the manager to do other initializations after
# the rpc connection is created.
if callable(getattr(self.manager, 'initialize_service_hook', None)):
self.manager.initialize_service_hook(self)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
def stop(self):
# Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them, as we're shutting down anyway.
try:
self.conn.close()
except Exception:
pass
super(Service, self).stop()
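# A hypothetical usage sketch: any public method on the manager becomes an
# RPC endpoint once the consumers are created; the names are illustrative.
class PingManager(object):
    def ping(self, context):
        return 'pong'
svc = Service(host='host1', topic='example', manager=PingManager())
svc.start()  # consumes on 'example', 'example.host1', and the fanout queue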


@@ -1,40 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
import contextlib
import sys
from oslo.config import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common.rpc import impl_zmq
CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)
def main():
CONF(sys.argv[1:], project='oslo')
logging.setup("oslo")
with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
reactor.consume_in_thread()
reactor.wait()
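# The receiver is meant to run as a standalone daemon; a conventional
# entry-point guard (not present in the original snippet) would look like:
if __name__ == '__main__':
    main()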


@@ -1,459 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import os
import random
import signal
import sys
import time
import eventlet
from eventlet import event
import logging as std_logging
from oslo.config import cfg
from nova.openstack.common import eventlet_backdoor
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import threadgroup
rpc = importutils.try_import('nova.openstack.common.rpc')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _sighup_supported():
return hasattr(signal, 'SIGHUP')
def _is_sighup(signo):
return _sighup_supported() and signo == signal.SIGHUP
def _signo_to_signame(signo):
signals = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}
if _sighup_supported():
signals[signal.SIGHUP] = 'SIGHUP'
return signals[signo]
def _set_signals_handler(handler):
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT, handler)
if _sighup_supported():
signal.signal(signal.SIGHUP, handler)
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
def __init__(self):
"""Initialize the service launcher.
:returns: None
"""
self.services = Services()
self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
def launch_service(self, service):
"""Load and start the given service.
:param service: The service you would like to start.
:returns: None
"""
service.backdoor_port = self.backdoor_port
self.services.add(service)
def stop(self):
"""Stop all services which are currently running.
:returns: None
"""
self.services.stop()
def wait(self):
"""Waits until all services have been stopped, and then returns.
:returns: None
"""
self.services.wait()
def restart(self):
"""Reload config files and restart service.
:returns: None
"""
cfg.CONF.reload_config_files()
self.services.restart()
class SignalExit(SystemExit):
def __init__(self, signo, exccode=1):
super(SignalExit, self).__init__(exccode)
self.signo = signo
class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame):
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
raise SignalExit(signo)
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _wait_for_exit_or_signal(self):
status = None
signo = 0
LOG.debug(_('Full set of CONF:'))
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
finally:
self.stop()
if rpc:
try:
rpc.cleanup()
except Exception:
# We're shutting down, so it doesn't matter at this point.
LOG.exception(_('Exception during rpc cleanup.'))
return status, signo
def wait(self):
while True:
self.handle_signal()
status, signo = self._wait_for_exit_or_signal()
if not _is_sighup(signo):
return status
self.restart()
class ServiceWrapper(object):
def __init__(self, service, workers):
self.service = service
self.workers = workers
self.children = set()
self.forktimes = []
class ProcessLauncher(object):
def __init__(self):
self.children = {}
self.sigcaught = None
self.running = True
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
self.handle_signal()
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _handle_signal(self, signo, frame):
self.sigcaught = signo
self.running = False
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
# dies unexpectedly
self.readpipe.read()
LOG.info(_('Parent process has died unexpectedly, exiting'))
sys.exit(1)
def _child_process_handle_signal(self):
# Setup child signal handlers differently
def _sigterm(*args):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
raise SignalExit(signal.SIGTERM)
def _sighup(*args):
signal.signal(signal.SIGHUP, signal.SIG_DFL)
raise SignalExit(signal.SIGHUP)
signal.signal(signal.SIGTERM, _sigterm)
if _sighup_supported():
signal.signal(signal.SIGHUP, _sighup)
# Block SIGINT and let the parent send us a SIGTERM
signal.signal(signal.SIGINT, signal.SIG_IGN)
def _child_wait_for_exit_or_signal(self, launcher):
status = None
signo = 0
# NOTE(johannes): All exceptions are caught to ensure this
# doesn't fallback into the loop spawning children. It would
# be bad for a child to spawn more children.
try:
launcher.wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_('Unhandled exception'))
status = 2
finally:
launcher.stop()
return status, signo
def _child_process(self, service):
self._child_process_handle_signal()
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
eventlet.hubs.use_hub()
# Close write to ensure only parent has it open
os.close(self.writepipe)
# Create greenthread to watch for parent to close pipe
eventlet.spawn_n(self._pipe_watcher)
# Reseed random number generator
random.seed()
launcher = Launcher()
launcher.launch_service(service)
return launcher
def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This allows workers to
            # start up quickly but ensures we don't keep respawning
            # children that die instantly.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
wrap.forktimes.append(time.time())
pid = os.fork()
if pid == 0:
launcher = self._child_process(wrap.service)
while True:
self._child_process_handle_signal()
status, signo = self._child_wait_for_exit_or_signal(launcher)
if not _is_sighup(signo):
break
launcher.restart()
os._exit(status)
LOG.info(_('Started child %d'), pid)
wrap.children.add(pid)
self.children[pid] = wrap
return pid
def launch_service(self, service, workers=1):
wrap = ServiceWrapper(service, workers)
LOG.info(_('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def _wait_child(self):
try:
# Don't block if no child processes have exited
pid, status = os.waitpid(0, os.WNOHANG)
if not pid:
return None
except OSError as exc:
if exc.errno not in (errno.EINTR, errno.ECHILD):
raise
return None
if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status)
LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
dict(pid=pid, sig=sig))
else:
code = os.WEXITSTATUS(status)
LOG.info(_('Child %(pid)s exited with status %(code)d'),
dict(pid=pid, code=code))
if pid not in self.children:
LOG.warning(_('pid %d not in child list'), pid)
return None
wrap = self.children.pop(pid)
wrap.children.remove(pid)
return wrap
def _respawn_children(self):
while self.running:
wrap = self._wait_child()
if not wrap:
# Yield to other threads if no children have exited
# Sleep for a short time to avoid excessive CPU usage
# (see bug #1095346)
eventlet.greenthread.sleep(.01)
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def wait(self):
"""Loop waiting on children to die and respawning as necessary."""
LOG.debug(_('Full set of CONF:'))
CONF.log_opt_values(LOG, std_logging.DEBUG)
while True:
self.handle_signal()
self._respawn_children()
if self.sigcaught:
signame = _signo_to_signame(self.sigcaught)
LOG.info(_('Caught %s, stopping children'), signame)
if not _is_sighup(self.sigcaught):
break
for pid in self.children:
os.kill(pid, signal.SIGHUP)
self.running = True
self.sigcaught = None
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
# Wait for children to die
if self.children:
LOG.info(_('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()
class Service(object):
"""Service object for binaries running on hosts."""
def __init__(self, threads=1000):
self.tg = threadgroup.ThreadGroup(threads)
# signal that the service is done shutting itself down:
self._done = event.Event()
def reset(self):
# NOTE(Fengqian): docs for Event.reset() recommend against using it
self._done = event.Event()
def start(self):
pass
def stop(self):
self.tg.stop()
self.tg.wait()
# Signal that service cleanup is done:
if not self._done.ready():
self._done.send()
def wait(self):
self._done.wait()
class Services(object):
def __init__(self):
self.services = []
self.tg = threadgroup.ThreadGroup()
self.done = event.Event()
def add(self, service):
self.services.append(service)
self.tg.add_thread(self.run_service, service, self.done)
def stop(self):
# wait for graceful shutdown of services:
for service in self.services:
service.stop()
service.wait()
# Each service has performed cleanup, now signal that the run_service
# wrapper threads can now die:
if not self.done.ready():
self.done.send()
# reap threads:
self.tg.stop()
def wait(self):
self.tg.wait()
def restart(self):
self.stop()
self.done = event.Event()
for restart_service in self.services:
restart_service.reset()
self.tg.add_thread(self.run_service, restart_service, self.done)
@staticmethod
def run_service(service, done):
"""Service start wrapper.
:param service: service to run
:param done: event to wait on until a shutdown is triggered
:returns: None
"""
service.start()
done.wait()
def launch(service, workers=None):
if workers:
launcher = ProcessLauncher()
launcher.launch_service(service, workers=workers)
else:
launcher = ServiceLauncher()
launcher.launch_service(service)
return launcher
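# Usage sketch (editorial illustration, not part of the original module).
# A caller builds a Service (or any subclass) and hands it to launch(); the
# workers argument selects between in-process and forking launchers:
#     service = Service()
#     launcher = launch(service, workers=4)  # ProcessLauncher, 4 children
#     launcher.wait()                        # block until shutdown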

View File

@ -1,100 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ssl
from oslo.config import cfg
from nova.openstack.common.gettextutils import _ # noqa
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
_SSL_PROTOCOLS = {
"tlsv1": ssl.PROTOCOL_TLSv1,
"sslv23": ssl.PROTOCOL_SSLv23,
"sslv3": ssl.PROTOCOL_SSLv3
}
try:
_SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
except AttributeError:
pass
def validate_ssl_version(version):
key = version.lower()
try:
return _SSL_PROTOCOLS[key]
except KeyError:
raise RuntimeError(_("Invalid SSL version : %s") % version)

View File

@ -1,216 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import re
import sys
import unicodedata
from nova.openstack.common.gettextutils import _
# Used for looking up extensions of text
# to their 'multiplied' byte amount
BYTE_MULTIPLIERS = {
'': 1,
't': 1024 ** 4,
'g': 1024 ** 3,
'm': 1024 ** 2,
'k': 1024,
}
BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)')
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
def int_from_bool_as_string(subject):
"""Interpret a string as a boolean and return either 1 or 0.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
return bool_from_string(subject) and 1 or 0
def bool_from_string(subject, strict=False):
"""Interpret a string as a boolean.
A case-insensitive match is performed such that strings matching 't',
'true', 'on', 'y', 'yes', or '1' are considered True and, when
`strict=False`, anything else is considered False.
Useful for JSON-decoded stuff and config file parsing.
If `strict=True`, unrecognized values, including None, will raise a
ValueError which is useful when parsing values passed in from an API call.
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
"""
if not isinstance(subject, basestring):
subject = str(subject)
lowered = subject.strip().lower()
if lowered in TRUE_STRINGS:
return True
elif lowered in FALSE_STRINGS:
return False
elif strict:
acceptable = ', '.join(
"'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
msg = _("Unrecognized value '%(val)s', acceptable values are:"
" %(acceptable)s") % {'val': subject,
'acceptable': acceptable}
raise ValueError(msg)
else:
return False
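# Usage sketch (editorial): matching is case-insensitive and lenient by
# default, strict only on request, e.g.
#     bool_from_string('YES')                 # -> True
#     bool_from_string('maybe')               # -> False
#     bool_from_string('maybe', strict=True)  # raises ValueError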
def safe_decode(text, incoming=None, errors='strict'):
"""Decodes incoming str using `incoming` if they're not already unicode.
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a unicode `incoming` encoded
representation of it.
:raises TypeError: If text is not an instance of basestring
"""
if not isinstance(text, basestring):
raise TypeError("%s can't be decoded" % type(text))
if isinstance(text, unicode):
return text
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
try:
return text.decode(incoming, errors)
except UnicodeDecodeError:
# Note(flaper87) If we get here, it means that
# sys.stdin.encoding / sys.getdefaultencoding
# didn't return a suitable encoding to decode
# text. This happens mostly when global LANG
# var is not set correctly and there's no
# default encoding. In this case, most likely
# python will use ASCII or ANSI encoders as
# default encodings but they won't be capable
# of decoding non-ASCII characters.
#
# Also, UTF-8 is being used since it's an ASCII
# extension.
return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
encoding='utf-8', errors='strict'):
"""Encodes incoming str/unicode using `encoding`.
If incoming is not specified, text is expected to be encoded with
current python's default encoding. (`sys.getdefaultencoding`)
:param incoming: Text's current encoding
:param encoding: Expected encoding for text (Default UTF-8)
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a bytestring `encoding` encoded
representation of it.
:raises TypeError: If text is not an instance of basestring
"""
if not isinstance(text, basestring):
raise TypeError("%s can't be encoded" % type(text))
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
if isinstance(text, unicode):
return text.encode(encoding, errors)
elif text and encoding != incoming:
# Decode text before encoding it with `encoding`
text = safe_decode(text, incoming, errors)
return text.encode(encoding, errors)
return text
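# Usage sketch (editorial): the two helpers round-trip between unicode and
# UTF-8 bytestrings under Python 2 semantics, e.g.
#     safe_decode(b'caf\xc3\xa9', incoming='utf-8')  # -> u'caf\xe9'
#     safe_encode(u'caf\xe9')                        # -> b'caf\xc3\xa9'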
def to_bytes(text, default=0):
"""Converts a string into an integer of bytes.
Looks at the last characters of the text to determine
what conversion is needed to turn the input text into a byte number.
Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)
:param text: String input for bytes size conversion.
:param default: Default return value when text is blank.
"""
match = BYTE_REGEX.search(text)
if match:
magnitude = int(match.group(1))
mult_key_org = match.group(2)
if not mult_key_org:
return magnitude
elif text:
msg = _('Invalid string format: %s') % text
raise TypeError(msg)
else:
return default
mult_key = mult_key_org.lower().replace('b', '', 1)
multiplier = BYTE_MULTIPLIERS.get(mult_key)
if multiplier is None:
msg = _('Unknown byte multiplier: %s') % mult_key_org
raise TypeError(msg)
return magnitude * multiplier
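# Usage sketch (editorial): suffix parsing is case-insensitive and the
# trailing 'b' is optional, e.g.
#     to_bytes('1024')  # -> 1024
#     to_bytes('1KB')   # -> 1024
#     to_bytes('2MB')   # -> 2097152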
def to_slug(value, incoming=None, errors="strict"):
"""Normalize string.
Convert to lowercase, remove non-word characters, and convert spaces
to hyphens.
Inspired by Django's `slugify` filter.
:param value: Text to slugify
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: slugified unicode representation of `value`
:raises TypeError: If text is not an instance of basestring
"""
value = safe_decode(value, incoming, errors)
# NOTE(aababilov): no need to use safe_(encode|decode) here:
# encodings are always "ascii", error handling is always "ignore"
# and types are always known (first: unicode; second: str)
value = unicodedata.normalize("NFKD", value).encode(
"ascii", "ignore").decode("ascii")
value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
return SLUGIFY_HYPHENATE_RE.sub("-", value)
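# Usage sketch (editorial):
#     to_slug("Hello, World!")  # -> u'hello-world'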

View File

@ -1,121 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenlet
from eventlet import greenpool
from eventlet import greenthread
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
def _thread_done(gt, *args, **kwargs):
"""Callback function to be passed to GreenThread.link() when we spawn()
Calls the :class:`ThreadGroup` to notify it.
"""
kwargs['group'].thread_done(kwargs['thread'])
class Thread(object):
"""Wrapper around a greenthread, that holds a reference to the
:class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
it has done so it can be removed from the threads list.
"""
def __init__(self, thread, group):
self.thread = thread
self.thread.link(_thread_done, group=group, thread=self)
def stop(self):
self.thread.kill()
def wait(self):
return self.thread.wait()
class ThreadGroup(object):
"""The point of the ThreadGroup classis to:
* keep track of timers and greenthreads (making it easier to stop them
if need be).
* provide an easy API to add timers.
"""
def __init__(self, thread_pool_size=10):
self.pool = greenpool.GreenPool(thread_pool_size)
self.threads = []
self.timers = []
def add_dynamic_timer(self, callback, initial_delay=None,
periodic_interval_max=None, *args, **kwargs):
timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
timer.start(initial_delay=initial_delay,
periodic_interval_max=periodic_interval_max)
self.timers.append(timer)
def add_timer(self, interval, callback, initial_delay=None,
*args, **kwargs):
pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
pulse.start(interval=interval,
initial_delay=initial_delay)
self.timers.append(pulse)
def add_thread(self, callback, *args, **kwargs):
gt = self.pool.spawn(callback, *args, **kwargs)
th = Thread(gt, self)
self.threads.append(th)
def thread_done(self, thread):
self.threads.remove(thread)
def stop(self):
current = greenthread.getcurrent()
for x in self.threads:
if x is current:
# don't kill the current thread.
continue
try:
x.stop()
except Exception as ex:
LOG.exception(ex)
for x in self.timers:
try:
x.stop()
except Exception as ex:
LOG.exception(ex)
self.timers = []
def wait(self):
for x in self.timers:
try:
x.wait()
except greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
current = greenthread.getcurrent()
for x in self.threads:
if x is current:
continue
try:
x.wait()
except greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
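# Usage sketch (editorial, not part of the original module): a ThreadGroup
# owns timers and green threads and can tear both down; heartbeat and
# worker below are hypothetical callables:
#     tg = ThreadGroup(thread_pool_size=10)
#     tg.add_timer(60, heartbeat)   # call heartbeat() every 60 seconds
#     tg.add_thread(worker, 'arg')  # spawn worker('arg') in the pool
#     tg.stop()                     # kill threads, stop timers
#     tg.wait()                     # wait for everything to finish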

View File

@ -1,204 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import time
import iso8601
import six
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
def isotime(at=None, subsecond=False):
"""Stringify time in ISO 8601 format."""
if not at:
at = utcnow()
st = at.strftime(_ISO8601_TIME_FORMAT
if not subsecond
else _ISO8601_TIME_FORMAT_SUBSECOND)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
st += ('Z' if tz == 'UTC' else tz)
return st
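# Usage sketch (editorial): naive datetimes are rendered as UTC with a
# 'Z' suffix, e.g.
#     isotime(datetime.datetime(2014, 1, 28, 23, 8, 59))
#     # -> '2014-01-28T23:08:59Z'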
def parse_isotime(timestr):
"""Parse time from ISO 8601 format."""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(six.text_type(e))
except TypeError as e:
raise ValueError(six.text_type(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
"""Returns formatted utcnow."""
if not at:
at = utcnow()
return at.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
"""Turn a formatted time back into a datetime."""
return datetime.datetime.strptime(timestr, fmt)
def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC naive object."""
offset = timestamp.utcoffset()
if offset is None:
return timestamp
return timestamp.replace(tzinfo=None) - offset
def is_older_than(before, seconds):
"""Return True if before is older than seconds."""
if isinstance(before, six.string_types):
before = parse_strtime(before).replace(tzinfo=None)
return utcnow() - before > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
"""Return True if after is newer than seconds."""
if isinstance(after, six.string_types):
after = parse_strtime(after).replace(tzinfo=None)
return after - utcnow() > datetime.timedelta(seconds=seconds)
def utcnow_ts():
"""Timestamp version of our utcnow function."""
if utcnow.override_time is None:
# NOTE(kgriffs): This is several times faster
# than going through calendar.timegm(...)
return int(time.time())
return calendar.timegm(utcnow().timetuple())
def utcnow():
"""Overridable version of utils.utcnow."""
if utcnow.override_time:
try:
return utcnow.override_time.pop(0)
except AttributeError:
return utcnow.override_time
return datetime.datetime.utcnow()
def iso8601_from_timestamp(timestamp):
"""Returns a iso8601 formated date from timestamp."""
return isotime(datetime.datetime.utcfromtimestamp(timestamp))
utcnow.override_time = None
def set_time_override(override_time=None):
"""Overrides utils.utcnow.
Make it return a constant time or a list thereof, one at a time.
:param override_time: datetime instance or list thereof. If not
given, defaults to the current UTC time.
"""
utcnow.override_time = override_time or datetime.datetime.utcnow()
def advance_time_delta(timedelta):
"""Advance overridden time using a datetime.timedelta."""
assert utcnow.override_time is not None
try:
for dt in utcnow.override_time:
dt += timedelta
except TypeError:
utcnow.override_time += timedelta
def advance_time_seconds(seconds):
"""Advance overridden time by seconds."""
advance_time_delta(datetime.timedelta(0, seconds))
def clear_time_override():
"""Remove the overridden time."""
utcnow.override_time = None
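# Usage sketch (editorial): tests freeze and advance the clock like so:
#     set_time_override(datetime.datetime(2012, 12, 5))
#     utcnow()                  # -> datetime.datetime(2012, 12, 5, 0, 0)
#     advance_time_seconds(30)  # overridden clock moves forward 30s
#     clear_time_override()     # utcnow() is live again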
def marshall_now(now=None):
"""Make an rpc-safe datetime with microseconds.
Note: tzinfo is stripped, but not required for relative times.
"""
if not now:
now = utcnow()
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
minute=now.minute, second=now.second,
microsecond=now.microsecond)
def unmarshall_time(tyme):
"""Unmarshall a datetime dict."""
return datetime.datetime(day=tyme['day'],
month=tyme['month'],
year=tyme['year'],
hour=tyme['hour'],
minute=tyme['minute'],
second=tyme['second'],
microsecond=tyme['microsecond'])
def delta_seconds(before, after):
"""Return the difference between two timing objects.
Compute the difference in seconds between two date, time, or
datetime objects (as a float, to microsecond resolution).
"""
delta = after - before
return total_seconds(delta)
def total_seconds(delta):
"""Return the total seconds of datetime.timedelta object.
Compute total seconds of datetime.timedelta, datetime.timedelta
doesn't have method total_seconds in Python2.6, calculate it manually.
"""
try:
return delta.total_seconds()
except AttributeError:
return ((delta.days * 24 * 3600) + delta.seconds +
float(delta.microseconds) / (10 ** 6))
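# Usage sketch (editorial):
#     total_seconds(datetime.timedelta(days=1, microseconds=500000))
#     # -> 86400.5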
def is_soon(dt, window):
"""Determines if time is going to happen in the next window seconds.
:param dt: the time
:param window: minimum seconds to remain to consider the time not soon
:return: True if expiration is within the given duration
"""
soon = (utcnow() + datetime.timedelta(seconds=window))
return normalize_time(dt) <= soon

View File

@ -1,39 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
UUID related utilities and helper functions.
"""
import uuid
def generate_uuid():
return str(uuid.uuid4())
def is_uuid_like(val):
"""Returns validation of a value as a UUID.
For our purposes, a UUID is a canonical form string:
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
"""
try:
return str(uuid.UUID(val)) == val
except (TypeError, ValueError, AttributeError):
return False
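# Usage sketch (editorial):
#     is_uuid_like(generate_uuid())  # -> True
#     is_uuid_like('not-a-uuid')     # -> False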

View File

@ -1,45 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for comparing version strings.
"""
import pkg_resources
def is_compatible(requested_version, current_version, same_major=True):
"""Determine whether `requested_version` is satisfied by
`current_version`; in other words, `current_version` is >=
`requested_version`.
:param requested_version: version to check for compatibility
:param current_version: version to check against
:param same_major: if True, the major version must be identical between
`requested_version` and `current_version`. This is used when a
major-version difference indicates incompatibility between the two
versions. Since this is the common-case in practice, the default is
True.
:returns: True if compatible, False if not
"""
requested_parts = pkg_resources.parse_version(requested_version)
current_parts = pkg_resources.parse_version(current_version)
if same_major and (requested_parts[0] != current_parts[0]):
return False
return current_parts >= requested_parts
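# Usage sketch (editorial):
#     is_compatible('2.6', '2.9')                    # -> True
#     is_compatible('2.6', '3.0')                    # -> False (major bump)
#     is_compatible('2.6', '3.0', same_major=False)  # -> True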

View File

@ -1,74 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.dom import minidom
from xml.parsers import expat
from xml import sax
from xml.sax import expatreader
class ProtectedExpatParser(expatreader.ExpatParser):
"""An expat parser which disables DTD's and entities by default."""
def __init__(self, forbid_dtd=True, forbid_entities=True,
*args, **kwargs):
# Python 2.x old style class
expatreader.ExpatParser.__init__(self, *args, **kwargs)
self.forbid_dtd = forbid_dtd
self.forbid_entities = forbid_entities
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise ValueError("Inline DTD forbidden")
def entity_decl(self, entityName, is_parameter_entity, value, base,
systemId, publicId, notationName):
raise ValueError("<!ENTITY> entity declaration forbidden")
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise ValueError("<!ENTITY> unparsed entity forbidden")
def external_entity_ref(self, context, base, systemId, publicId):
raise ValueError("<!ENTITY> external entity forbidden")
def notation_decl(self, name, base, sysid, pubid):
raise ValueError("<!ENTITY> notation forbidden")
def reset(self):
expatreader.ExpatParser.reset(self)
if self.forbid_dtd:
self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
self._parser.EndDoctypeDeclHandler = None
if self.forbid_entities:
self._parser.EntityDeclHandler = self.entity_decl
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
self._parser.ExternalEntityRefHandler = self.external_entity_ref
self._parser.NotationDeclHandler = self.notation_decl
try:
self._parser.SkippedEntityHandler = None
except AttributeError:
# some pyexpat versions do not support SkippedEntity
pass
def safe_minidom_parse_string(xml_string):
"""Parse an XML string using minidom safely.
"""
try:
return minidom.parseString(xml_string, parser=ProtectedExpatParser())
except sax.SAXParseException:
raise expat.ExpatError()
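# Usage sketch (editorial): well-formed XML parses as with plain minidom,
# while documents declaring DTDs or entities are rejected by the handlers
# above, e.g.
#     safe_minidom_parse_string('<a>ok</a>')  # -> minidom Document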

View File

@ -1,69 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from oslo.config import cfg
path_opts = [
cfg.StrOpt('pybasedir',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../')),
help='Directory where the nova python module is installed'),
cfg.StrOpt('bindir',
default=os.path.join(sys.prefix, 'local', 'bin'),
help='Directory where nova binaries are installed'),
cfg.StrOpt('state_path',
default='$pybasedir',
help="Top-level directory for maintaining nova's state"),
]
CONF = cfg.CONF
CONF.register_opts(path_opts)
def basedir_def(*args):
"""Return an uninterpolated path relative to $pybasedir."""
return os.path.join('$pybasedir', *args)
def bindir_def(*args):
"""Return an uninterpolated path relative to $bindir."""
return os.path.join('$bindir', *args)
def state_path_def(*args):
"""Return an uninterpolated path relative to $state_path."""
return os.path.join('$state_path', *args)
def basedir_rel(*args):
"""Return a path relative to $pybasedir."""
return os.path.join(CONF.pybasedir, *args)
def bindir_rel(*args):
"""Return a path relative to $bindir."""
return os.path.join(CONF.bindir, *args)
def state_path_rel(*args):
"""Return a path relative to $state_path."""
return os.path.join(CONF.state_path, *args)
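# Usage sketch (editorial): the *_def helpers return uninterpolated
# templates while the *_rel helpers substitute the configured value, e.g.
#     state_path_def('etc/nova')  # -> '$state_path/etc/nova'
#     state_path_rel('etc/nova')  # -> os.path.join(CONF.state_path,
#                                 #                 'etc/nova')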

File diff suppressed because it is too large

View File

@ -1,55 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions that won't produce circular imports."""
import inspect
def getcallargs(function, *args, **kwargs):
"""This is a simplified inspect.getcallargs (2.7+).
It should be replaced when python >= 2.7 is standard.
"""
keyed_args = {}
argnames, varargs, keywords, defaults = inspect.getargspec(function)
keyed_args.update(kwargs)
# NOTE(alaski): the implicit 'self' or 'cls' argument shows up in
# argnames but not in args or kwargs. Uses 'in' rather than '==' because
# some tests use 'self2'.
if 'self' in argnames[0] or 'cls' == argnames[0]:
# The function may not actually be a method or have im_self.
# Typically seen when it's stubbed with mox.
if inspect.ismethod(function) and hasattr(function, 'im_self'):
keyed_args[argnames[0]] = function.im_self
else:
keyed_args[argnames[0]] = None
remaining_argnames = filter(lambda x: x not in keyed_args, argnames)
keyed_args.update(dict(zip(remaining_argnames, args)))
if defaults:
num_defaults = len(defaults)
for argname, value in zip(argnames[-num_defaults:], defaults):
if argname not in keyed_args:
keyed_args[argname] = value
return keyed_args
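# Usage sketch (editorial): defaults are filled in for arguments that were
# not supplied, e.g.
#     def fn(a, b=2):
#         pass
#     getcallargs(fn, 1)  # -> {'a': 1, 'b': 2}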

gantt/scheduler/rpcapi.py Normal file
View File

@ -0,0 +1,128 @@
# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the scheduler manager RPC API.
"""
from oslo.config import cfg
from oslo import messaging
from nova.objects import base as objects_base
from nova.openstack.common import jsonutils
from nova import rpc
rpcapi_opts = [
cfg.StrOpt('scheduler_topic',
default='scheduler',
help='The topic scheduler nodes listen on'),
]
CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)
rpcapi_cap_opt = cfg.StrOpt('scheduler',
help='Set a version cap for messages sent to scheduler services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class SchedulerAPI(object):
'''Client side of the scheduler rpc API.
API version history:
1.0 - Initial version.
1.1 - Changes to prep_resize():
- remove instance_uuid, add instance
- remove instance_type_id, add instance_type
- remove topic, it was unused
1.2 - Remove topic from run_instance, it was unused
1.3 - Remove instance_id, add instance to live_migration
1.4 - Remove update_db from prep_resize
1.5 - Add reservations argument to prep_resize()
1.6 - Remove reservations argument to run_instance()
1.7 - Add create_volume() method, remove topic from live_migration()
2.0 - Remove 1.x backwards compat
2.1 - Add image_id to create_volume()
2.2 - Remove reservations argument to create_volume()
2.3 - Remove create_volume()
2.4 - Change update_service_capabilities()
- accepts a list of capabilities
2.5 - Add get_backdoor_port()
2.6 - Add select_hosts()
... Grizzly supports message version 2.6. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.6.
2.7 - Add select_destinations()
2.8 - Deprecate prep_resize() -- JUST KIDDING. It is still used
by the compute manager for retries.
2.9 - Added the legacy_bdm_in_spec parameter to run_instance()
... Havana supports message version 2.9. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.9.
... - Deprecated live_migration() call, moved to conductor
... - Deprecated select_hosts()
'''
VERSION_ALIASES = {
'grizzly': '2.6',
'havana': '2.9',
}
def __init__(self):
super(SchedulerAPI, self).__init__()
target = messaging.Target(topic=CONF.scheduler_topic, version='2.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.scheduler,
CONF.upgrade_levels.scheduler)
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target, version_cap=version_cap,
serializer=serializer)
def select_destinations(self, ctxt, request_spec, filter_properties):
cctxt = self.client.prepare(version='2.7')
return cctxt.call(ctxt, 'select_destinations',
request_spec=request_spec, filter_properties=filter_properties)
def run_instance(self, ctxt, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
filter_properties, legacy_bdm_in_spec=True):
version = '2.0'
msg_kwargs = {'request_spec': request_spec,
'admin_password': admin_password,
'injected_files': injected_files,
'requested_networks': requested_networks,
'is_first_time': is_first_time,
'filter_properties': filter_properties}
if self.client.can_send_version('2.9'):
version = '2.9'
msg_kwargs['legacy_bdm_in_spec'] = legacy_bdm_in_spec
cctxt = self.client.prepare(version=version)
return cctxt.cast(ctxt, 'run_instance', **msg_kwargs)
def prep_resize(self, ctxt, instance, instance_type, image,
request_spec, filter_properties, reservations):
instance_p = jsonutils.to_primitive(instance)
instance_type_p = jsonutils.to_primitive(instance_type)
reservations_p = jsonutils.to_primitive(reservations)
image_p = jsonutils.to_primitive(image)
self.client.cast(ctxt, 'prep_resize',
instance=instance_p, instance_type=instance_type_p,
image=image_p, request_spec=request_spec,
filter_properties=filter_properties,
reservations=reservations_p)
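# Usage sketch (editorial, not part of the original module); ctxt,
# request_spec and filter_properties are hypothetical caller values:
#     api = SchedulerAPI()
#     hosts = api.select_destinations(ctxt, request_spec,
#                                     filter_properties)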

View File

@ -0,0 +1,24 @@
# Copyright 2012 IBM Corp.
# Copyright (c) AT&T Labs Inc. 2012 Yun Mao <yunmao@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The membership service for Nova. Different implementations can be
plugged in according to the Nova configuration.
"""
from gantt.servicegroup import api
API = api.API

gantt/servicegroup/api.py Normal file
View File

@ -0,0 +1,150 @@
# Copyright 2012 IBM Corp.
# Copyright (c) AT&T Labs Inc. 2012 Yun Mao <yunmao@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define APIs for the servicegroup access."""
import random
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
_default_driver = 'db'
servicegroup_driver_opt = cfg.StrOpt('servicegroup_driver',
default=_default_driver,
help='The driver for servicegroup '
'service (valid options are: '
'db, zk, mc)')
CONF = cfg.CONF
CONF.register_opt(servicegroup_driver_opt)
class API(object):
_driver = None
_driver_name_class_mapping = {
'db': 'nova.servicegroup.drivers.db.DbDriver',
'zk': 'nova.servicegroup.drivers.zk.ZooKeeperDriver',
'mc': 'nova.servicegroup.drivers.mc.MemcachedDriver'
}
def __new__(cls, *args, **kwargs):
'''Create an instance of the servicegroup API.
args and kwargs are passed down to the servicegroup driver when it gets
created. No args currently exist, though. Valid kwargs are:
db_allowed - Boolean. False if direct db access is not allowed and
alternative data access (conductor) should be used
instead.
'''
if not cls._driver:
LOG.debug(_('ServiceGroup driver defined as an instance of %s'),
str(CONF.servicegroup_driver))
driver_name = CONF.servicegroup_driver
try:
driver_class = cls._driver_name_class_mapping[driver_name]
except KeyError:
raise TypeError(_("unknown ServiceGroup driver name: %s")
% driver_name)
cls._driver = importutils.import_object(driver_class,
*args, **kwargs)
utils.check_isinstance(cls._driver, ServiceGroupDriver)
# we don't have to check that cls._driver is not None,
# check_isinstance does it
return super(API, cls).__new__(cls)
def join(self, member_id, group_id, service=None):
"""Add a new member to the ServiceGroup
@param member_id: the joined member ID
@param group_id: the group name, of the joined member
@param service: the parameter can be used for notifications about
disconnect mode and update some internals
"""
msg = _('Join new ServiceGroup member %(member_id)s to the '
'%(group_id)s group, service = %(service)s')
LOG.debug(msg, {'member_id': member_id, 'group_id': group_id,
'service': service})
return self._driver.join(member_id, group_id, service)
def service_is_up(self, member):
"""Check if the given member is up."""
msg = _('Check if the given ServiceGroup member [%s] is up')
LOG.debug(msg, member)
return self._driver.is_up(member)
def leave(self, member_id, group_id):
"""Explicitly remove the given member from the ServiceGroup
monitoring.
"""
msg = _('Explicitly remove the given member %(member_id)s from the '
'%(group_id)s group monitoring')
LOG.debug(msg, {'member_id': member_id, 'group_id': group_id})
return self._driver.leave(member_id, group_id)
def get_all(self, group_id):
"""Returns ALL members of the given group."""
LOG.debug(_('Returns ALL members of the [%s] '
'ServiceGroup'), group_id)
return self._driver.get_all(group_id)
def get_one(self, group_id):
"""Returns one member of the given group. The strategy to select
the member is decided by the driver (e.g. random or round-robin).
"""
LOG.debug(_('Returns one member of the [%s] group'), group_id)
return self._driver.get_one(group_id)
class ServiceGroupDriver(object):
"""Base class for ServiceGroup drivers."""
def join(self, member_id, group_id, service=None):
"""Join the given service with it's group."""
raise NotImplementedError()
def is_up(self, member):
"""Check whether the given member is up."""
raise NotImplementedError()
def leave(self, member_id, group_id):
"""Remove the given member from the ServiceGroup monitoring."""
raise NotImplementedError()
def get_all(self, group_id):
"""Returns ALL members of the given group."""
raise NotImplementedError()
def get_one(self, group_id):
"""The default behavior of get_one is to randomly pick one from
the result of get_all(). This is likely to be overridden in the
actual driver implementation.
"""
members = self.get_all(group_id)
if members is None:
return None
length = len(members)
if length == 0:
return None
return random.choice(members)
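# Usage sketch (editorial, not part of the original module); 'service' and
# 'service_ref' are hypothetical objects from the hosting nova service:
#     servicegroup_api = API()
#     servicegroup_api.join(CONF.host, 'compute', service)  # heartbeat
#     servicegroup_api.service_is_up(service_ref)           # liveness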

View File

@ -0,0 +1,107 @@
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
import six
from nova import conductor
from nova import context
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.servicegroup import api
CONF = cfg.CONF
CONF.import_opt('service_down_time', 'nova.service')
LOG = logging.getLogger(__name__)
class DbDriver(api.ServiceGroupDriver):
def __init__(self, *args, **kwargs):
self.db_allowed = kwargs.get('db_allowed', True)
self.conductor_api = conductor.API(use_local=self.db_allowed)
def join(self, member_id, group_id, service=None):
"""Join the given service with it's group."""
msg = _('DB_Driver: join new ServiceGroup member %(member_id)s to '
'the %(group_id)s group, service = %(service)s')
LOG.debug(msg, {'member_id': member_id, 'group_id': group_id,
'service': service})
if service is None:
raise RuntimeError(_('service is a mandatory argument for DB based'
' ServiceGroup driver'))
report_interval = service.report_interval
if report_interval:
service.tg.add_timer(report_interval, self._report_state,
report_interval, service)
def is_up(self, service_ref):
"""Moved from nova.utils
Check whether a service is up based on last heartbeat.
"""
last_heartbeat = service_ref['updated_at'] or service_ref['created_at']
if isinstance(last_heartbeat, six.string_types):
# NOTE(russellb) If this service_ref came in over rpc via
# conductor, then the timestamp will be a string and needs to be
# converted back to a datetime.
last_heartbeat = timeutils.parse_strtime(last_heartbeat)
else:
# Objects have proper UTC timezones, but the timeutils comparison
# below does not (and will fail)
last_heartbeat = last_heartbeat.replace(tzinfo=None)
# Timestamps in DB are UTC.
elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
LOG.debug('DB_Driver.is_up last_heartbeat = %(lhb)s elapsed = %(el)s',
{'lhb': str(last_heartbeat), 'el': str(elapsed)})
return abs(elapsed) <= CONF.service_down_time
def get_all(self, group_id):
"""
Returns ALL members of the given group
"""
LOG.debug(_('DB_Driver: get_all members of the %s group') % group_id)
rs = []
ctxt = context.get_admin_context()
services = self.conductor_api.service_get_all_by_topic(ctxt, group_id)
for service in services:
if self.is_up(service):
rs.append(service['host'])
return rs
def _report_state(self, service):
"""Update the state of this service in the datastore."""
ctxt = context.get_admin_context()
state_catalog = {}
try:
report_count = service.service_ref['report_count'] + 1
state_catalog['report_count'] = report_count
service.service_ref = self.conductor_api.service_update(ctxt,
service.service_ref, state_catalog)
# TODO(termie): make this pattern be more elegant.
if getattr(service, 'model_disconnected', False):
service.model_disconnected = False
LOG.error(_('Recovered model server connection!'))
# TODO(vish): this should probably only catch connection errors
except Exception: # pylint: disable=W0702
if not getattr(service, 'model_disconnected', False):
service.model_disconnected = True
LOG.exception(_('model server went away'))

View File

@ -0,0 +1,107 @@
# Service heartbeat driver using Memcached
# Copyright (c) 2013 Akira Yoshiyama <akirayoshiyama at gmail dot com>
#
# This is derived from nova/servicegroup/drivers/db.py.
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from nova import conductor
from nova import context
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
from nova.openstack.common import timeutils
from nova.servicegroup import api
CONF = cfg.CONF
CONF.import_opt('service_down_time', 'nova.service')
CONF.import_opt('memcached_servers', 'nova.openstack.common.memorycache')
LOG = logging.getLogger(__name__)
class MemcachedDriver(api.ServiceGroupDriver):
def __init__(self, *args, **kwargs):
test = kwargs.get('test')
if not CONF.memcached_servers and not test:
raise RuntimeError(_('memcached_servers not defined'))
self.mc = memorycache.get_client()
self.db_allowed = kwargs.get('db_allowed', True)
self.conductor_api = conductor.API(use_local=self.db_allowed)
def join(self, member_id, group_id, service=None):
"""Join the given service with its group."""
msg = _('Memcached_Driver: join new ServiceGroup member '
'%(member_id)s to the %(group_id)s group, '
'service = %(service)s')
LOG.debug(msg, {'member_id': member_id, 'group_id': group_id,
'service': service})
if service is None:
raise RuntimeError(_('service is a mandatory argument for '
'Memcached based ServiceGroup driver'))
report_interval = service.report_interval
if report_interval:
service.tg.add_timer(report_interval, self._report_state,
report_interval, service)
def is_up(self, service_ref):
"""Moved from nova.utils
Check whether a service is up based on last heartbeat.
"""
key = "%(topic)s:%(host)s" % service_ref
return self.mc.get(str(key)) is not None
def get_all(self, group_id):
"""
Returns ALL members of the given group
"""
LOG.debug(_('Memcached_Driver: get_all members of the %s group') %
group_id)
rs = []
ctxt = context.get_admin_context()
services = self.conductor_api.service_get_all_by_topic(ctxt, group_id)
for service in services:
if self.is_up(service):
rs.append(service['host'])
return rs
def _report_state(self, service):
"""Update the state of this service in the datastore."""
ctxt = context.get_admin_context()
try:
key = "%(topic)s:%(host)s" % service.service_ref
# memcached has data expiration time capability.
# set(..., time=CONF.service_down_time) uses it and
# reduces key-deleting code.
self.mc.set(str(key),
timeutils.utcnow(),
time=CONF.service_down_time)
# TODO(termie): make this pattern be more elegant.
if getattr(service, 'model_disconnected', False):
service.model_disconnected = False
LOG.error(_('Recovered model server connection!'))
# TODO(vish): this should probably only catch connection errors
except Exception: # pylint: disable=W0702
if not getattr(service, 'model_disconnected', False):
service.model_disconnected = True
LOG.exception(_('model server went away'))

View File

@ -0,0 +1,158 @@
# Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import eventlet
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.servicegroup import api
evzookeeper = importutils.try_import('evzookeeper')
membership = importutils.try_import('evzookeeper.membership')
zookeeper = importutils.try_import('zookeeper')
zk_driver_opts = [
cfg.StrOpt('address',
help='The ZooKeeper addresses for servicegroup service in the '
'format of host1:port,host2:port,host3:port'),
cfg.IntOpt('recv_timeout',
default=4000,
help='recv_timeout parameter for the zk session'),
cfg.StrOpt('sg_prefix',
default="/servicegroups",
help='The prefix used in ZooKeeper to store ephemeral nodes'),
cfg.IntOpt('sg_retry_interval',
default=5,
help='Number of seconds to wait until retrying to join the '
'session'),
]
CONF = cfg.CONF
CONF.register_opts(zk_driver_opts, group="zookeeper")
LOG = logging.getLogger(__name__)
class ZooKeeperDriver(api.ServiceGroupDriver):
"""ZooKeeper driver for the service group API."""
def __init__(self, *args, **kwargs):
"""Create the zk session object."""
if not all([evzookeeper, membership, zookeeper]):
raise ImportError('zookeeper module not found')
null = open(os.devnull, "w")
self._session = evzookeeper.ZKSession(CONF.zookeeper.address,
recv_timeout=
CONF.zookeeper.recv_timeout,
zklog_fd=null)
self._memberships = {}
self._monitors = {}
# Make sure the prefix exists
try:
self._session.create(CONF.zookeeper.sg_prefix, "",
acl=[evzookeeper.ZOO_OPEN_ACL_UNSAFE])
except zookeeper.NodeExistsException:
pass
super(ZooKeeperDriver, self).__init__()
def join(self, member_id, group, service=None):
"""Join the given service with its group."""
LOG.debug(_('ZooKeeperDriver: join new member %(id)s to the '
'%(gr)s group, service=%(sr)s'),
{'id': member_id, 'gr': group, 'sr': service})
member = self._memberships.get((group, member_id), None)
if member is None:
# the first time to join. Generate a new object
path = "%s/%s" % (CONF.zookeeper.sg_prefix, group)
try:
member = membership.Membership(self._session, path, member_id)
except RuntimeError:
LOG.exception(_("Unable to join. It is possible that either "
"another node exists with the same name, or "
"this node just restarted. We will try "
"again in a short while to make sure."))
eventlet.sleep(CONF.zookeeper.sg_retry_interval)
member = membership.Membership(self._session, path, member_id)
self._memberships[(group, member_id)] = member
return FakeLoopingCall(self, member_id, group)
def leave(self, member_id, group):
"""Remove the given member from the service group."""
LOG.debug(_('ZooKeeperDriver.leave: %(member)s from group %(group)s'),
{'member': member_id, 'group': group})
try:
key = (group, member_id)
member = self._memberships[key]
member.leave()
del self._memberships[key]
except KeyError:
LOG.error(_('ZooKeeperDriver.leave: %(id)s has not joined to the '
'%(gr)s group'), {'id': member_id, 'gr': group})
def is_up(self, service_ref):
group_id = service_ref['topic']
member_id = service_ref['host']
all_members = self.get_all(group_id)
return member_id in all_members
def get_all(self, group_id):
"""Return all members in a list, or a ServiceGroupUnavailable
exception.
"""
monitor = self._monitors.get(group_id, None)
if monitor is None:
path = "%s/%s" % (CONF.zookeeper.sg_prefix, group_id)
monitor = membership.MembershipMonitor(self._session, path)
self._monitors[group_id] = monitor
# Note(maoy): When initialized for the first time, it takes a
# while to retrieve all members from zookeeper. To prevent
# None from being returned, we sleep up to 5 seconds to wait for
# the data to be ready.
for _retry in range(50):
eventlet.sleep(0.1)
all_members = monitor.get_all()
if all_members is not None:
return all_members
all_members = monitor.get_all()
if all_members is None:
raise exception.ServiceGroupUnavailable(driver="ZooKeeperDriver")
return all_members
class FakeLoopingCall(loopingcall.LoopingCallBase):
"""The fake Looping Call implementation, created for backward
compatibility with a membership based on DB.
"""
def __init__(self, driver, host, group):
self._driver = driver
self._group = group
self._host = host
def stop(self):
self._driver.leave(self._host, self._group)
def start(self, interval, initial_delay=None):
pass
def wait(self):
pass

View File

@ -0,0 +1,69 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova import config
from nova import ipv6
from nova.openstack.common.fixture import config as config_fixture
from nova import paths
from nova.tests import utils
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
CONF.import_opt('fake_network', 'nova.network.manager')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('policy_file', 'nova.policy')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('api_paste_config', 'nova.wsgi')
class ConfFixture(config_fixture.Config):
"""Fixture to manage global conf settings."""
def setUp(self):
super(ConfFixture, self).setUp()
self.conf.set_default('api_paste_config',
paths.state_path_def('etc/nova/api-paste.ini'))
self.conf.set_default('host', 'fake-mini')
self.conf.set_default('compute_driver', 'nova.virt.fake.FakeDriver')
self.conf.set_default('fake_network', True)
self.conf.set_default('flat_network_bridge', 'br100')
self.conf.set_default('floating_ip_dns_manager',
'nova.tests.utils.dns_manager')
self.conf.set_default('instance_dns_manager',
'nova.tests.utils.dns_manager')
self.conf.set_default('network_size', 8)
self.conf.set_default('num_networks', 2)
self.conf.set_default('rpc_backend',
'nova.openstack.common.rpc.impl_fake')
self.conf.set_default('rpc_cast_timeout', 5)
self.conf.set_default('rpc_response_timeout', 5)
self.conf.set_default('connection', "sqlite://", group='database')
self.conf.set_default('sqlite_synchronous', False)
self.conf.set_default('use_ipv6', True)
self.conf.set_default('verbose', True)
self.conf.set_default('vlan_interface', 'eth0')
config.parse_args([], default_config_files=[])
self.addCleanup(utils.cleanup_dns_managers)
self.addCleanup(ipv6.api.reset_backend)
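# Usage sketch (editorial): inside a testtools-based TestCase the fixture
# is typically applied with:
#     self.useFixture(ConfFixture(CONF))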

View File

@ -0,0 +1,108 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import instance_fault as inst_fault_obj
def fake_db_secgroups(instance, names):
secgroups = []
for i, name in enumerate(names):
group_name = 'secgroup-%i' % i
if isinstance(name, dict) and name.get('name'):
group_name = name.get('name')
secgroups.append(
{'id': i,
'instance_uuid': instance['uuid'],
'name': group_name,
'description': 'Fake secgroup',
'user_id': instance['user_id'],
'project_id': instance['project_id'],
'deleted': False,
'deleted_at': None,
'created_at': None,
'updated_at': None,
})
return secgroups
def fake_db_instance(**updates):
db_instance = {
'id': 1,
'deleted': False,
'uuid': str(uuid.uuid4()),
'user_id': 'fake-user',
'project_id': 'fake-project',
'host': 'fake-host',
'created_at': datetime.datetime(1955, 11, 5),
'pci_devices': [],
'security_groups': [],
'metadata': {},
'system_metadata': {},
'root_gb': 0,
'ephemeral_gb': 0
}
for name, field in instance_obj.Instance.fields.items():
if name in db_instance:
continue
if field.nullable:
db_instance[name] = None
elif field.default != fields.UnspecifiedDefault:
db_instance[name] = field.default
else:
raise Exception('fake_db_instance needs help with %s' % name)
if updates:
db_instance.update(updates)
if db_instance.get('security_groups'):
db_instance['security_groups'] = fake_db_secgroups(
db_instance, db_instance['security_groups'])
return db_instance
def fake_instance_obj(context, **updates):
expected_attrs = updates.pop('expected_attrs', None)
return instance_obj.Instance._from_db_object(context,
instance_obj.Instance(), fake_db_instance(**updates),
expected_attrs=expected_attrs)
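# Usage sketch (editorial); 'ctxt' is a hypothetical request context:
#     inst = fake_instance_obj(ctxt, host='test-host')
#     inst.host  # -> 'test-host'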
def fake_fault_obj(instance_uuid, code=404,
message='HTTPNotFound',
details='Stock details for test',
**updates):
fault = {
'id': 1,
'instance_uuid': instance_uuid,
'code': code,
'message': message,
'details': details,
'host': 'fake_host',
'deleted': False,
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
'updated_at': None,
'deleted_at': None
}
if updates:
fault.update(updates)
return inst_fault_obj.InstanceFault._from_db_object(
inst_fault_obj.InstanceFault(),
fault)

View File

@ -0,0 +1,121 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

from nova import db

FAKE_UUID = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
FAKE_REQUEST_ID1 = 'req-3293a3f1-b44c-4609-b8d2-d81b105636b8'
FAKE_REQUEST_ID2 = 'req-25517360-b757-47d3-be45-0e8d2a01b36a'
FAKE_ACTION_ID1 = 123
FAKE_ACTION_ID2 = 456

FAKE_ACTIONS = {
FAKE_UUID: {
FAKE_REQUEST_ID1: {'id': FAKE_ACTION_ID1,
'action': 'reboot',
'instance_uuid': FAKE_UUID,
'request_id': FAKE_REQUEST_ID1,
'project_id': '147',
'user_id': '789',
'start_time': datetime.datetime(
2012, 12, 5, 0, 0, 0, 0),
'finish_time': None,
'message': '',
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
},
FAKE_REQUEST_ID2: {'id': FAKE_ACTION_ID2,
'action': 'resize',
'instance_uuid': FAKE_UUID,
'request_id': FAKE_REQUEST_ID2,
'user_id': '789',
'project_id': '842',
'start_time': datetime.datetime(
2012, 12, 5, 1, 0, 0, 0),
'finish_time': None,
'message': '',
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
}
}

FAKE_EVENTS = {
FAKE_ACTION_ID1: [{'id': 1,
'action_id': FAKE_ACTION_ID1,
'event': 'schedule',
'start_time': datetime.datetime(
2012, 12, 5, 1, 0, 2, 0),
'finish_time': datetime.datetime(
2012, 12, 5, 1, 2, 0, 0),
'result': 'Success',
'traceback': '',
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
},
{'id': 2,
'action_id': FAKE_ACTION_ID1,
'event': 'compute_create',
'start_time': datetime.datetime(
2012, 12, 5, 1, 3, 0, 0),
'finish_time': datetime.datetime(
2012, 12, 5, 1, 4, 0, 0),
'result': 'Success',
'traceback': '',
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
],
FAKE_ACTION_ID2: [{'id': 3,
'action_id': FAKE_ACTION_ID2,
'event': 'schedule',
'start_time': datetime.datetime(
2012, 12, 5, 3, 0, 0, 0),
'finish_time': datetime.datetime(
2012, 12, 5, 3, 2, 0, 0),
'result': 'Error',
'traceback': '',
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
]
}


def fake_action_event_start(*args):
    pass


def fake_action_event_finish(*args):
    pass


def stub_out_action_events(stubs):
    stubs.Set(db, 'action_event_start', fake_action_event_start)
    stubs.Set(db, 'action_event_finish', fake_action_event_finish)
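
Editor's note: a sketch of how stub_out_action_events() would typically be wired up; in nova-era tests the stubs argument is the stubout.StubOutForTesting instance the test base class exposes as self.stubs. The module path is assumed.

import stubout

from nova import db

from gantt.tests import fake_instance_actions  # assumed module path

stubs = stubout.StubOutForTesting()
fake_instance_actions.stub_out_action_events(stubs)

# Recording action events is now a silent no-op, so code under test
# can wrap work in events without touching a database.
db.action_event_start(None, {})

stubs.UnsetAll()  # restore the real db API afterwards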

gantt/tests/fake_policy.py Normal file

@ -0,0 +1,374 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

policy_data = """
{
"admin_api": "is_admin:True",
"cells_scheduler_filter:TargetCellFilter": "is_admin:True",
"context_is_admin": "role:admin or role:administrator",
"compute:create": "",
"compute:create:attach_network": "",
"compute:create:attach_volume": "",
"compute:get": "",
"compute:get_all": "",
"compute:get_all_tenants": "",
"compute:update": "",
"compute:get_instance_metadata": "",
"compute:get_all_instance_metadata": "",
"compute:get_all_instance_system_metadata": "",
"compute:update_instance_metadata": "",
"compute:delete_instance_metadata": "",
"compute:get_instance_faults": "",
"compute:get_diagnostics": "",
"compute:get_lock": "",
"compute:lock": "",
"compute:unlock": "",
"compute:unlock_override": "is_admin:True",
"compute:get_vnc_console": "",
"compute:get_spice_console": "",
"compute:get_console_output": "",
"compute:associate_floating_ip": "",
"compute:reset_network": "",
"compute:inject_network_info": "",
"compute:add_fixed_ip": "",
"compute:remove_fixed_ip": "",
"compute:attach_volume": "",
"compute:detach_volume": "",
"compute:inject_file": "",
"compute:set_admin_password": "",
"compute:rescue": "",
"compute:unrescue": "",
"compute:suspend": "",
"compute:resume": "",
"compute:pause": "",
"compute:unpause": "",
"compute:start": "",
"compute:stop": "",
"compute:resize": "",
"compute:confirm_resize": "",
"compute:revert_resize": "",
"compute:rebuild": "",
"compute:reboot": "",
"compute:snapshot": "",
"compute:backup": "",
"compute:shelve": "",
"compute:shelve_offload": "",
"compute:unshelve": "",
"compute:security_groups:add_to_instance": "",
"compute:security_groups:remove_from_instance": "",
"compute:delete": "",
"compute:soft_delete": "",
"compute:force_delete": "",
"compute:restore": "",
"compute:swap_volume": "",
"compute:volume_snapshot_create": "",
"compute:volume_snapshot_delete": "",
"compute_extension:v3:os-access-ips": "",
"compute_extension:accounts": "",
"compute_extension:admin_actions:pause": "",
"compute_extension:admin_actions:unpause": "",
"compute_extension:admin_actions:suspend": "",
"compute_extension:admin_actions:resume": "",
"compute_extension:admin_actions:lock": "",
"compute_extension:admin_actions:unlock": "",
"compute_extension:admin_actions:resetNetwork": "",
"compute_extension:admin_actions:injectNetworkInfo": "",
"compute_extension:admin_actions:createBackup": "",
"compute_extension:admin_actions:migrateLive": "",
"compute_extension:admin_actions:resetState": "",
"compute_extension:admin_actions:migrate": "",
"compute_extension:v3:os-admin-actions:pause": "",
"compute_extension:v3:os-admin-actions:unpause": "",
"compute_extension:v3:os-admin-actions:suspend": "",
"compute_extension:v3:os-admin-actions:resume": "",
"compute_extension:v3:os-admin-actions:lock": "",
"compute_extension:v3:os-admin-actions:unlock": "",
"compute_extension:v3:os-admin-actions:reset_network": "",
"compute_extension:v3:os-admin-actions:inject_network_info": "",
"compute_extension:v3:os-admin-actions:create_backup": "",
"compute_extension:v3:os-admin-actions:migrate_live": "",
"compute_extension:v3:os-admin-actions:reset_state": "",
"compute_extension:v3:os-admin-actions:migrate": "",
"compute_extension:v3:os-admin-password": "",
"compute_extension:aggregates": "rule:admin_api",
"compute_extension:v3:os-aggregates:index": "rule:admin_api",
"compute_extension:v3:os-aggregates:create": "rule:admin_api",
"compute_extension:v3:os-aggregates:show": "rule:admin_api",
"compute_extension:v3:os-aggregates:update": "rule:admin_api",
"compute_extension:v3:os-aggregates:delete": "rule:admin_api",
"compute_extension:v3:os-aggregates:add_host": "rule:admin_api",
"compute_extension:v3:os-aggregates:remove_host": "rule:admin_api",
"compute_extension:v3:os-aggregates:set_metadata": "rule:admin_api",
"compute_extension:agents": "",
"compute_extension:v3:os-agents": "",
"compute_extension:attach_interfaces": "",
"compute_extension:v3:os-attach-interfaces": "",
"compute_extension:baremetal_nodes": "",
"compute_extension:cells": "",
"compute_extension:v3:os-cells": "",
"compute_extension:certificates": "",
"compute_extension:v3:os-certificates": "",
"compute_extension:cloudpipe": "",
"compute_extension:cloudpipe_update": "",
"compute_extension:config_drive": "",
"compute_extension:v3:os-config-drive": "",
"compute_extension:console_output": "",
"compute_extension:v3:console-output": "",
"compute_extension:consoles": "",
"compute_extension:v3:os-remote-consoles": "",
"compute_extension:createserverext": "",
"compute_extension:deferred_delete": "",
"compute_extension:v3:os-deferred-delete": "",
"compute_extension:disk_config": "",
"compute_extension:evacuate": "is_admin:True",
"compute_extension:v3:os-evacuate": "is_admin:True",
"compute_extension:extended_server_attributes": "",
"compute_extension:v3:os-extended-server-attributes": "",
"compute_extension:extended_status": "",
"compute_extension:v3:os-extended-status": "",
"compute_extension:extended_availability_zone": "",
"compute_extension:v3:os-extended-availability-zone": "",
"compute_extension:extended_ips": "",
"compute_extension:extended_ips_mac": "",
"compute_extension:extended_vif_net": "",
"compute_extension:extended_volumes": "",
"compute_extension:v3:os-extended-volumes": "",
"compute_extension:v3:os-extended-volumes:swap": "",
"compute_extension:v3:os-extended-volumes:attach": "",
"compute_extension:v3:os-extended-volumes:detach": "",
"compute_extension:v3:extensions:discoverable": "",
"compute_extension:fixed_ips": "",
"compute_extension:flavor_access": "",
"compute_extension:flavor_access:addTenantAccess": "rule:admin_api",
"compute_extension:flavor_access:removeTenantAccess": "rule:admin_api",
"compute_extension:v3:flavor-access": "",
"compute_extension:v3:flavor-access:remove_tenant_access":
"rule:admin_api",
"compute_extension:v3:flavor-access:add_tenant_access":
"rule:admin_api",
"compute_extension:flavor_disabled": "",
"compute_extension:v3:os-flavor-disabled": "",
"compute_extension:flavor_rxtx": "",
"compute_extension:v3:os-flavor-rxtx": "",
"compute_extension:flavor_swap": "",
"compute_extension:flavorextradata": "",
"compute_extension:flavorextraspecs:index": "",
"compute_extension:flavorextraspecs:show": "",
"compute_extension:flavorextraspecs:create": "is_admin:True",
"compute_extension:flavorextraspecs:update": "is_admin:True",
"compute_extension:flavorextraspecs:delete": "is_admin:True",
"compute_extension:v3:flavor-extra-specs:index": "",
"compute_extension:v3:flavor-extra-specs:show": "",
"compute_extension:v3:flavor-extra-specs:create": "is_admin:True",
"compute_extension:v3:flavor-extra-specs:update": "is_admin:True",
"compute_extension:v3:flavor-extra-specs:delete": "is_admin:True",
"compute_extension:flavormanage": "",
"compute_extension:v3:flavor-manage": "",
"compute_extension:v3:flavors:discoverable": "",
"compute_extension:floating_ip_dns": "",
"compute_extension:floating_ip_pools": "",
"compute_extension:floating_ips": "",
"compute_extension:floating_ips_bulk": "",
"compute_extension:fping": "",
"compute_extension:fping:all_tenants": "is_admin:True",
"compute_extension:hide_server_addresses": "",
"compute_extension:v3:os-hide-server-addresses": "",
"compute_extension:hosts": "",
"compute_extension:v3:os-hosts": "rule:admin_api",
"compute_extension:hypervisors": "",
"compute_extension:v3:os-hypervisors": "rule:admin_api",
"compute_extension:image_size": "",
"compute_extension:instance_actions": "",
"compute_extension:v3:os-instance-actions": "",
"compute_extension:instance_actions:events": "is_admin:True",
"compute_extension:v3:os-instance-actions:events": "is_admin:True",
"compute_extension:instance_usage_audit_log": "",
"compute_extension:v3:os-instance-usage-audit-log": "",
"compute_extension:keypairs": "",
"compute_extension:keypairs:index": "",
"compute_extension:keypairs:show": "",
"compute_extension:keypairs:create": "",
"compute_extension:keypairs:delete": "",
"compute_extension:v3:keypairs": "",
"compute_extension:v3:keypairs:index": "",
"compute_extension:v3:keypairs:show": "",
"compute_extension:v3:keypairs:create": "",
"compute_extension:v3:keypairs:delete": "",
"compute_extension:multinic": "",
"compute_extension:v3:os-multinic": "",
"compute_extension:networks": "",
"compute_extension:networks:view": "",
"compute_extension:networks_associate": "",
"compute_extension:os-tenant-networks": "",
"compute_extension:v3:os-pci:pci_servers": "",
"compute_extension:quotas:show": "",
"compute_extension:quotas:update": "",
"compute_extension:quotas:delete": "",
"compute_extension:v3:os-quota-sets:show": "",
"compute_extension:v3:os-quota-sets:update": "",
"compute_extension:v3:os-quota-sets:delete": "",
"compute_extension:v3:os-quota-sets:detail": "",
"compute_extension:quota_classes": "",
"compute_extension:v3:os-quota-class-sets": "",
"compute_extension:rescue": "",
"compute_extension:v3:os-rescue": "",
"compute_extension:security_group_default_rules": "",
"compute_extension:security_groups": "",
"compute_extension:v3:os-security-groups": "",
"compute_extension:server_diagnostics": "",
"compute_extension:v3:os-server-diagnostics": "",
"compute_extension:server_password": "",
"compute_extension:v3:os-server-password": "",
"compute_extension:server_usage": "",
"compute_extension:v3:os-server-usage": "",
"compute_extension:services": "",
"compute_extension:v3:os-services": "",
"compute_extension:shelve": "",
"compute_extension:shelveOffload": "",
"compute_extension:v3:os-shelve:shelve": "",
"compute_extension:v3:os-shelve:shelve_offload": "",
"compute_extension:simple_tenant_usage:show": "",
"compute_extension:v3:os-simple-tenant-usage:show": "",
"compute_extension:simple_tenant_usage:list": "",
"compute_extension:v3:os-simple-tenant-usage:list": "",
"compute_extension:unshelve": "",
"compute_extension:v3:os-shelve:unshelve": "",
"compute_extension:users": "",
"compute_extension:virtual_interfaces": "",
"compute_extension:virtual_storage_arrays": "",
"compute_extension:volumes": "",
"compute_extension:volume_attachments:index": "",
"compute_extension:volume_attachments:show": "",
"compute_extension:volume_attachments:create": "",
"compute_extension:volume_attachments:update": "",
"compute_extension:volume_attachments:delete": "",
"compute_extension:volumetypes": "",
"compute_extension:zones": "",
"compute_extension:availability_zone:list": "",
"compute_extension:v3:os-availability-zone:list": "",
"compute_extension:availability_zone:detail": "is_admin:True",
"compute_extension:v3:os-availability-zone:detail": "is_admin:True",
"compute_extension:used_limits_for_admin": "is_admin:True",
"compute_extension:migrations:index": "is_admin:True",
"compute_extension:v3:os-migrations:index": "is_admin:True",
"compute_extension:os-assisted-volume-snapshots:create": "",
"compute_extension:os-assisted-volume-snapshots:delete": "",
"volume:create": "",
"volume:get": "",
"volume:get_all": "",
"volume:get_volume_metadata": "",
"volume:delete": "",
"volume:update": "",
"volume:delete_volume_metadata": "",
"volume:update_volume_metadata": "",
"volume:attach": "",
"volume:detach": "",
"volume:reserve_volume": "",
"volume:unreserve_volume": "",
"volume:begin_detaching": "",
"volume:roll_detaching": "",
"volume:check_attach": "",
"volume:check_detach": "",
"volume:initialize_connection": "",
"volume:terminate_connection": "",
"volume:create_snapshot": "",
"volume:delete_snapshot": "",
"volume:get_snapshot": "",
"volume:get_all_snapshots": "",
"volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
"volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
"volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_actions:upload_image": "",
"volume_extension:types_manage": "",
"volume_extension:types_extra_specs": "",
"network:get_all": "",
"network:get": "",
"network:create": "",
"network:delete": "",
"network:associate": "",
"network:disassociate": "",
"network:get_vifs_by_instance": "",
"network:get_vif_by_mac_address": "",
"network:allocate_for_instance": "",
"network:deallocate_for_instance": "",
"network:validate_networks": "",
"network:get_instance_uuids_by_ip_filter": "",
"network:get_instance_id_by_floating_address": "",
"network:setup_networks_on_host": "",
"network:get_floating_ip": "",
"network:get_floating_ip_pools": "",
"network:get_floating_ip_by_address": "",
"network:get_floating_ips_by_project": "",
"network:get_floating_ips_by_fixed_address": "",
"network:allocate_floating_ip": "",
"network:deallocate_floating_ip": "",
"network:associate_floating_ip": "",
"network:disassociate_floating_ip": "",
"network:release_floating_ip": "",
"network:migrate_instance_start": "",
"network:migrate_instance_finish": "",
"network:get_fixed_ip": "",
"network:get_fixed_ip_by_address": "",
"network:add_fixed_ip_to_instance": "",
"network:remove_fixed_ip_from_instance": "",
"network:add_network_to_project": "",
"network:get_instance_nw_info": "",
"network:get_dns_domains": "",
"network:add_dns_entry": "",
"network:modify_dns_entry": "",
"network:delete_dns_entry": "",
"network:get_dns_entries_by_address": "",
"network:get_dns_entries_by_name": "",
"network:create_private_dns_domain": "",
"network:create_public_dns_domain": "",
"network:delete_dns_domain": ""
}
"""


@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudscaling Group, Inc
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may

gantt/tests/image/fake.py Normal file

@ -0,0 +1,257 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake image service."""

import copy
import datetime
import uuid

from oslo.config import cfg

from nova import exception
import nova.image.glance
from nova.openstack.common import log as logging

CONF = cfg.CONF
CONF.import_opt('null_kernel', 'nova.compute.api')

LOG = logging.getLogger(__name__)


class _FakeImageService(object):
    """Mock (fake) image service for unit testing."""

    def __init__(self):
        self.images = {}
        # NOTE(justinsb): The OpenStack API can't upload an image?
        # So, make sure we've got one..
        timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
'size': '25165824',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64'}}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '58145823',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': None,
'disk_format': None,
'size': '83594576',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '84035174',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '26360814',
'properties': {'kernel_id':
'155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'size': '49163826',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'size': '74185822',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
self.create(None, image6)
self.create(None, image7)
self._imagedata = {}
        super(_FakeImageService, self).__init__()

    # TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
    def detail(self, context, **kwargs):
        """Return list of detailed image information."""
        return copy.deepcopy(self.images.values())

    def download(self, context, image_id, dst_path=None, data=None):
self.show(context, image_id)
if data:
data.write(self._imagedata.get(image_id, ''))
elif dst_path:
with open(dst_path, 'wb') as data:
                data.write(self._imagedata.get(image_id, ''))

    def show(self, context, image_id):
        """Get data about specified image.

        Returns a dict containing image data for the given opaque image id.
        """
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
LOG.warn('Unable to find image id %s. Have images: %s',
image_id, self.images)
        raise exception.ImageNotFound(image_id=image_id)

    def create(self, context, metadata, data=None):
        """Store the image data and return the new image id.

        :raises: Duplicate if the image already exists.
        """
image_id = str(metadata.get('id', uuid.uuid4()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.CouldNotUploadImage(image_id=image_id)
self.images[image_id] = copy.deepcopy(metadata)
if data:
self._imagedata[image_id] = data.read()
        return self.images[image_id]

    def update(self, context, image_id, metadata, data=None,
               purge_props=False):
        """Replace the contents of the given image with the new data.

        :raises: ImageNotFound if the image does not exist.
        """
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
if purge_props:
self.images[image_id] = copy.deepcopy(metadata)
else:
image = self.images[image_id]
try:
image['properties'].update(metadata.pop('properties'))
except KeyError:
pass
image.update(metadata)
        return self.images[image_id]

    def delete(self, context, image_id):
        """Delete the given image.

        :raises: ImageNotFound if the image does not exist.
        """
removed = self.images.pop(image_id, None)
if not removed:
            raise exception.ImageNotFound(image_id=image_id)

    def get_location(self, context, image_id):
if image_id in self.images:
return 'fake_location'
        return None


_fakeImageService = _FakeImageService()


def FakeImageService():
    return _fakeImageService


def FakeImageService_reset():
    global _fakeImageService
    _fakeImageService = _FakeImageService()


def get_valid_image_id():
    return _fakeImageService.images.keys()[0]


def stub_out_image_service(stubs):
image_service = FakeImageService()
stubs.Set(nova.image.glance, 'get_remote_image_service',
lambda x, y: (image_service, y))
stubs.Set(nova.image.glance, 'get_default_image_service',
lambda: image_service)
return image_service
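
Editor's note: a usage sketch for the fake image service, again assuming a stubout.StubOutForTesting instance as in nova's test base class; the module path gantt/tests/image/fake.py is taken from the diff header.

import stubout

from gantt.tests.image import fake  # path shown in the diff header

stubs = stubout.StubOutForTesting()
image_service = fake.stub_out_image_service(stubs)

image_id = fake.get_valid_image_id()
image = image_service.show(None, image_id)
assert image['status'] == 'active'

fake.FakeImageService_reset()  # drop any test mutations to the stock images
stubs.UnsetAll()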

gantt/tests/matchers.py Normal file

@ -0,0 +1,456 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Matcher classes to be used inside of the testtools assertThat framework."""

import pprint

from lxml import etree
from testtools import content


class DictKeysMismatch(object):
    def __init__(self, d1only, d2only):
self.d1only = d1only
self.d2only = d2only
def describe(self):
return ('Keys in d1 and not d2: %(d1only)s.'
' Keys in d2 and not d1: %(d2only)s' % self.__dict__)
def get_details(self):
        return {}


class DictMismatch(object):
def __init__(self, key, d1_value, d2_value):
self.key = key
self.d1_value = d1_value
self.d2_value = d2_value
def describe(self):
return ("Dictionaries do not match at %(key)s."
" d1: %(d1_value)s d2: %(d2_value)s" % self.__dict__)
def get_details(self):
        return {}


class DictMatches(object):
def __init__(self, d1, approx_equal=False, tolerance=0.001):
self.d1 = d1
self.approx_equal = approx_equal
self.tolerance = tolerance
    def __str__(self):
        return 'DictMatches(%s)' % (pprint.pformat(self.d1))

    # Useful assertions
    def match(self, d2):
        """Assert two dicts are equivalent.

        This is a 'deep' match in the sense that it handles nested
        dictionaries appropriately.

        NOTE:

            If you don't care (or don't know) a given value, you can specify
            the string DONTCARE as the value. This will cause that dict-item
            to be skipped.
        """
d1keys = set(self.d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
return DictKeysMismatch(d1only, d2only)
for key in d1keys:
d1value = self.d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= self.tolerance
except (ValueError, TypeError):
# If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
matcher = DictMatches(d1value)
did_match = matcher.match(d2value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (d1value, d2value):
continue
elif self.approx_equal and within_tolerance:
continue
elif d1value != d2value:
                return DictMismatch(key, d1value, d2value)


class ListLengthMismatch(object):
def __init__(self, len1, len2):
self.len1 = len1
self.len2 = len2
def describe(self):
return ('Length mismatch: len(L1)=%(len1)d != '
'len(L2)=%(len2)d' % self.__dict__)
def get_details(self):
        return {}


class DictListMatches(object):
def __init__(self, l1, approx_equal=False, tolerance=0.001):
self.l1 = l1
self.approx_equal = approx_equal
self.tolerance = tolerance
def __str__(self):
return 'DictListMatches(%s)' % (pprint.pformat(self.l1))
# Useful assertions
def match(self, l2):
"""Assert a list of dicts are equivalent."""
l1count = len(self.l1)
l2count = len(l2)
if l1count != l2count:
return ListLengthMismatch(l1count, l2count)
for d1, d2 in zip(self.l1, l2):
matcher = DictMatches(d2,
approx_equal=self.approx_equal,
tolerance=self.tolerance)
did_match = matcher.match(d1)
if did_match:
                return did_match


class SubDictMismatch(object):
def __init__(self,
key=None,
sub_value=None,
super_value=None,
keys=False):
self.key = key
self.sub_value = sub_value
self.super_value = super_value
self.keys = keys
def describe(self):
if self.keys:
return "Keys between dictionaries did not match"
else:
return("Dictionaries do not match at %s. d1: %s d2: %s"
% (self.key,
self.super_value,
self.sub_value))
def get_details(self):
        return {}


class IsSubDictOf(object):
def __init__(self, super_dict):
self.super_dict = super_dict
def __str__(self):
return 'IsSubDictOf(%s)' % (self.super_dict)
def match(self, sub_dict):
"""Assert a sub_dict is subset of super_dict."""
if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())):
return SubDictMismatch(keys=True)
for k, sub_value in sub_dict.items():
super_value = self.super_dict[k]
if isinstance(sub_value, dict):
matcher = IsSubDictOf(super_value)
did_match = matcher.match(sub_value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (sub_value, super_value):
continue
else:
if sub_value != super_value:
                    return SubDictMismatch(k, sub_value, super_value)


class FunctionCallMatcher(object):
def __init__(self, expected_func_calls):
self.expected_func_calls = expected_func_calls
self.actual_func_calls = []
def call(self, *args, **kwargs):
func_call = {'args': args, 'kwargs': kwargs}
self.actual_func_calls.append(func_call)
def match(self):
dict_list_matcher = DictListMatches(self.expected_func_calls)
        return dict_list_matcher.match(self.actual_func_calls)


class XMLMismatch(object):
"""Superclass for XML mismatch."""
def __init__(self, state):
self.path = str(state)
self.expected = state.expected
self.actual = state.actual
def describe(self):
return "%(path)s: XML does not match" % self.__dict__
def get_details(self):
return {
'expected': content.text_content(self.expected),
'actual': content.text_content(self.actual),
        }


class XMLTagMismatch(XMLMismatch):
"""XML tags don't match."""
def __init__(self, state, idx, expected_tag, actual_tag):
super(XMLTagMismatch, self).__init__(state)
self.idx = idx
self.expected_tag = expected_tag
self.actual_tag = actual_tag
def describe(self):
return ("%(path)s: XML tag mismatch at index %(idx)d: "
"expected tag <%(expected_tag)s>; "
"actual tag <%(actual_tag)s>" % self.__dict__)
class XMLAttrKeysMismatch(XMLMismatch):
"""XML attribute keys don't match."""
def __init__(self, state, expected_only, actual_only):
super(XMLAttrKeysMismatch, self).__init__(state)
self.expected_only = ', '.join(sorted(expected_only))
self.actual_only = ', '.join(sorted(actual_only))
def describe(self):
return ("%(path)s: XML attributes mismatch: "
"keys only in expected: %(expected_only)s; "
"keys only in actual: %(actual_only)s" % self.__dict__)
class XMLAttrValueMismatch(XMLMismatch):
"""XML attribute values don't match."""
def __init__(self, state, key, expected_value, actual_value):
super(XMLAttrValueMismatch, self).__init__(state)
self.key = key
self.expected_value = expected_value
self.actual_value = actual_value
def describe(self):
return ("%(path)s: XML attribute value mismatch: "
"expected value of attribute %(key)s: %(expected_value)r; "
"actual value: %(actual_value)r" % self.__dict__)
class XMLTextValueMismatch(XMLMismatch):
"""XML text values don't match."""
def __init__(self, state, expected_text, actual_text):
super(XMLTextValueMismatch, self).__init__(state)
self.expected_text = expected_text
self.actual_text = actual_text
def describe(self):
return ("%(path)s: XML text value mismatch: "
"expected text value: %(expected_text)r; "
"actual value: %(actual_text)r" % self.__dict__)
class XMLUnexpectedChild(XMLMismatch):
"""Unexpected child present in XML."""
def __init__(self, state, tag, idx):
super(XMLUnexpectedChild, self).__init__(state)
self.tag = tag
self.idx = idx
def describe(self):
return ("%(path)s: XML unexpected child element <%(tag)s> "
"present at index %(idx)d" % self.__dict__)
class XMLExpectedChild(XMLMismatch):
"""Expected child not present in XML."""
def __init__(self, state, tag, idx):
super(XMLExpectedChild, self).__init__(state)
self.tag = tag
self.idx = idx
def describe(self):
return ("%(path)s: XML expected child element <%(tag)s> "
"not present at index %(idx)d" % self.__dict__)
class XMLMatchState(object):
"""
Maintain some state for matching.
Tracks the XML node path and saves the expected and actual full
XML text, for use by the XMLMismatch subclasses.
"""
def __init__(self, expected, actual):
self.path = []
self.expected = expected
self.actual = actual
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, exc_tb):
self.path.pop()
return False
def __str__(self):
        return '/' + '/'.join(self.path)

    def node(self, tag, idx):
        """Adds tag and index to the path; they will be popped off when
        the corresponding 'with' statement exits.

        :param tag: The element tag
        :param idx: If not None, the integer index of the element
                    within its parent. Not included in the path
                    element if None.
        """
if idx is not None:
self.path.append("%s[%d]" % (tag, idx))
else:
self.path.append(tag)
        return self


class XMLMatches(object):
"""Compare XML strings. More complete than string comparison."""
def __init__(self, expected):
self.expected_xml = expected
self.expected = etree.fromstring(expected)
def __str__(self):
return 'XMLMatches(%r)' % self.expected_xml
def match(self, actual_xml):
actual = etree.fromstring(actual_xml)
state = XMLMatchState(self.expected_xml, actual_xml)
result = self._compare_node(self.expected, actual, state, None)
if result is False:
return XMLMismatch(state)
elif result is not True:
return result
def _compare_node(self, expected, actual, state, idx):
"""Recursively compares nodes within the XML tree."""
# Start by comparing the tags
if expected.tag != actual.tag:
return XMLTagMismatch(state, idx, expected.tag, actual.tag)
with state.node(expected.tag, idx):
# Compare the attribute keys
expected_attrs = set(expected.attrib.keys())
actual_attrs = set(actual.attrib.keys())
if expected_attrs != actual_attrs:
expected_only = expected_attrs - actual_attrs
actual_only = actual_attrs - expected_attrs
return XMLAttrKeysMismatch(state, expected_only, actual_only)
# Compare the attribute values
for key in expected_attrs:
expected_value = expected.attrib[key]
actual_value = actual.attrib[key]
if 'DONTCARE' in (expected_value, actual_value):
continue
elif expected_value != actual_value:
return XMLAttrValueMismatch(state, key, expected_value,
actual_value)
# Compare the contents of the node
if len(expected) == 0 and len(actual) == 0:
# No children, compare text values
if ('DONTCARE' not in (expected.text, actual.text) and
expected.text != actual.text):
return XMLTextValueMismatch(state, expected.text,
actual.text)
else:
expected_idx = 0
actual_idx = 0
while (expected_idx < len(expected) and
actual_idx < len(actual)):
# Ignore comments and processing instructions
# TODO(Vek): may interpret PIs in the future, to
# allow for, say, arbitrary ordering of some
# elements
if (expected[expected_idx].tag in
(etree.Comment, etree.ProcessingInstruction)):
expected_idx += 1
continue
# Compare the nodes
result = self._compare_node(expected[expected_idx],
actual[actual_idx], state,
actual_idx)
if result is not True:
return result
# Step on to comparing the next nodes...
expected_idx += 1
actual_idx += 1
# Make sure we consumed all nodes in actual
if actual_idx < len(actual):
return XMLUnexpectedChild(state, actual[actual_idx].tag,
actual_idx)
# Make sure we consumed all nodes in expected
if expected_idx < len(expected):
for node in expected[expected_idx:]:
if (node.tag in
(etree.Comment, etree.ProcessingInstruction)):
continue
return XMLExpectedChild(state, node.tag, actual_idx)
# The nodes match
return True
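
Editor's note: these matchers implement the testtools matcher protocol (match() returns None on success, or a mismatch object with describe()/get_details() on failure), so they plug straight into assertThat; a minimal sketch, assuming the import path shown in the diff header:

import testtools

from gantt.tests import matchers  # path shown in the diff header


class ExampleTest(testtools.TestCase):
    def test_dict_matches(self):
        expected = {'vcpus': 2, 'extra_specs': {'numa_nodes': 'DONTCARE'}}
        actual = {'vcpus': 2, 'extra_specs': {'numa_nodes': '1'}}
        # DONTCARE makes DictMatches skip that leaf value.
        self.assertThat(actual, matchers.DictMatches(expected))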


@ -0,0 +1,44 @@
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

import fixtures
from oslo.config import cfg

from nova.openstack.common import policy as common_policy
import nova.policy
from nova.tests import fake_policy

CONF = cfg.CONF


class PolicyFixture(fixtures.Fixture):

    def setUp(self):
super(PolicyFixture, self).setUp()
self.policy_dir = self.useFixture(fixtures.TempDir())
self.policy_file_name = os.path.join(self.policy_dir.path,
'policy.json')
with open(self.policy_file_name, 'w') as policy_file:
policy_file.write(fake_policy.policy_data)
CONF.set_override('policy_file', self.policy_file_name)
nova.policy.reset()
nova.policy.init()
        self.addCleanup(nova.policy.reset)

    def set_rules(self, rules):
common_policy.set_rules(common_policy.Rules(
dict((k, common_policy.parse_rule(v))
for k, v in rules.items())))
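
Editor's note: a sketch of how PolicyFixture is meant to be consumed from a testtools-based test case; the module name policy_fixture is an assumption for this unnamed file.

import testtools

from gantt.tests import policy_fixture  # assumed module name


class ExampleTest(testtools.TestCase):
    def setUp(self):
        super(ExampleTest, self).setUp()
        self.policy = self.useFixture(policy_fixture.PolicyFixture())

    def test_admin_only(self):
        # set_rules() swaps the whole in-memory rule set, so list every
        # rule the test actually exercises.
        self.policy.set_rules({'compute:get_all': 'is_admin:True'})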

Some files were not shown because too many files have changed in this diff.