Move files out of the namespace package

Move the public API out of oslo.db to oslo_db. Retain the ability to
import from the old namespace package for backwards compatibility for
this release cycle.

Blueprint: drop-namespace-packages

Change-Id: Ie96b482b9fbcb1d85203ad35bb65c1f43e912a44
Doug Hellmann 2014-12-12 14:23:13 -05:00 committed by Roman Podoliaka
parent 571433bfc4
commit 7063585c60
90 changed files with 8865 additions and 4438 deletions

View File

@@ -2,6 +2,6 @@
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
${PYTHON:-python} -m subunit.run discover -t ./ ./tests $LISTOPT $IDOPTION
${PYTHON:-python} -m subunit.run discover -t ./ ./oslo_db/tests $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list

View File

@@ -1,8 +1,8 @@
=============
oslo.db.api
oslo_db.api
=============
.. automodule:: oslo.db.api
.. automodule:: oslo_db.api
:members:
:undoc-members:
:show-inheritance:

View File

@@ -1,8 +1,8 @@
=====================
oslo.db.concurrency
oslo_db.concurrency
=====================
.. automodule:: oslo.db.concurrency
.. automodule:: oslo_db.concurrency
:members:
:undoc-members:
:show-inheritance:

View File

@@ -1,8 +1,8 @@
===================
oslo.db.exception
oslo_db.exception
===================
.. automodule:: oslo.db.exception
.. automodule:: oslo_db.exception
:members:
:undoc-members:
:show-inheritance:

View File

@@ -1,8 +1,8 @@
=================
oslo.db.options
oslo_db.options
=================
.. automodule:: oslo.db.options
.. automodule:: oslo_db.options
:members:
:undoc-members:
:show-inheritance:

View File

@@ -1,5 +1,5 @@
====================
oslo.db.sqlalchemy
oslo_db.sqlalchemy
====================
.. toctree::

View File

@@ -1,8 +1,8 @@
==============================
oslo.db.sqlalchemy.migration
oslo_db.sqlalchemy.migration
==============================
.. automodule:: oslo.db.sqlalchemy.migration
.. automodule:: oslo_db.sqlalchemy.migration
:members:
:undoc-members:
:show-inheritance:

View File

@@ -1,8 +1,8 @@
===========================
oslo.db.sqlalchemy.models
oslo_db.sqlalchemy.models
===========================
.. automodule:: oslo.db.sqlalchemy.models
.. automodule:: oslo_db.sqlalchemy.models
:members:
:undoc-members:
:show-inheritance:

View File

@@ -1,8 +1,8 @@
==============================
oslo.db.sqlalchemy.provision
oslo_db.sqlalchemy.provision
==============================
.. automodule:: oslo.db.sqlalchemy.provision
.. automodule:: oslo_db.sqlalchemy.provision
:members:
:undoc-members:
:show-inheritance:

View File

@@ -1,8 +1,8 @@
============================
oslo.db.sqlalchemy.session
oslo_db.sqlalchemy.session
============================
.. automodule:: oslo.db.sqlalchemy.session
.. automodule:: oslo_db.sqlalchemy.session
:members:
:undoc-members:
:show-inheritance:

View File

@@ -1,8 +1,8 @@
==============================
oslo.db.sqlalchemy.test_base
oslo_db.sqlalchemy.test_base
==============================
.. automodule:: oslo.db.sqlalchemy.test_base
.. automodule:: oslo_db.sqlalchemy.test_base
:members:
:undoc-members:
:show-inheritance:

View File

@@ -1,8 +1,8 @@
====================================
oslo.db.sqlalchemy.test_migrations
oslo_db.sqlalchemy.test_migrations
====================================
.. automodule:: oslo.db.sqlalchemy.test_migrations
.. automodule:: oslo_db.sqlalchemy.test_migrations
:members:
:undoc-members:
:show-inheritance:

View File

@@ -1,8 +1,8 @@
==========================
oslo.db.sqlalchemy.utils
oslo_db.sqlalchemy.utils
==========================
.. automodule:: oslo.db.sqlalchemy.utils
.. automodule:: oslo_db.sqlalchemy.utils
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,26 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import warnings
def deprecated():
new_name = __name__.replace('.', '_')
warnings.warn(
('The oslo namespace package is deprecated. Please use %s instead.' %
new_name),
DeprecationWarning,
stacklevel=3,
)
deprecated()
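With this shim in place, imports from the old namespace keep working for the cycle but emit a DeprecationWarning naming the new module. A minimal sketch of the consumer-visible behavior (assuming both the old oslo.db shim and the new oslo_db package are installed):

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from oslo.db import api  # old namespace; still importable this cycle

# caught now contains a DeprecationWarning suggesting the oslo_db name
assert any(issubclass(w.category, DeprecationWarning) for w in caught)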

View File

@@ -1,4 +1,3 @@
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,217 +12,4 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
=================================
Multiple DB API backend support.
=================================
A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
"""
import logging
import threading
import time
from oslo.utils import importutils
import six
from oslo.db._i18n import _LE
from oslo.db import exception
from oslo.db import options
LOG = logging.getLogger(__name__)
def safe_for_db_retry(f):
"""Indicate api method as safe for re-connection to database.
Database connection retries will be enabled for the decorated api method.
Database connection failure can have many causes, which can be temporary.
In such cases retry may increase the likelihood of connection.
Usage::
@safe_for_db_retry
def api_method(self):
self.engine.connect()
:param f: database api method.
:type f: function.
"""
f.__dict__['enable_retry'] = True
return f
class wrap_db_retry(object):
"""Decorator class. Retry db.api methods, if DBConnectionError() raised.
Retry decorated db.api methods. If we enabled `use_db_reconnect`
in config, this decorator will be applied to all db.api functions,
marked with @safe_for_db_retry decorator.
Decorator catches DBConnectionError() and retries function in a
loop until it succeeds, or until maximum retries count will be reached.
Keyword arguments:
:param retry_interval: seconds between transaction retries
:type retry_interval: int
:param max_retries: max number of retries before an error is raised
:type max_retries: int
:param inc_retry_interval: whether to increase the retry interval on each retry
:type inc_retry_interval: bool
:param max_retry_interval: max interval value between retries
:type max_retry_interval: int
"""
def __init__(self, retry_interval, max_retries, inc_retry_interval,
max_retry_interval):
super(wrap_db_retry, self).__init__()
self.retry_interval = retry_interval
self.max_retries = max_retries
self.inc_retry_interval = inc_retry_interval
self.max_retry_interval = max_retry_interval
def __call__(self, f):
@six.wraps(f)
def wrapper(*args, **kwargs):
next_interval = self.retry_interval
remaining = self.max_retries
while True:
try:
return f(*args, **kwargs)
except exception.DBConnectionError as e:
if remaining == 0:
LOG.exception(_LE('DB exceeded retry limit.'))
raise exception.DBError(e)
if remaining != -1:
remaining -= 1
LOG.exception(_LE('DB connection error.'))
# NOTE(vsergeyev): We are using patched time module, so
# this effectively yields the execution
# context to another green thread.
time.sleep(next_interval)
if self.inc_retry_interval:
next_interval = min(
next_interval * 2,
self.max_retry_interval
)
return wrapper
class DBAPI(object):
"""Initialize the chosen DB API backend.
After initialization, API methods are available as normal attributes of
the ``DBAPI`` instance. Database API methods are supposed to be called as
DBAPI instance methods.
:param backend_name: name of the backend to load
:type backend_name: str
:param backend_mapping: backend name -> module/class to load mapping
:type backend_mapping: dict
:default backend_mapping: None
:param lazy: load the DB backend lazily on the first DB API method call
:type lazy: bool
:default lazy: False
:keyword use_db_reconnect: retry DB transactions on disconnect or not
:type use_db_reconnect: bool
:keyword retry_interval: seconds between transaction retries
:type retry_interval: int
:keyword inc_retry_interval: whether to increase the retry interval
:type inc_retry_interval: bool
:keyword max_retry_interval: max interval value between retries
:type max_retry_interval: int
:keyword max_retries: max number of retries before an error is raised
:type max_retries: int
"""
def __init__(self, backend_name, backend_mapping=None, lazy=False,
**kwargs):
self._backend = None
self._backend_name = backend_name
self._backend_mapping = backend_mapping or {}
self._lock = threading.Lock()
if not lazy:
self._load_backend()
self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
self.retry_interval = kwargs.get('retry_interval', 1)
self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
self.max_retry_interval = kwargs.get('max_retry_interval', 10)
self.max_retries = kwargs.get('max_retries', 20)
def _load_backend(self):
with self._lock:
if not self._backend:
# Import the untranslated name if we don't have a mapping
backend_path = self._backend_mapping.get(self._backend_name,
self._backend_name)
LOG.debug('Loading backend %(name)r from %(path)r',
{'name': self._backend_name,
'path': backend_path})
backend_mod = importutils.import_module(backend_path)
self._backend = backend_mod.get_backend()
def __getattr__(self, key):
if not self._backend:
self._load_backend()
attr = getattr(self._backend, key)
if not hasattr(attr, '__call__'):
return attr
# NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
# DB API methods, decorated with @safe_for_db_retry
# on disconnect.
if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
attr = wrap_db_retry(
retry_interval=self.retry_interval,
max_retries=self.max_retries,
inc_retry_interval=self.inc_retry_interval,
max_retry_interval=self.max_retry_interval)(attr)
return attr
@classmethod
def from_config(cls, conf, backend_mapping=None, lazy=False):
"""Initialize DBAPI instance given a config instance.
:param conf: oslo.config config instance
:type conf: oslo.config.cfg.ConfigOpts
:param backend_mapping: backend name -> module/class to load mapping
:type backend_mapping: dict
:param lazy: load the DB backend lazily on the first DB API method call
:type lazy: bool
"""
conf.register_opts(options.database_opts, 'database')
return cls(backend_name=conf.database.backend,
backend_mapping=backend_mapping,
lazy=lazy,
use_db_reconnect=conf.database.use_db_reconnect,
retry_interval=conf.database.db_retry_interval,
inc_retry_interval=conf.database.db_inc_retry_interval,
max_retry_interval=conf.database.db_max_retry_interval,
max_retries=conf.database.db_max_retries)
from oslo_db.api import * # noqa
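Taken together, the contract described above is: a backend module exposes get_backend(), and a DBAPI instance proxies attribute access to whatever that returns, wrapping retry-safe methods when use_db_reconnect is on. A sketch under assumed names (mypkg.db.impl is hypothetical):

# mypkg/db/impl.py (hypothetical backend module)
import sys

from oslo_db import api

@api.safe_for_db_retry
def get_instance(instance_id):
    """Talk to the database; retried on DBConnectionError."""

def get_backend():
    # any object exposing DB API methods works; a module is simplest
    return sys.modules[__name__]

# application code (hypothetical)
db_api = api.DBAPI('impl',
                   backend_mapping={'impl': 'mypkg.db.impl'},
                   lazy=True, use_db_reconnect=True)
row = db_api.get_instance(42)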

View File

@@ -1,4 +1,3 @@
# Copyright 2014 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,69 +12,4 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import logging
import threading
from oslo.config import cfg
from oslo.db._i18n import _LE
from oslo.db import api
LOG = logging.getLogger(__name__)
tpool_opts = [
cfg.BoolOpt('use_tpool',
default=False,
deprecated_name='dbapi_use_tpool',
deprecated_group='DEFAULT',
help='Enable the experimental use of thread pooling for '
'all DB API calls'),
]
class TpoolDbapiWrapper(object):
"""DB API wrapper class.
This wraps the oslo DB API with an option to be able to use eventlet's
thread pooling. Since the CONF variable may not be loaded at the time
this class is instantiated, we must look at it on the first DB API call.
"""
def __init__(self, conf, backend_mapping):
self._db_api = None
self._backend_mapping = backend_mapping
self._conf = conf
self._conf.register_opts(tpool_opts, 'database')
self._lock = threading.Lock()
@property
def _api(self):
if not self._db_api:
with self._lock:
if not self._db_api:
db_api = api.DBAPI.from_config(
conf=self._conf, backend_mapping=self._backend_mapping)
if self._conf.database.use_tpool:
try:
from eventlet import tpool
except ImportError:
LOG.exception(_LE("'eventlet' is required for "
"TpoolDbapiWrapper."))
raise
self._db_api = tpool.Proxy(db_api)
else:
self._db_api = db_api
return self._db_api
def __getattr__(self, key):
return getattr(self._api, key)
def list_opts():
"""Returns a list of oslo.config options available in this module.
:returns: a list of (group_name, opts) tuples
"""
return [('database', copy.deepcopy(tpool_opts))]
from oslo_db.concurrency import * # noqa
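A sketch of wiring TpoolDbapiWrapper in front of the DB API (the module path and mapping are hypothetical); because backend loading is deferred to the first attribute access, CONF can be parsed after import:

from oslo.config import cfg

from oslo_db import concurrency

CONF = cfg.CONF
_BACKEND_MAPPING = {'sqlalchemy': 'mypkg.db.sqlalchemy.api'}  # hypothetical
IMPL = concurrency.TpoolDbapiWrapper(CONF, backend_mapping=_BACKEND_MAPPING)

def instance_get(instance_id):
    # the first call loads the backend and, if [database]use_tpool is
    # set, routes it through eventlet's thread pool proxy
    return IMPL.instance_get(instance_id)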

View File

@@ -1,5 +1,3 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -14,160 +12,4 @@
# License for the specific language governing permissions and limitations
# under the License.
"""DB related custom exceptions.
Custom exceptions intended to determine the causes of specific database
errors. This module provides more generic exceptions than the database-specific
driver libraries, and so users of oslo.db can catch these no matter which
database the application is using. Most of the exceptions are wrappers.
Wrapper exceptions take an original exception as a positional argument and
keep it for deeper debugging.
Example::
try:
statement(arg)
except sqlalchemy.exc.OperationalError as e:
raise DBDuplicateEntry(e)
This is useful for handling more specific error cases later during execution,
when you need to add extra information to an error message. Wrapper
exceptions preserve the original error message, so the low-level cause of an
error is not lost. All of the database API exceptions are wrapped in the
specific exceptions provided below.
Please use only these database-related custom exceptions in `try/except`
statements around database manipulations. This is required for consistent
handling of database errors.
"""
import six
from oslo.db._i18n import _
class DBError(Exception):
"""Base exception for all custom database exceptions.
:kwarg inner_exception: an original exception which was wrapped with
DBError or its subclasses.
"""
def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
super(DBError, self).__init__(six.text_type(inner_exception))
class DBDuplicateEntry(DBError):
"""Duplicate entry at unique column error.
Raised on an attempt to write a duplicate entry to a unique column. The
:attr:`columns` attribute is available on an instance of the exception
and can be used for error handling::
try:
instance_type_ref.save()
except DBDuplicateEntry as e:
if 'colname' in e.columns:
# Handle error.
:kwarg columns: a list of the unique columns for which a duplicate entry
was attempted.
:type columns: list
:kwarg value: the value that was attempted to be written. The value will
be None if it cannot be extracted for a particular database backend; only
MySQL and PostgreSQL 9.x are supported right now.
"""
def __init__(self, columns=None, inner_exception=None, value=None):
self.columns = columns or []
self.value = value
super(DBDuplicateEntry, self).__init__(inner_exception)
class DBReferenceError(DBError):
"""Foreign key violation error.
:param table: the name of the table in which the reference is defined.
:type table: str
:param constraint: a problematic constraint name.
:type constraint: str
:param key: a broken reference key name.
:type key: str
:param key_table: a table name which contains the key.
:type key_table: str
"""
def __init__(self, table, constraint, key, key_table,
inner_exception=None):
self.table = table
self.constraint = constraint
self.key = key
self.key_table = key_table
super(DBReferenceError, self).__init__(inner_exception)
class DBDeadlock(DBError):
"""Database dead lock error.
Deadlock is a situation that occurs when two or more different database
sessions have some data locked, and each database session requests a lock
on the data that another, different, session has already locked.
"""
def __init__(self, inner_exception=None):
super(DBDeadlock, self).__init__(inner_exception)
class DBInvalidUnicodeParameter(Exception):
"""Database unicode error.
Raised when a unicode parameter is passed to a database
without an encoding directive.
"""
message = _("Invalid Parameter: "
"Encoding directive wasn't provided.")
class DbMigrationError(DBError):
"""Wrapped migration specific exception.
Raised when migrations couldn't be completed successfully.
"""
def __init__(self, message=None):
super(DbMigrationError, self).__init__(message)
class DBConnectionError(DBError):
"""Wrapped connection specific exception.
Raised when database connection is failed.
"""
pass
class InvalidSortKey(Exception):
"""A sort key destined for database query usage is invalid."""
message = _("Sort key supplied was not valid.")
class ColumnError(Exception):
"""Error raised when no column or an invalid column is found."""
class BackendNotAvailable(Exception):
"""Error raised when a particular database backend is not available
within a test suite.
"""
from oslo_db.exception import * # noqa
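As the module docstring recommends, callers catch these wrappers instead of driver-specific errors. A short sketch (session, user, and both handlers are hypothetical):

from oslo_db import exception as db_exc

try:
    session.add(user)
    session.flush()
except db_exc.DBDuplicateEntry as e:
    # e.columns lists the unique columns that collided, when extractable
    handle_duplicate(e.columns, e.value)
except db_exc.DBConnectionError:
    schedule_retry()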

View File

@@ -1,220 +1,15 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo.config import cfg
database_opts = [
cfg.StrOpt('sqlite_db',
deprecated_group='DEFAULT',
default='oslo.sqlite',
help='The file name to use with SQLite.'),
cfg.BoolOpt('sqlite_synchronous',
deprecated_group='DEFAULT',
default=True,
help='If True, SQLite uses synchronous mode.'),
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The back end to use for the database.'),
cfg.StrOpt('connection',
help='The SQLAlchemy connection string to use to connect to '
'the database.',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('slave_connection',
secret=True,
help='The SQLAlchemy connection string to use to connect to the'
' slave database.'),
cfg.StrOpt('mysql_sql_mode',
default='TRADITIONAL',
help='The SQL mode to be used for MySQL sessions. '
'This option, including the default, overrides any '
'server-set SQL mode. To use whatever SQL mode '
'is set by the server configuration, '
'set this to no value. Example: mysql_sql_mode='),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='Timeout before idle SQL connections are reaped.'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool.'),
cfg.IntOpt('max_pool_size',
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool.'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='Maximum number of database connection retries '
'during startup. Set to -1 to specify an infinite '
'retry count.'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='Interval between retries of opening a SQL connection.'),
cfg.IntOpt('max_overflow',
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with '
'SQLAlchemy.'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information: 0=None, '
'100=Everything.'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add Python stack traces to SQL as comment strings.'),
cfg.IntOpt('pool_timeout',
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with '
'SQLAlchemy.'),
cfg.BoolOpt('use_db_reconnect',
default=False,
help='Enable the experimental use of database reconnect '
'on connection lost.'),
cfg.IntOpt('db_retry_interval',
default=1,
help='Seconds between database connection retries.'),
cfg.BoolOpt('db_inc_retry_interval',
default=True,
help='If True, increases the interval between database '
'connection retries up to db_max_retry_interval.'),
cfg.IntOpt('db_max_retry_interval',
default=10,
help='If db_inc_retry_interval is set, the '
'maximum seconds between database connection retries.'),
cfg.IntOpt('db_max_retries',
default=20,
help='Maximum database connection retries before error is '
'raised. Set to -1 to specify an infinite retry '
'count.'),
]
def set_defaults(conf, connection=None, sqlite_db=None,
max_pool_size=None, max_overflow=None,
pool_timeout=None):
"""Set defaults for configuration variables.
Overrides default options values.
:param conf: the Config instance in which to set the default options. Using
instances instead of a global config object prevents conflicts between
option declarations.
:type conf: oslo.config.cfg.ConfigOpts instance.
:keyword connection: SQL connection string.
Valid SQLite URL forms are:
* sqlite:///:memory: (or, sqlite://)
* sqlite:///relative/path/to/file.db
* sqlite:////absolute/path/to/file.db
:type connection: str
:keyword sqlite_db: path to SQLite database file.
:type sqlite_db: str
:keyword max_pool_size: the maximum connection pool size. A default of 5 is
used if the value of the parameter is `None`. This is the largest number of
connections that will be kept persistently in the pool. Note that the pool
begins with no connections; once this number of connections is requested,
that number of connections will remain.
:type max_pool_size: int
:default max_pool_size: None
:keyword max_overflow: The maximum overflow size of the pool. When the
number of checked-out connections reaches the size set in pool_size,
additional connections will be returned up to this limit. When those
additional connections are returned to the pool, they are disconnected and
discarded. It follows then that the total number of simultaneous
connections the pool will allow is pool_size + max_overflow, and the total
number of "sleeping" connections the pool will allow is pool_size.
max_overflow can be set to -1 to indicate no overflow limit; no limit will
be placed on the total number of concurrent connections. A default of 10 is
used if the value of the parameter is `None`.
:type max_overflow: int
:default max_overflow: None
:keyword pool_timeout: the number of seconds to wait before giving up on
returning a connection. A default of 30 is used if the value of the
parameter is `None`.
:type pool_timeout: int
:default pool_timeout: None
"""
conf.register_opts(database_opts, group='database')
if connection is not None:
conf.set_default('connection', connection, group='database')
if sqlite_db is not None:
conf.set_default('sqlite_db', sqlite_db, group='database')
if max_pool_size is not None:
conf.set_default('max_pool_size', max_pool_size, group='database')
if max_overflow is not None:
conf.set_default('max_overflow', max_overflow, group='database')
if pool_timeout is not None:
conf.set_default('pool_timeout', pool_timeout, group='database')
def list_opts():
"""Returns a list of oslo.config options available in the library.
The returned list includes all oslo.config options which may be registered
at runtime by the library.
Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by this library.
:returns: a list of (group_name, opts) tuples
"""
return [('database', copy.deepcopy(database_opts))]
from oslo_db.options import * # noqa
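A sketch of the intended use of set_defaults() with a private ConfigOpts instance, as its docstring suggests (the connection URL is illustrative):

from oslo.config import cfg

from oslo_db import options

CONF = cfg.ConfigOpts()
options.set_defaults(CONF,
                     connection='sqlite:////tmp/example.db',
                     max_pool_size=10,
                     max_overflow=20)
# once CONF(...) has parsed config files/CLI, the [database] options
# are available as CONF.database.connection and friends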

View File

@@ -1,3 +1,5 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -9,22 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""compatiblity extensions for SQLAlchemy versions.
Elements within this module provide SQLAlchemy features that have been
added at some point but for which oslo.db provides a compatible versions
for previous SQLAlchemy versions.
"""
from oslo.db.sqlalchemy.compat import engine_connect as _e_conn
from oslo.db.sqlalchemy.compat import handle_error as _h_err
# trying to get: "from oslo.db.sqlalchemy import compat; compat.handle_error"
# flake8 won't let me import handle_error directly
engine_connect = _e_conn.engine_connect
handle_error = _h_err.handle_error
handle_connect_context = _h_err.handle_connect_context
__all__ = [
'engine_connect', 'handle_error',
'handle_connect_context']
from oslo_db.sqlalchemy.compat import engine_connect # noqa
from oslo_db.sqlalchemy.compat import handle_error # noqa
from oslo_db.sqlalchemy.compat import utils # noqa

View File

@@ -1,3 +1,5 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -9,18 +11,5 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sqlalchemy
_SQLA_VERSION = tuple(
int(num) if re.match(r'^\d+$', num) else num
for num in sqlalchemy.__version__.split(".")
)
sqla_100 = _SQLA_VERSION >= (1, 0, 0)
sqla_097 = _SQLA_VERSION >= (0, 9, 7)
sqla_094 = _SQLA_VERSION >= (0, 9, 4)
sqla_090 = _SQLA_VERSION >= (0, 9, 0)
sqla_08 = _SQLA_VERSION >= (0, 8)
from oslo_db.sqlalchemy.compat.utils import * # noqa
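The flags above reduce to element-wise tuple comparisons; for example, under SQLAlchemy 0.9.8 the parsed version compares as in this sketch:

from oslo_db.sqlalchemy.compat import utils as compat_utils

# '0.9.8' -> (0, 9, 8), so:
#   (0, 9, 8) >= (0, 9, 7) -> compat_utils.sqla_097 is True
#   (0, 9, 8) >= (1, 0, 0) -> compat_utils.sqla_100 is False
if compat_utils.sqla_097:
    pass  # e.g. the native handle_error() event is available
else:
    pass  # fall back to the oslo_db compatibility shim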

View File

@@ -1,3 +1,5 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -9,350 +11,5 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Define exception redefinitions for SQLAlchemy DBAPI exceptions."""
import collections
import logging
import re
from sqlalchemy import exc as sqla_exc
from oslo.db._i18n import _LE
from oslo.db import exception
from oslo.db.sqlalchemy import compat
LOG = logging.getLogger(__name__)
_registry = collections.defaultdict(
lambda: collections.defaultdict(
list
)
)
def filters(dbname, exception_type, regex):
"""Mark a function as receiving a filtered exception.
:param dbname: string database name, e.g. 'mysql'
:param exception_type: a SQLAlchemy database exception class, which
extends from :class:`sqlalchemy.exc.DBAPIError`.
:param regex: a string, or a tuple of strings, that will be processed
as matching regular expressions.
"""
def _receive(fn):
_registry[dbname][exception_type].extend(
(fn, re.compile(reg))
for reg in
((regex,) if not isinstance(regex, tuple) else regex)
)
return fn
return _receive
# NOTE(zzzeek) - for Postgresql, catch both OperationalError, as the
# actual error is
# psycopg2.extensions.TransactionRollbackError(OperationalError),
# as well as sqlalchemy.exc.DBAPIError, as SQLAlchemy will reraise it
# as this until issue #3075 is fixed.
@filters("mysql", sqla_exc.OperationalError, r"^.*\b1213\b.*Deadlock found.*")
@filters("mysql", sqla_exc.OperationalError,
r"^.*\b1205\b.*Lock wait timeout exceeded.*")
@filters("mysql", sqla_exc.InternalError, r"^.*\b1213\b.*Deadlock found.*")
@filters("postgresql", sqla_exc.OperationalError, r"^.*deadlock detected.*")
@filters("postgresql", sqla_exc.DBAPIError, r"^.*deadlock detected.*")
@filters("ibm_db_sa", sqla_exc.DBAPIError, r"^.*SQL0911N.*")
def _deadlock_error(operational_error, match, engine_name, is_disconnect):
"""Filter for MySQL or Postgresql deadlock error.
NOTE(comstud): In current versions of DB backends, Deadlock violation
messages follow the structure:
mysql+mysqldb:
(OperationalError) (1213, 'Deadlock found when trying to get lock; try '
'restarting transaction') <query_str> <query_args>
mysql+mysqlconnector:
(InternalError) 1213 (40001): Deadlock found when trying to get lock; try
restarting transaction
postgresql:
(TransactionRollbackError) deadlock detected <deadlock_details>
ibm_db_sa:
SQL0911N The current transaction has been rolled back because of a
deadlock or timeout <deadlock details>
"""
raise exception.DBDeadlock(operational_error)
@filters("mysql", sqla_exc.IntegrityError,
r"^.*\b1062\b.*Duplicate entry '(?P<value>[^']+)'"
r" for key '(?P<columns>[^']+)'.*$")
# NOTE(pkholkin): the first regex is suitable only for PostgreSQL 9.x versions
# the second regex is suitable for PostgreSQL 8.x versions
@filters("postgresql", sqla_exc.IntegrityError,
(r'^.*duplicate\s+key.*"(?P<columns>[^"]+)"\s*\n.*'
r'Key\s+\((?P<key>.*)\)=\((?P<value>.*)\)\s+already\s+exists.*$',
r"^.*duplicate\s+key.*\"(?P<columns>[^\"]+)\"\s*\n.*$"))
def _default_dupe_key_error(integrity_error, match, engine_name,
is_disconnect):
"""Filter for MySQL or Postgresql duplicate key error.
note(boris-42): In current versions of DB backends unique constraint
violation messages follow the structure:
postgres:
1 column - (IntegrityError) duplicate key value violates unique
constraint "users_c1_key"
N columns - (IntegrityError) duplicate key value violates unique
constraint "name_of_our_constraint"
mysql+mysqldb:
1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
'c1'")
N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
with -' for key 'name_of_our_constraint'")
mysql+mysqlconnector:
1 column - (IntegrityError) 1062 (23000): Duplicate entry 'value_of_c1' for
key 'c1'
N columns - (IntegrityError) 1062 (23000): Duplicate entry 'values
joined with -' for key 'name_of_our_constraint'
"""
columns = match.group('columns')
# note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
# where `t` is the table name and columns `c1`, `c2`
# are in UniqueConstraint.
uniqbase = "uniq_"
if not columns.startswith(uniqbase):
if engine_name == "postgresql":
columns = [columns[columns.index("_") + 1:columns.rindex("_")]]
else:
columns = [columns]
else:
columns = columns[len(uniqbase):].split("0")[1:]
value = match.groupdict().get('value')
raise exception.DBDuplicateEntry(columns, integrity_error, value)
@filters("sqlite", sqla_exc.IntegrityError,
(r"^.*columns?(?P<columns>[^)]+)(is|are)\s+not\s+unique$",
r"^.*UNIQUE\s+constraint\s+failed:\s+(?P<columns>.+)$",
r"^.*PRIMARY\s+KEY\s+must\s+be\s+unique.*$"))
def _sqlite_dupe_key_error(integrity_error, match, engine_name, is_disconnect):
"""Filter for SQLite duplicate key error.
note(boris-42): In current versions of DB backends unique constraint
violation messages follow the structure:
sqlite:
1 column - (IntegrityError) column c1 is not unique
N columns - (IntegrityError) column c1, c2, ..., N are not unique
sqlite since 3.7.16:
1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
sqlite since 3.8.2:
(IntegrityError) PRIMARY KEY must be unique
"""
columns = []
# NOTE(ochuprykov): We can get here by last filter in which there are no
# groups. Trying to access the substring that matched by
# the group will lead to IndexError. In this case just
# pass empty list to exception.DBDuplicateEntry
try:
columns = match.group('columns')
columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
except IndexError:
pass
raise exception.DBDuplicateEntry(columns, integrity_error)
@filters("sqlite", sqla_exc.IntegrityError,
r"(?i).*foreign key constraint failed")
@filters("postgresql", sqla_exc.IntegrityError,
r".*on table \"(?P<table>[^\"]+)\" violates "
"foreign key constraint \"(?P<constraint>[^\"]+)\"\s*\n"
"DETAIL: Key \((?P<key>.+)\)=\(.+\) "
"is not present in table "
"\"(?P<key_table>[^\"]+)\".")
@filters("mysql", sqla_exc.IntegrityError,
r".* 'Cannot add or update a child row: "
'a foreign key constraint fails \([`"].+[`"]\.[`"](?P<table>.+)[`"], '
'CONSTRAINT [`"](?P<constraint>.+)[`"] FOREIGN KEY '
'\([`"](?P<key>.+)[`"]\) REFERENCES [`"](?P<key_table>.+)[`"] ')
def _foreign_key_error(integrity_error, match, engine_name, is_disconnect):
"""Filter for foreign key errors."""
try:
table = match.group("table")
except IndexError:
table = None
try:
constraint = match.group("constraint")
except IndexError:
constraint = None
try:
key = match.group("key")
except IndexError:
key = None
try:
key_table = match.group("key_table")
except IndexError:
key_table = None
raise exception.DBReferenceError(table, constraint, key, key_table,
integrity_error)
@filters("ibm_db_sa", sqla_exc.IntegrityError, r"^.*SQL0803N.*$")
def _db2_dupe_key_error(integrity_error, match, engine_name, is_disconnect):
"""Filter for DB2 duplicate key errors.
N columns - (IntegrityError) SQL0803N One or more values in the INSERT
statement, UPDATE statement, or foreign key update caused by a
DELETE statement are not valid because the primary key, unique
constraint or unique index identified by "2" constrains table
"NOVA.KEY_PAIRS" from having duplicate values for the index
key.
"""
# NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
# columns so we have to omit that from the DBDuplicateEntry error.
raise exception.DBDuplicateEntry([], integrity_error)
@filters("mysql", sqla_exc.DBAPIError, r".*\b1146\b")
def _raise_mysql_table_doesnt_exist_asis(
error, match, engine_name, is_disconnect):
"""Raise MySQL error 1146 as is.
Raise MySQL error 1146 as is, so that it does not conflict with
the MySQL dialect's checking a table not existing.
"""
raise error
@filters("*", sqla_exc.OperationalError, r".*")
def _raise_operational_errors_directly_filter(operational_error,
match, engine_name,
is_disconnect):
"""Filter for all remaining OperationalError classes and apply.
Filter for all remaining OperationalError classes and apply
special rules.
"""
if is_disconnect:
# operational errors that represent disconnect
# should be wrapped
raise exception.DBConnectionError(operational_error)
else:
# NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now.
raise operational_error
@filters("mysql", sqla_exc.OperationalError, r".*\(.*(?:2002|2003|2006|2013)")
@filters("ibm_db_sa", sqla_exc.OperationalError, r".*(?:30081)")
def _is_db_connection_error(operational_error, match, engine_name,
is_disconnect):
"""Detect the exception as indicating a recoverable error on connect."""
raise exception.DBConnectionError(operational_error)
@filters("*", sqla_exc.DBAPIError, r".*")
def _raise_for_remaining_DBAPIError(error, match, engine_name, is_disconnect):
"""Filter for remaining DBAPIErrors.
Filter for remaining DBAPIErrors and wrap if they represent
a disconnect error.
"""
if is_disconnect:
raise exception.DBConnectionError(error)
else:
LOG.exception(
_LE('DBAPIError exception wrapped from %s'), error)
raise exception.DBError(error)
@filters('*', UnicodeEncodeError, r".*")
def _raise_for_unicode_encode(error, match, engine_name, is_disconnect):
raise exception.DBInvalidUnicodeParameter()
@filters("*", Exception, r".*")
def _raise_for_all_others(error, match, engine_name, is_disconnect):
LOG.exception(_LE('DB exception wrapped.'))
raise exception.DBError(error)
def handler(context):
"""Iterate through available filters and invoke those which match.
The first one which raises wins. The order in which the filters
are attempted is sorted by specificity - dialect name or "*",
exception class per method resolution order (``__mro__``).
Method resolution order is used so that filter rules indicating a
more specific exception class are attempted first.
"""
def _dialect_registries(engine):
if engine.dialect.name in _registry:
yield _registry[engine.dialect.name]
if '*' in _registry:
yield _registry['*']
for per_dialect in _dialect_registries(context.engine):
for exc in (
context.sqlalchemy_exception,
context.original_exception):
for super_ in exc.__class__.__mro__:
if super_ in per_dialect:
regexp_reg = per_dialect[super_]
for fn, regexp in regexp_reg:
match = regexp.match(exc.args[0])
if match:
try:
fn(
exc,
match,
context.engine.dialect.name,
context.is_disconnect)
except exception.DBConnectionError:
context.is_disconnect = True
raise
def register_engine(engine):
compat.handle_error(engine, handler)
def handle_connect_error(engine):
"""Handle connect error.
Provide a special context that will allow on-connect errors
to be treated within the filtering context.
This routine is dependent on SQLAlchemy version, as version 1.0.0
provides this functionality natively.
"""
with compat.handle_connect_context(handler, engine):
return engine.connect()
from oslo_db.sqlalchemy.exc_filters import * # noqa
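New filters plug into the same registry through the @filters decorator described above. A hypothetical sketch of registering one (the MySQL error code and regex are illustrative, not part of oslo.db):

from sqlalchemy import exc as sqla_exc

from oslo_db import exception
from oslo_db.sqlalchemy import exc_filters

@exc_filters.filters("mysql", sqla_exc.OperationalError,
                     r".*\b1290\b.*read-only.*")
def _read_only_error(error, match, engine_name, is_disconnect):
    # raising inside a filter is what performs the translation
    raise exception.DBError(error)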

View File

@@ -1,160 +1,15 @@
# coding=utf-8
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Based on code in migrate/changeset/databases/sqlite.py, which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from oslo.db._i18n import _
from oslo.db import exception
def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
"""Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository.
:param version: the version to which the database will upgrade or
downgrade. If None, the database updates to the latest
available version.
:param init_version: Initial database version
:param sanity_check: Require schema sanity checking for all tables
"""
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.DbMigrationError(
message=_("version should be an integer"))
current_version = db_version(engine, abs_path, init_version)
repository = _find_migrate_repo(abs_path)
if sanity_check:
_db_schema_sanity_check(engine)
if version is None or version > current_version:
return versioning_api.upgrade(engine, repository, version)
else:
return versioning_api.downgrade(engine, repository,
version)
def _db_schema_sanity_check(engine):
"""Ensure all database tables were created with required parameters.
:param engine: SQLAlchemy engine instance for a given database
"""
if engine.name == 'mysql':
onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
'from information_schema.TABLES '
'where TABLE_SCHEMA=%s and '
'TABLE_COLLATION NOT LIKE \'%%utf8%%\'')
# NOTE(morganfainberg): exclude the sqlalchemy-migrate and alembic
# versioning tables from the tables we need to verify utf8 status on.
# Non-standard table names are not supported.
EXCLUDED_TABLES = ['migrate_version', 'alembic_version']
table_names = [res[0] for res in
engine.execute(onlyutf8_sql, engine.url.database) if
res[0].lower() not in EXCLUDED_TABLES]
if len(table_names) > 0:
raise ValueError(_('Tables "%s" have non utf8 collation, '
'please make sure all tables are CHARSET=utf8'
) % ','.join(table_names))
def db_version(engine, abs_path, init_version):
"""Show the current version of the repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository
:param init_version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
try:
return versioning_api.db_version(engine, repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0 or 'alembic_version' in tables:
db_version_control(engine, abs_path, version=init_version)
return versioning_api.db_version(engine, repository)
else:
raise exception.DbMigrationError(
message=_(
"The database is not under version control, but has "
"tables. Please stamp the current version of the schema "
"manually."))
def db_version_control(engine, abs_path, version=None):
"""Mark a database as under this repository's version control.
Once a database is under version control, schema changes should
only be done via change scripts in this repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
versioning_api.version_control(engine, repository, version)
return version
def _find_migrate_repo(abs_path):
"""Get the project's change script repository
:param abs_path: Absolute path to migrate repository
"""
if not os.path.exists(abs_path):
raise exception.DbMigrationError("Path %s not found" % abs_path)
return Repository(abs_path)
from oslo_db.sqlalchemy.migration import * # noqa
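A sketch of driving these helpers end to end (the repository path is hypothetical): put the database under version control, then run all pending change scripts.

from oslo_db.sqlalchemy import migration
from oslo_db.sqlalchemy import session

engine = session.create_engine('sqlite:////tmp/example.db')
migration.db_version_control(engine, '/opt/app/migrate_repo', version=0)
migration.db_sync(engine, '/opt/app/migrate_repo')  # upgrade to latest
print(migration.db_version(engine, '/opt/app/migrate_repo', init_version=0))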

View File

@@ -0,0 +1,18 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db.sqlalchemy.migration_cli import ext_alembic # noqa
from oslo_db.sqlalchemy.migration_cli import ext_base # noqa
from oslo_db.sqlalchemy.migration_cli import ext_migrate # noqa
from oslo_db.sqlalchemy.migration_cli import manager # noqa

View File

@@ -1,8 +1,3 @@
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,113 +11,5 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""
import six
from oslo.utils import timeutils
from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper
class ModelBase(six.Iterator):
"""Base class for models."""
__table_initialized__ = False
def save(self, session):
"""Save this object."""
# NOTE(boris-42): This part of the code should look like:
#                   session.add(self)
#                   session.flush()
#                 But there is a bug in sqlalchemy and eventlet that
#                 raises a NoneType exception if there is no running
#                 transaction and rollback is called. As long as
#                 sqlalchemy has this bug, we have to create the
#                 transaction explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def __contains__(self, key):
return hasattr(self, key)
def get(self, key, default=None):
return getattr(self, key, default)
@property
def _extra_keys(self):
"""Specifies custom fields
Subclasses can override this property to return a list
of custom fields that should be included in their dict
representation.
For reference check tests/db/sqlalchemy/test_models.py
"""
return []
def __iter__(self):
columns = list(dict(object_mapper(self).columns).keys())
# NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name'
# property for an Instance.
columns.extend(self._extra_keys)
return ModelIterator(self, iter(columns))
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in six.iteritems(values):
setattr(self, k, v)
def iteritems(self):
"""Make the model object behave like a dict.
Includes attributes from joins.
"""
local = dict(self)
joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
if not k[0] == '_'])
local.update(joined)
return six.iteritems(local)
class ModelIterator(ModelBase, six.Iterator):
def __init__(self, model, columns):
self.model = model
self.i = columns
def __iter__(self):
return self
# In Python 3, __next__() has replaced next().
def __next__(self):
n = six.advance_iterator(self.i)
return n, getattr(self.model, n)
class TimestampMixin(object):
created_at = Column(DateTime, default=lambda: timeutils.utcnow())
updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())
class SoftDeleteMixin(object):
deleted_at = Column(DateTime)
deleted = Column(Integer, default=0)
def soft_delete(self, session):
"""Mark this object as deleted."""
self.deleted = self.id
self.deleted_at = timeutils.utcnow()
self.save(session=session)
from oslo_db.sqlalchemy.models import * # noqa
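A sketch of combining ModelBase with the mixins above in a declarative model (table and column names are illustrative):

from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

from oslo_db.sqlalchemy import models

BASE = declarative_base()

class Instance(BASE, models.ModelBase,
               models.TimestampMixin, models.SoftDeleteMixin):
    __tablename__ = 'instances'
    id = Column(Integer, primary_key=True)
    name = Column(String(255))

# dict-style access comes from ModelBase:
#   inst['name'], inst.get('name'), dict(inst)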

View File

@@ -1,4 +1,3 @@
# Copyright 2013 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,495 +12,4 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Provision test environment for specific DB backends"""
import abc
import argparse
import logging
import os
import random
import re
import string
import six
from six import moves
import sqlalchemy
from sqlalchemy.engine import url as sa_url
from oslo.db._i18n import _LI
from oslo.db import exception
from oslo.db.sqlalchemy import session
from oslo.db.sqlalchemy import utils
LOG = logging.getLogger(__name__)
class ProvisionedDatabase(object):
"""Represent a single database node that can be used for testing in
a serialized fashion.
``ProvisionedDatabase`` includes features for full lifecycle management
of a node, in a way that is context-specific. Depending on how the
test environment runs, ``ProvisionedDatabase`` should know if it needs
to create and drop databases or if it is making use of a database that
is maintained by an external process.
"""
def __init__(self, database_type):
self.backend = Backend.backend_for_database_type(database_type)
self.db_token = _random_ident()
self.backend.create_named_database(self.db_token)
self.engine = self.backend.provisioned_engine(self.db_token)
def dispose(self):
self.engine.dispose()
self.backend.drop_named_database(self.db_token)
class Backend(object):
"""Represent a particular database backend that may be provisionable.
The ``Backend`` object maintains a database type (e.g. database without
specific driver type, such as "sqlite", "postgresql", etc.),
a target URL, a base ``Engine`` for that URL object that can be used
to provision databases and a ``BackendImpl`` which knows how to perform
operations against this type of ``Engine``.
"""
backends_by_database_type = {}
def __init__(self, database_type, url):
self.database_type = database_type
self.url = url
self.verified = False
self.engine = None
self.impl = BackendImpl.impl(database_type)
Backend.backends_by_database_type[database_type] = self
@classmethod
def backend_for_database_type(cls, database_type):
"""Return and verify the ``Backend`` for the given database type.
Creates the engine if it does not already exist and raises
``BackendNotAvailable`` if it cannot be produced.
:return: a base ``Engine`` that allows provisioning of databases.
:raises: ``BackendNotAvailable``, if an engine for this backend
cannot be produced.
"""
try:
backend = cls.backends_by_database_type[database_type]
except KeyError:
raise exception.BackendNotAvailable(database_type)
else:
return backend._verify()
@classmethod
def all_viable_backends(cls):
"""Return an iterator of all ``Backend`` objects that are present
and provisionable.
"""
for backend in cls.backends_by_database_type.values():
try:
yield backend._verify()
except exception.BackendNotAvailable:
pass
def _verify(self):
"""Verify that this ``Backend`` is available and provisionable.
:return: this ``Backend``
:raises: ``BackendNotAvailable`` if the backend is not available.
"""
if not self.verified:
try:
eng = self._ensure_backend_available(self.url)
except exception.BackendNotAvailable:
raise
else:
self.engine = eng
finally:
self.verified = True
if self.engine is None:
raise exception.BackendNotAvailable(self.database_type)
return self
@classmethod
def _ensure_backend_available(cls, url):
url = sa_url.make_url(str(url))
try:
eng = sqlalchemy.create_engine(url)
except ImportError as i_e:
# SQLAlchemy performs an "import" of the DBAPI module
# within create_engine(). So if ibm_db_sa, cx_oracle etc.
# isn't installed, we get an ImportError here.
LOG.info(
_LI("The %(dbapi)s backend is unavailable: %(err)s"),
dict(dbapi=url.drivername, err=i_e))
raise exception.BackendNotAvailable("No DBAPI installed")
else:
try:
conn = eng.connect()
except sqlalchemy.exc.DBAPIError as d_e:
# upon connect, SQLAlchemy calls dbapi.connect(). This
# usually raises OperationalError and should always at
# least raise a SQLAlchemy-wrapped DBAPI Error.
LOG.info(
_LI("The %(dbapi)s backend is unavailable: %(err)s"),
dict(dbapi=url.drivername, err=d_e)
)
raise exception.BackendNotAvailable("Could not connect")
else:
conn.close()
return eng
def create_named_database(self, ident):
"""Create a database with the given name."""
self.impl.create_named_database(self.engine, ident)
def drop_named_database(self, ident, conditional=False):
"""Drop a database with the given name."""
self.impl.drop_named_database(
self.engine, ident,
conditional=conditional)
def database_exists(self, ident):
"""Return True if a database of the given name exists."""
return self.impl.database_exists(self.engine, ident)
def provisioned_engine(self, ident):
"""Given the URL of a particular database backend and the string
name of a particular 'database' within that backend, return
an Engine instance whose connections will refer directly to the
named database.
For hostname-based URLs, this typically involves switching just the
'database' portion of the URL with the given name and creating
an engine.
For URLs that instead deal with DSNs, the rules may be more custom;
for example, the engine may need to connect to the root URL and
then emit a command to switch to the named database.
"""
return self.impl.provisioned_engine(self.url, ident)
@classmethod
def _setup(cls):
"""Initial startup feature will scan the environment for configured
URLs and place them into the list of URLs we will use for provisioning.
This searches through OS_TEST_DBAPI_ADMIN_CONNECTION for URLs. If
not present, we set up URLs based on the "opportunistic" convention,
e.g. username+password = "openstack_citest".
The provisioning system will then use or discard these URLs as they
are requested, based on whether or not the target database is actually
found to be available.
"""
configured_urls = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', None)
if configured_urls:
configured_urls = configured_urls.split(";")
else:
configured_urls = [
impl.create_opportunistic_driver_url()
for impl in BackendImpl.all_impls()
]
for url_str in configured_urls:
url = sa_url.make_url(url_str)
m = re.match(r'([^+]+?)(?:\+(.+))?$', url.drivername)
database_type, drivertype = m.group(1, 2)
Backend(database_type, url)
@six.add_metaclass(abc.ABCMeta)
class BackendImpl(object):
"""Provide database-specific implementations of key provisioning
functions.
``BackendImpl`` is owned by a ``Backend`` instance which delegates
to it for all database-specific features.
"""
@classmethod
def all_impls(cls):
"""Return an iterator of all possible BackendImpl objects.
These are BackendImpls that are implemented, but not
necessarily provisionable.
"""
for database_type in cls.impl.reg:
if database_type == '*':
continue
yield BackendImpl.impl(database_type)
@utils.dispatch_for_dialect("*")
def impl(drivername):
"""Return a ``BackendImpl`` instance corresponding to the
given driver name.
This is a dispatched method which will refer to the constructor
of implementing subclasses.
"""
raise NotImplementedError(
"No provision impl available for driver: %s" % drivername)
def __init__(self, drivername):
self.drivername = drivername
@abc.abstractmethod
def create_opportunistic_driver_url(self):
"""Produce a string url known as the 'opportunistic' URL.
This URL is one that corresponds to an established OpenStack
convention for a pre-established database login, which, when
detected as available in the local environment, is automatically
used as a test platform for a specific type of driver.
"""
@abc.abstractmethod
def create_named_database(self, engine, ident):
"""Create a database with the given name."""
@abc.abstractmethod
def drop_named_database(self, engine, ident, conditional=False):
"""Drop a database with the given name."""
def provisioned_engine(self, base_url, ident):
"""Return a provisioned engine.
Given the URL of a particular database backend and the string
name of a particular 'database' within that backend, return
an Engine instance whose connections will refer directly to the
named database.
For hostname-based URLs, this typically involves switching just the
'database' portion of the URL with the given name and creating
an engine.
For URLs that instead deal with DSNs, the rules may be more custom;
for example, the engine may need to connect to the root URL and
then emit a command to switch to the named database.
"""
url = sa_url.make_url(str(base_url))
url.database = ident
return session.create_engine(
url,
logging_name="%s@%s" % (self.drivername, ident))
@BackendImpl.impl.dispatch_for("mysql")
class MySQLBackendImpl(BackendImpl):
def create_opportunistic_driver_url(self):
return "mysql://openstack_citest:openstack_citest@localhost/"
def create_named_database(self, engine, ident):
with engine.connect() as conn:
conn.execute("CREATE DATABASE %s" % ident)
def drop_named_database(self, engine, ident, conditional=False):
with engine.connect() as conn:
if not conditional or self.database_exists(conn, ident):
conn.execute("DROP DATABASE %s" % ident)
def database_exists(self, engine, ident):
return bool(engine.scalar("SHOW DATABASES LIKE '%s'" % ident))
@BackendImpl.impl.dispatch_for("sqlite")
class SQLiteBackendImpl(BackendImpl):
def create_opportunistic_driver_url(self):
return "sqlite://"
def create_named_database(self, engine, ident):
url = self._provisioned_database_url(engine.url, ident)
eng = sqlalchemy.create_engine(url)
eng.connect().close()
def provisioned_engine(self, base_url, ident):
return session.create_engine(
self._provisioned_database_url(base_url, ident))
def drop_named_database(self, engine, ident, conditional=False):
url = self._provisioned_database_url(engine.url, ident)
filename = url.database
if filename and (not conditional or os.access(filename, os.F_OK)):
os.remove(filename)
def database_exists(self, engine, ident):
url = self._provisioned_database_url(engine.url, ident)
filename = url.database
return not filename or os.access(filename, os.F_OK)
def _provisioned_database_url(self, base_url, ident):
if base_url.database:
return sa_url.make_url("sqlite:////tmp/%s.db" % ident)
else:
return base_url
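# Note (illustrative): a file-based URL such as sqlite:////tmp/base.db maps
# to sqlite:////tmp/<ident>.db, while the in-memory URL sqlite:// is
# returned unchanged, since each engine gets its own private in-memory
# database anyway.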
@BackendImpl.impl.dispatch_for("postgresql")
class PostgresqlBackendImpl(BackendImpl):
def create_opportunistic_driver_url(self):
return "postgresql://openstack_citest:openstack_citest"\
"@localhost/postgres"
def create_named_database(self, engine, ident):
with engine.connect().execution_options(
isolation_level="AUTOCOMMIT") as conn:
conn.execute("CREATE DATABASE %s" % ident)
def drop_named_database(self, engine, ident, conditional=False):
with engine.connect().execution_options(
isolation_level="AUTOCOMMIT") as conn:
self._close_out_database_users(conn, ident)
if conditional:
conn.execute("DROP DATABASE IF EXISTS %s" % ident)
else:
conn.execute("DROP DATABASE %s" % ident)
def database_exists(self, engine, ident):
return bool(
engine.scalar(
sqlalchemy.text(
"select datname from pg_database "
"where datname=:name"), name=ident)
)
def _close_out_database_users(self, conn, ident):
"""Attempt to guarantee a database can be dropped.
Optional feature which guarantees no connections with our
username are attached to the DB we're going to drop.
This method has caveats; for one, the 'pid' column was named
'procpid' prior to Postgresql 9.2. But more critically,
prior to 9.2 this operation required superuser permissions,
even if the connections we're closing are under the same username
as us. In more recent versions this restriction has been
lifted for same-user connections.
"""
if conn.dialect.server_version_info >= (9, 2):
conn.execute(
sqlalchemy.text(
"select pg_terminate_backend(pid) "
"from pg_stat_activity "
"where usename=current_user and "
"pid != pg_backend_pid() "
"and datname=:dname"
), dname=ident)
def _random_ident():
return ''.join(
random.choice(string.ascii_lowercase)
for i in moves.range(10))
def _echo_cmd(args):
idents = [_random_ident() for i in moves.range(args.instances_count)]
print("\n".join(idents))
def _create_cmd(args):
idents = [_random_ident() for i in moves.range(args.instances_count)]
for backend in Backend.all_viable_backends():
for ident in idents:
backend.create_named_database(ident)
print("\n".join(idents))
def _drop_cmd(args):
for backend in Backend.all_viable_backends():
for ident in args.instances:
backend.drop_named_database(ident, args.conditional)
Backend._setup()
def main(argv=None):
"""Command line interface to create/drop databases.
::create: Create test databases with random names.
::drop: Drop databases created by a previous command.
::echo: Create random names and display them; don't create databases.
"""
parser = argparse.ArgumentParser(
description='Controller to handle database creation and dropping'
' commands.',
epilog='Typically called by the test runner, e.g. shell script, '
'testr runner via .testr.conf, or other system.')
subparsers = parser.add_subparsers(
help='Subcommands to manipulate temporary test databases.')
create = subparsers.add_parser(
'create',
help='Create temporary test databases.')
create.set_defaults(which=_create_cmd)
create.add_argument(
'instances_count',
type=int,
help='Number of databases to create.')
drop = subparsers.add_parser(
'drop',
help='Drop temporary test databases.')
drop.set_defaults(which=_drop_cmd)
drop.add_argument(
'instances',
nargs='+',
help='List of database names to be dropped.')
drop.add_argument(
'--conditional',
action="store_true",
help="Check if database exists first before dropping"
)
echo = subparsers.add_parser(
'echo',
help="Create random database names and display only."
)
echo.set_defaults(which=_echo_cmd)
echo.add_argument(
'instances_count',
type=int,
help='Number of identifiers to create.')
args = parser.parse_args(argv)
cmd = args.which
cmd(args)
if __name__ == "__main__":
main()
from oslo_db.sqlalchemy.provision import * # noqa

View File

@ -1,5 +1,3 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -14,834 +12,4 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
`model_query()` will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
.. note:: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and `reservation_rollback()`.
Examples:
.. code-block:: python
def get_foo(context, foo):
return (model_query(context, models.Foo).
filter_by(foo=foo).
first())
def update_foo(context, id, newfoo):
(model_query(context, models.Foo).
filter_by(id=id).
update({'foo': newfoo}))
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keep all the reads and writes within
the context managed by a single session. In this way, the session's
`__exit__` handler will take care of calling `flush()` and `commit()` for
you. If using this approach, you should not explicitly call `flush()` or
`commit()`. Any error within the context of the session will cause the
session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
raised in `session`'s `__exit__` handler, and any try/except within the
context managed by `session` will not be triggered. And catching other
non-database errors in the session will not trigger the ROLLBACK, so
exception handlers should always be outside the session, unless the
developer wants to do a partial commit on purpose. If the connection is
dropped before this is possible, the database will implicitly roll back the
transaction.
.. note:: Statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call `model.save()`:
.. code-block:: python
def create_many_foo(context, foos):
session = sessionmaker()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = sessionmaker()
with session.begin():
foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id).
first())
(model_query(context, models.Bar, session).
filter_by(id=foo_ref['bar_id']).
update({'bar': newbar}))
.. note:: `update_bar` is a trivially simple example of using
``with session.begin``. Whereas `create_many_foo` is a good example of
when a transaction is needed, it is always best to use as few queries as
possible.
The two queries in `update_bar` can be better expressed using a single query
which avoids the need for an explicit transaction. It can be expressed like
so:
.. code-block:: python
def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id).
filter_by(id=foo_id).
limit(1).
subquery())
(model_query(context, models.Bar).
filter_by(id=subq.as_scalar()).
update({'bar': newbar}))
For reference, this emits approximately the following SQL statement:
.. code-block:: sql
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
.. note:: `create_duplicate_foo` is a trivially simple example of catching an
exception while using ``with session.begin``. Here create two duplicate
instances with same primary key, must catch the exception out of context
managed by a single session:
.. code-block:: python
def create_duplicate_foo(context):
foo1 = models.Foo()
foo2 = models.Foo()
foo1.id = foo2.id = 1
session = sessionmaker()
try:
with session.begin():
session.add(foo1)
session.add(foo2)
except exception.DBDuplicateEntry as e:
handle_error(e)
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call `session.begin()` on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the above means
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
.. code-block:: python
def myfunc(foo):
session = sessionmaker()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = sessionmaker()
with session.begin(subtransaction=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your ``with session.begin()`` block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid ``with_lockmode('UPDATE')`` when possible.
In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use
``INSERT .. ON DUPLICATE KEY UPDATE``.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
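Such an upsert would look approximately like:
.. code-block:: sql
    INSERT INTO bar (id, bar) VALUES (${id}, ${newbar})
    ON DUPLICATE KEY UPDATE bar = ${newbar};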
Enabling soft deletes:
* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
to your model class. For example:
.. code-block:: python
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:
`model.soft_delete()` and `query.soft_delete()`.
The `model.soft_delete()` method works with a single already-fetched entry.
`query.soft_delete()` makes only one db request for all entries that
correspond to the query.
* In almost all cases you should use `query.soft_delete()`. Some examples:
.. code-block:: python
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = sessionmaker()
with session.begin(subtransactions=True):
count = (model_query(BarModel).
find(some_condition).
soft_delete(synchronize_session=True))
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where `model.soft_delete()` is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
.. code-block:: python
def soft_delete_bar_model():
session = sessionmaker()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and
then soft delete them you should use the `query.soft_delete()` method:
.. code-block:: python
def soft_delete_multi_models():
session = sessionmaker()
with session.begin():
query = (model_query(BarModel, session=session).
find(some_condition))
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using `model.soft_delete()`, as in the following
example, is very inefficient.
.. code-block:: python
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import itertools
import logging
import re
import time
from oslo.utils import timeutils
import six
import sqlalchemy.orm
from sqlalchemy import pool
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import select
from oslo.db._i18n import _LW
from oslo.db import exception
from oslo.db import options
from oslo.db.sqlalchemy import compat
from oslo.db.sqlalchemy import exc_filters
from oslo.db.sqlalchemy import utils
LOG = logging.getLogger(__name__)
def _thread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
execute instead of time.sleep(0).
Force a context switch. With common database backends (eg MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that eventlet cannot monkey patch.
"""
time.sleep(0)
def _connect_ping_listener(connection, branch):
"""Ping the server at connection startup.
Ping the server at transaction begin and transparently reconnect
if a disconnect exception occurs.
"""
if branch:
return
# turn off "close with result". This can also be accomplished
# by branching the connection, however just setting the flag is
# more performant and also doesn't get involved with some
# connection-invalidation awkwardness that occurs (see
# https://bitbucket.org/zzzeek/sqlalchemy/issue/3215/)
save_should_close_with_result = connection.should_close_with_result
connection.should_close_with_result = False
try:
# run a SELECT 1. use a core select() so that
# any details like that needed by Oracle, DB2 etc. are handled.
connection.scalar(select([1]))
except exception.DBConnectionError:
# catch DBConnectionError, which is raised by the filter
# system.
# disconnect detected. The connection is now
# "invalid", but the pool should be ready to return
# new connections assuming they are good now.
# run the select again to re-validate the Connection.
connection.scalar(select([1]))
finally:
connection.should_close_with_result = save_should_close_with_result
def _setup_logging(connection_debug=0):
"""setup_logging function maps SQL debug level to Python log level.
Connection_debug is a verbosity of SQL debugging information.
0=None(default value),
1=Processed only messages with WARNING level or higher
50=Processed only messages with INFO level or higher
100=Processed only messages with DEBUG level
"""
if connection_debug >= 0:
logger = logging.getLogger('sqlalchemy.engine')
if connection_debug >= 100:
logger.setLevel(logging.DEBUG)
elif connection_debug >= 50:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
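# Illustrative effect: _setup_logging(connection_debug=50) makes the
# 'sqlalchemy.engine' logger emit SQL statements at INFO level, while
# connection_debug=100 also logs result rows at DEBUG level.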
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
idle_timeout=3600,
connection_debug=0, max_pool_size=None, max_overflow=None,
pool_timeout=None, sqlite_synchronous=True,
connection_trace=False, max_retries=10, retry_interval=10,
thread_checkin=True, logging_name=None):
"""Return a new SQLAlchemy engine."""
url = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
"pool_recycle": idle_timeout,
'convert_unicode': True,
'connect_args': {},
'logging_name': logging_name
}
_setup_logging(connection_debug)
_init_connection_args(
url, engine_args,
sqlite_fk=sqlite_fk,
max_pool_size=max_pool_size,
max_overflow=max_overflow,
pool_timeout=pool_timeout
)
engine = sqlalchemy.create_engine(url, **engine_args)
_init_events(
engine,
mysql_sql_mode=mysql_sql_mode,
sqlite_synchronous=sqlite_synchronous,
sqlite_fk=sqlite_fk,
thread_checkin=thread_checkin,
connection_trace=connection_trace
)
# register alternate exception handler
exc_filters.register_engine(engine)
# register engine connect handler
compat.engine_connect(engine, _connect_ping_listener)
# initial connect + test
_test_connection(engine, max_retries, retry_interval)
return engine
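# Illustrative usage only; the URL is a placeholder:
#
#     engine = create_engine("mysql://user:secret@localhost/mydb",
#                            mysql_sql_mode='TRADITIONAL',
#                            max_retries=3, retry_interval=5)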
@utils.dispatch_for_dialect('*', multiple=True)
def _init_connection_args(
url, engine_args,
max_pool_size=None, max_overflow=None, pool_timeout=None, **kw):
pool_class = url.get_dialect().get_pool_class(url)
if issubclass(pool_class, pool.QueuePool):
if max_pool_size is not None:
engine_args['pool_size'] = max_pool_size
if max_overflow is not None:
engine_args['max_overflow'] = max_overflow
if pool_timeout is not None:
engine_args['pool_timeout'] = pool_timeout
@_init_connection_args.dispatch_for("sqlite")
def _init_connection_args(url, engine_args, **kw):
pool_class = url.get_dialect().get_pool_class(url)
# singletonthreadpool is used for :memory: connections;
# replace it with StaticPool.
if issubclass(pool_class, pool.SingletonThreadPool):
engine_args["poolclass"] = pool.StaticPool
engine_args['connect_args']['check_same_thread'] = False
@_init_connection_args.dispatch_for("postgresql")
def _init_connection_args(url, engine_args, **kw):
if 'client_encoding' not in url.query:
# Set encoding using engine_args instead of connect_args since
# it's supported for PostgreSQL 8.*. More details at:
# http://docs.sqlalchemy.org/en/rel_0_9/dialects/postgresql.html
engine_args['client_encoding'] = 'utf8'
@_init_connection_args.dispatch_for("mysql")
def _init_connection_args(url, engine_args, **kw):
if 'charset' not in url.query:
engine_args['connect_args']['charset'] = 'utf8'
@_init_connection_args.dispatch_for("mysql+mysqlconnector")
def _init_connection_args(url, engine_args, **kw):
# mysqlconnector engine (<1.0) incorrectly defaults to
# raise_on_warnings=True
# https://bitbucket.org/zzzeek/sqlalchemy/issue/2515
if 'raise_on_warnings' not in url.query:
engine_args['connect_args']['raise_on_warnings'] = False
@_init_connection_args.dispatch_for("mysql+mysqldb")
@_init_connection_args.dispatch_for("mysql+oursql")
def _init_connection_args(url, engine_args, **kw):
# Those drivers require use_unicode=0 to avoid performance drop due
# to internal usage of Python unicode objects in the driver
# http://docs.sqlalchemy.org/en/rel_0_9/dialects/mysql.html
if 'use_unicode' not in url.query:
engine_args['connect_args']['use_unicode'] = 0
@utils.dispatch_for_dialect('*', multiple=True)
def _init_events(engine, thread_checkin=True, connection_trace=False, **kw):
"""Set up event listeners for all database backends."""
if connection_trace:
_add_trace_comments(engine)
if thread_checkin:
sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
@_init_events.dispatch_for("mysql")
def _init_events(engine, mysql_sql_mode=None, **kw):
"""Set up event listeners for MySQL."""
if mysql_sql_mode is not None:
@sqlalchemy.event.listens_for(engine, "connect")
def _set_session_sql_mode(dbapi_con, connection_rec):
cursor = dbapi_con.cursor()
cursor.execute("SET SESSION sql_mode = %s", [mysql_sql_mode])
@sqlalchemy.event.listens_for(engine, "first_connect")
def _check_effective_sql_mode(dbapi_con, connection_rec):
if mysql_sql_mode is not None:
_set_session_sql_mode(dbapi_con, connection_rec)
cursor = dbapi_con.cursor()
cursor.execute("SHOW VARIABLES LIKE 'sql_mode'")
realmode = cursor.fetchone()
if realmode is None:
LOG.warning(_LW('Unable to detect effective SQL mode'))
else:
realmode = realmode[1]
LOG.debug('MySQL server mode set to %s', realmode)
if 'TRADITIONAL' not in realmode.upper() and \
'STRICT_ALL_TABLES' not in realmode.upper():
LOG.warning(
_LW(
"MySQL SQL mode is '%s', "
"consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
realmode)
@_init_events.dispatch_for("sqlite")
def _init_events(engine, sqlite_synchronous=True, sqlite_fk=False, **kw):
"""Set up event listeners for SQLite.
This includes several settings made on connections as they are
created, as well as transactional control extensions.
"""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
@sqlalchemy.event.listens_for(engine, "connect")
def _sqlite_connect_events(dbapi_con, con_record):
# Add REGEXP functionality on SQLite connections
dbapi_con.create_function('regexp', 2, regexp)
if not sqlite_synchronous:
# Switch sqlite connections to non-synchronous mode
dbapi_con.execute("PRAGMA synchronous = OFF")
# Disable pysqlite's emitting of the BEGIN statement entirely.
# Also stops it from emitting COMMIT before any DDL.
# below, we emit BEGIN ourselves.
# see http://docs.sqlalchemy.org/en/rel_0_9/dialects/\
# sqlite.html#serializable-isolation-savepoints-transactional-ddl
dbapi_con.isolation_level = None
if sqlite_fk:
# Ensures that the foreign key constraints are enforced in SQLite.
dbapi_con.execute('pragma foreign_keys=ON')
@sqlalchemy.event.listens_for(engine, "begin")
def _sqlite_emit_begin(conn):
# emit our own BEGIN, checking for existing
# transactional state
if 'in_transaction' not in conn.info:
conn.execute("BEGIN")
conn.info['in_transaction'] = True
@sqlalchemy.event.listens_for(engine, "rollback")
@sqlalchemy.event.listens_for(engine, "commit")
def _sqlite_end_transaction(conn):
# remove transactional marker
conn.info.pop('in_transaction', None)
def _test_connection(engine, max_retries, retry_interval):
if max_retries == -1:
attempts = itertools.count()
else:
attempts = six.moves.range(max_retries)
# See: http://legacy.python.org/dev/peps/pep-3110/#semantic-changes for
# why we are not using 'de' directly (it can be removed from the local
# scope).
de_ref = None
for attempt in attempts:
try:
return exc_filters.handle_connect_error(engine)
except exception.DBConnectionError as de:
msg = _LW('SQL connection failed. %s attempts left.')
LOG.warning(msg, max_retries - attempt)
time.sleep(retry_interval)
de_ref = de
else:
if de_ref is not None:
six.reraise(type(de_ref), de_ref)
class Query(sqlalchemy.orm.query.Query):
"""Subclass of sqlalchemy.query with soft_delete() method."""
def soft_delete(self, synchronize_session='evaluate'):
return self.update({'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()},
synchronize_session=synchronize_session)
class Session(sqlalchemy.orm.session.Session):
"""Custom Session class to avoid SqlAlchemy Session monkey patching."""
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
return sqlalchemy.orm.sessionmaker(bind=engine,
class_=Session,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
query_cls=Query)
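# Illustrative usage only -- ``engine`` would come from create_engine()
# above, and ``MyModel`` is assumed to carry the soft-delete columns that
# Query.soft_delete() updates:
#
#     maker = get_maker(engine)
#     session = maker()
#     count = session.query(MyModel).filter_by(deleted=0).soft_delete()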
def _add_trace_comments(engine):
"""Add trace comments.
Augment statements with a trace of the immediate calling code
for a given statement.
"""
import os
import sys
import traceback
target_paths = set([
os.path.dirname(sys.modules['oslo.db'].__file__),
os.path.dirname(sys.modules['sqlalchemy'].__file__)
])
@sqlalchemy.event.listens_for(engine, "before_cursor_execute", retval=True)
def before_cursor_execute(conn, cursor, statement, parameters, context,
executemany):
# NOTE(zzzeek) - if different steps per DB dialect are desirable
# here, switch out on engine.name for now.
stack = traceback.extract_stack()
our_line = None
for idx, (filename, line, method, function) in enumerate(stack):
for tgt in target_paths:
if filename.startswith(tgt):
our_line = idx
break
if our_line:
break
if our_line:
trace = "; ".join(
"File: %s (%s) %s" % (
line[0], line[1], line[2]
)
# include three lines of context.
for line in stack[our_line - 3:our_line]
)
statement = "%s -- %s" % (statement, trace)
return statement, parameters
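# The augmented statement ends up looking approximately like (paths and
# line numbers illustrative):
#
#     SELECT foo.id FROM foo -- File: myproj/db/api.py (42) get_foo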
class EngineFacade(object):
"""A helper class for removing of global engine instances from oslo.db.
As a library, oslo.db can't decide where to store/when to create engine
and sessionmaker instances, so this must be left for a target application.
On the other hand, in order to simplify the adoption of oslo.db changes,
we'll provide a helper class, which creates engine and sessionmaker
on its instantiation and provides get_engine()/get_session() methods
that are compatible with corresponding utility functions that currently
exist in target projects, e.g. in Nova.
engine/sessionmaker instances will still be global (and they are meant to
be global), but they will be stored in the app context, rather than in the
oslo.db context.
Note: use of this helper is completely optional and you are encouraged to
integrate engine/sessionmaker instances into your apps any way you like
(e.g. one might want to bind a session to a request context). Two important
things to remember:
1. An Engine instance is effectively a pool of DB connections, so it's
meant to be shared (and it's thread-safe).
2. A Session instance is not meant to be shared and represents a DB
transactional context (i.e. it's not thread-safe). sessionmaker is
a factory of sessions.
"""
def __init__(self, sql_connection, slave_connection=None,
sqlite_fk=False, autocommit=True,
expire_on_commit=False, **kwargs):
"""Initialize engine and sessionmaker instances.
:param sql_connection: the connection string for the database to use
:type sql_connection: string
:param slave_connection: the connection string for the 'slave' database
to use. If not provided, the master database
will be used for all operations. Note: this
is meant to be used for offloading of read
operations to asynchronously replicated slaves
to reduce the load on the master database.
:type slave_connection: string
:param sqlite_fk: enable foreign keys in SQLite
:type sqlite_fk: bool
:param autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:param expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
Keyword arguments:
:keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions.
(defaults to TRADITIONAL)
:keyword idle_timeout: timeout before idle sql connections are reaped
(defaults to 3600)
:keyword connection_debug: verbosity of SQL debugging information.
-1=Off, 0=None, 100=Everything (defaults
to 0)
:keyword max_pool_size: maximum number of SQL connections to keep open
in a pool (defaults to SQLAlchemy settings)
:keyword max_overflow: if set, use this value for max_overflow with
sqlalchemy (defaults to SQLAlchemy settings)
:keyword pool_timeout: if set, use this value for pool_timeout with
sqlalchemy (defaults to SQLAlchemy settings)
:keyword sqlite_synchronous: if True, SQLite uses synchronous mode
(defaults to True)
:keyword connection_trace: add python stack traces to SQL as comment
strings (defaults to False)
:keyword max_retries: maximum db connection retries during startup.
(setting -1 implies an infinite retry count)
(defaults to 10)
:keyword retry_interval: interval between retries of opening a sql
connection (defaults to 10)
:keyword thread_checkin: boolean that indicates that between each
engine checkin event a sleep(0) will occur to
allow other greenthreads to run (defaults to
True)
"""
super(EngineFacade, self).__init__()
engine_kwargs = {
'sqlite_fk': sqlite_fk,
'mysql_sql_mode': kwargs.get('mysql_sql_mode', 'TRADITIONAL'),
'idle_timeout': kwargs.get('idle_timeout', 3600),
'connection_debug': kwargs.get('connection_debug', 0),
'max_pool_size': kwargs.get('max_pool_size'),
'max_overflow': kwargs.get('max_overflow'),
'pool_timeout': kwargs.get('pool_timeout'),
'sqlite_synchronous': kwargs.get('sqlite_synchronous', True),
'connection_trace': kwargs.get('connection_trace', False),
'max_retries': kwargs.get('max_retries', 10),
'retry_interval': kwargs.get('retry_interval', 10),
'thread_checkin': kwargs.get('thread_checkin', True)
}
maker_kwargs = {
'autocommit': autocommit,
'expire_on_commit': expire_on_commit
}
self._engine = create_engine(sql_connection=sql_connection,
**engine_kwargs)
self._session_maker = get_maker(engine=self._engine,
**maker_kwargs)
if slave_connection:
self._slave_engine = create_engine(sql_connection=slave_connection,
**engine_kwargs)
self._slave_session_maker = get_maker(engine=self._slave_engine,
**maker_kwargs)
else:
self._slave_engine = None
self._slave_session_maker = None
def get_engine(self, use_slave=False):
"""Get the engine instance (note, that it's shared).
:param use_slave: if possible, use 'slave' database for this engine.
If the connection string for the slave database
wasn't provided, 'master' engine will be returned.
(defaults to False)
:type use_slave: bool
"""
if use_slave and self._slave_engine:
return self._slave_engine
return self._engine
def get_session(self, use_slave=False, **kwargs):
"""Get a Session instance.
:param use_slave: if possible, use 'slave' database connection for
this session. If the connection string for the
slave database wasn't provided, a session bound
to the 'master' engine will be returned.
(defaults to False)
:type use_slave: bool
Keyword arguments will be passed to a sessionmaker instance as is (if
passed, they will override the ones used when the sessionmaker instance
was created). See SQLAlchemy Session docs for details.
"""
if use_slave and self._slave_session_maker:
return self._slave_session_maker(**kwargs)
return self._session_maker(**kwargs)
@classmethod
def from_config(cls, conf,
sqlite_fk=False, autocommit=True, expire_on_commit=False):
"""Initialize EngineFacade using oslo.config config instance options.
:param conf: oslo.config config instance
:type conf: oslo.config.cfg.ConfigOpts
:param sqlite_fk: enable foreign keys in SQLite
:type sqlite_fk: bool
:param autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:param expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
"""
conf.register_opts(options.database_opts, 'database')
return cls(sql_connection=conf.database.connection,
slave_connection=conf.database.slave_connection,
sqlite_fk=sqlite_fk,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
mysql_sql_mode=conf.database.mysql_sql_mode,
idle_timeout=conf.database.idle_timeout,
connection_debug=conf.database.connection_debug,
max_pool_size=conf.database.max_pool_size,
max_overflow=conf.database.max_overflow,
pool_timeout=conf.database.pool_timeout,
sqlite_synchronous=conf.database.sqlite_synchronous,
connection_trace=conf.database.connection_trace,
max_retries=conf.database.max_retries,
retry_interval=conf.database.retry_interval)
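# Illustrative usage only, assuming ``CONF`` is the application's
# oslo.config ConfigOpts instance with a populated [database] section:
#
#     facade = EngineFacade.from_config(CONF)
#     engine = facade.get_engine()
#     session = facade.get_session()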
from oslo_db.sqlalchemy.session import * # noqa

View File

@ -1,127 +1,15 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
try:
from oslotest import base as test_base
except ImportError:
raise NameError('Oslotest is not installed. Please add oslotest to your'
' test-requirements')
import six
from oslo.db import exception
from oslo.db.sqlalchemy import provision
from oslo.db.sqlalchemy import session
from oslo.db.sqlalchemy import utils
class DbFixture(fixtures.Fixture):
"""Basic database fixture.
Allows running tests on various db backends, such as SQLite, MySQL and
PostgreSQL. The sqlite backend is used by default. To override the default
backend uri, set the env variable OS_TEST_DBAPI_CONNECTION with database
admin credentials for the specific backend.
"""
DRIVER = "sqlite"
# these names are deprecated, and are not used by DbFixture.
# they are here for backwards compatibility with test suites that
# are referring to them directly.
DBNAME = PASSWORD = USERNAME = 'openstack_citest'
def __init__(self, test):
super(DbFixture, self).__init__()
self.test = test
def setUp(self):
super(DbFixture, self).setUp()
try:
self.provision = provision.ProvisionedDatabase(self.DRIVER)
self.addCleanup(self.provision.dispose)
except exception.BackendNotAvailable:
msg = '%s backend is not available.' % self.DRIVER
return self.test.skip(msg)
else:
self.test.engine = self.provision.engine
self.addCleanup(setattr, self.test, 'engine', None)
self.test.sessionmaker = session.get_maker(self.test.engine)
self.addCleanup(setattr, self.test, 'sessionmaker', None)
class DbTestCase(test_base.BaseTestCase):
"""Base class for testing of DB code.
Using `DbFixture`. Intended to be the main database test case to use all
the tests on a given backend with user defined uri. Backend specific
tests should be decorated with `backend_specific` decorator.
"""
FIXTURE = DbFixture
def setUp(self):
super(DbTestCase, self).setUp()
self.useFixture(self.FIXTURE(self))
class OpportunisticTestCase(DbTestCase):
"""Placeholder for backwards compatibility."""
ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']
def backend_specific(*dialects):
"""Decorator to skip backend specific tests on inappropriate engines.
::dialects: list of dialect names under which the test will be launched.
"""
def wrap(f):
@six.wraps(f)
def ins_wrap(self):
if not set(dialects).issubset(ALLOWED_DIALECTS):
raise ValueError(
"Please use allowed dialects: %s" % ALLOWED_DIALECTS)
if self.engine.name not in dialects:
msg = ('The test "%s" can be run '
'only on %s. Current engine is %s.')
args = (utils.get_callable_name(f), ' '.join(dialects),
self.engine.name)
self.skip(msg % args)
else:
return f(self)
return ins_wrap
return wrap
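# Illustrative usage only:
#
#     class MyBackendTest(DbTestCase):
#         @backend_specific('mysql', 'postgresql')
#         def test_server_side_feature(self):
#             ...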
class MySQLOpportunisticFixture(DbFixture):
DRIVER = 'mysql'
class PostgreSQLOpportunisticFixture(DbFixture):
DRIVER = 'postgresql'
class MySQLOpportunisticTestCase(OpportunisticTestCase):
FIXTURE = MySQLOpportunisticFixture
class PostgreSQLOpportunisticTestCase(OpportunisticTestCase):
FIXTURE = PostgreSQLOpportunisticFixture
from oslo_db.sqlalchemy.test_base import * # noqa

View File

@ -1,5 +1,3 @@
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -14,600 +12,4 @@
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import logging
import pprint
import alembic
import alembic.autogenerate
import alembic.migration
import pkg_resources as pkg
import six
import sqlalchemy
from sqlalchemy.engine import reflection
import sqlalchemy.exc
from sqlalchemy import schema
import sqlalchemy.sql.expression as expr
import sqlalchemy.types as types
from oslo.db._i18n import _LE
from oslo.db import exception as exc
from oslo.db.sqlalchemy import utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class WalkVersionsMixin(object):
"""Test mixin to check upgrade and downgrade ability of migration.
This is only suitable for testing of migrate_ migration scripts. An
abstract class mixin. `INIT_VERSION`, `REPOSITORY` and `migration_api`
attributes must be implemented in subclasses.
.. _auxiliary-dynamic-methods: Auxiliary Methods
Auxiliary Methods:
`migrate_up` and `migrate_down` instance methods of the class can be
used with auxiliary methods named `_pre_upgrade_<revision_id>`,
`_check_<revision_id>`, `_post_downgrade_<revision_id>`. These methods are
intended to check applied changes for correctness of data operations and
should be implemented for every particular revision which you want to
check with data. Implementation recommendations for
`_pre_upgrade_<revision_id>`, `_check_<revision_id>` and
`_post_downgrade_<revision_id>`:
* `_pre_upgrade_<revision_id>`: provide data appropriate for the next
revision; the id of the revision about to be applied should be used.
* `_check_<revision_id>`: Insert, select, delete operations
with newly applied changes. The data provided by
`_pre_upgrade_<revision_id>` will be used.
* `_post_downgrade_<revision_id>`: check for absence
(inability to use) changes provided by reverted revision.
Execution order of auxiliary methods when revision is upgrading:
`_pre_upgrade_###` => `upgrade` => `_check_###`
Execution order of auxiliary methods when revision is downgrading:
`downgrade` => `_post_downgrade_###`
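For example, auxiliary methods for a revision `002` might look
approximately like (`my_table` is a placeholder for a project table)::
def _pre_upgrade_002(self, engine):
data = {'id': 1, 'name': 'foo'}
engine.execute(my_table.insert(), data)
return data
def _check_002(self, engine, data):
rows = engine.execute(my_table.select()).fetchall()
self.assertEqual(1, len(rows))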
.. _migrate: https://sqlalchemy-migrate.readthedocs.org/en/latest/
"""
@abc.abstractproperty
def INIT_VERSION(self):
"""Initial version of a migration repository.
Can be different from 0 if migrations were squashed.
:rtype: int
"""
pass
@abc.abstractproperty
def REPOSITORY(self):
"""Allows basic manipulation with migration repository.
:returns: `migrate.versioning.repository.Repository` subclass.
"""
pass
@abc.abstractproperty
def migration_api(self):
"""Provides API for upgrading, downgrading and version manipulations.
:returns: `migrate.api` or overloaded analog.
"""
pass
@abc.abstractproperty
def migrate_engine(self):
"""Provides engine instance.
Should be the same instance as used when migrations are applied. In
most cases, the `engine` attribute provided by the test class in a
`setUp` method will work.
Example of implementation:
def migrate_engine(self):
return self.engine
:returns: sqlalchemy engine instance
"""
pass
def _walk_versions(self, snake_walk=False, downgrade=True):
"""Check if migration upgrades and downgrades successfully.
DEPRECATED: this function is deprecated and will be removed from
oslo.db in a few releases. Please use walk_versions() method instead.
"""
self.walk_versions(snake_walk, downgrade)
def _migrate_down(self, version, with_data=False):
"""Migrate down to a previous version of the db.
DEPRECATED: this function is deprecated and will be removed from
oslo.db in a few releases. Please use migrate_down() method instead.
"""
return self.migrate_down(version, with_data)
def _migrate_up(self, version, with_data=False):
"""Migrate up to a new version of the db.
DEPRECATED: this function is deprecated and will be removed from
oslo.db in a few releases. Please use migrate_up() method instead.
"""
self.migrate_up(version, with_data)
def walk_versions(self, snake_walk=False, downgrade=True):
"""Check if migration upgrades and downgrades successfully.
Determine the latest version script from the repo, then
upgrade from 1 through to the latest, with no data
in the databases. This just checks that the schema itself
upgrades successfully.
`walk_versions` calls `migrate_up` and `migrate_down` with
`with_data` argument to check changes with data, but these methods
can be called without any extra check outside of `walk_versions`
method.
:param snake_walk: enables checking that each individual migration can
be upgraded/downgraded by itself.
If we have ordered migrations 123abc, 456def, 789ghi and we run
upgrading with the `snake_walk` argument set to `True`, the
migrations will be applied in the following order:
`123abc => 456def => 123abc =>
456def => 789ghi => 456def => 789ghi`
:type snake_walk: bool
:param downgrade: Check downgrade behavior if True.
:type downgrade: bool
"""
# Place the database under version control
self.migration_api.version_control(self.migrate_engine,
self.REPOSITORY,
self.INIT_VERSION)
self.assertEqual(self.INIT_VERSION,
self.migration_api.db_version(self.migrate_engine,
self.REPOSITORY))
LOG.debug('latest version is %s', self.REPOSITORY.latest)
versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
for version in versions:
# upgrade -> downgrade -> upgrade
self.migrate_up(version, with_data=True)
if snake_walk:
downgraded = self.migrate_down(version - 1, with_data=True)
if downgraded:
self.migrate_up(version)
if downgrade:
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
for version in reversed(versions):
# downgrade -> upgrade -> downgrade
downgraded = self.migrate_down(version - 1)
if snake_walk and downgraded:
self.migrate_up(version)
self.migrate_down(version - 1)
def migrate_down(self, version, with_data=False):
"""Migrate down to a previous version of the db.
:param version: id of revision to downgrade.
:type version: str
:keyword with_data: Whether to verify the absence of changes from
migration(s) being downgraded, see
:ref:`auxiliary-dynamic-methods <Auxiliary Methods>`.
:type with_data: Bool
"""
try:
self.migration_api.downgrade(self.migrate_engine,
self.REPOSITORY, version)
except NotImplementedError:
# NOTE(sirp): some migrations, namely release-level
# migrations, don't support a downgrade.
return False
self.assertEqual(version, self.migration_api.db_version(
self.migrate_engine, self.REPOSITORY))
# NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
# version). So if we have any downgrade checks, they need to be run for
# the previous (higher numbered) migration.
if with_data:
post_downgrade = getattr(
self, "_post_downgrade_%03d" % (version + 1), None)
if post_downgrade:
post_downgrade(self.migrate_engine)
return True
def migrate_up(self, version, with_data=False):
"""Migrate up to a new version of the db.
:param version: id of revision to upgrade.
:type version: str
:keyword with_data: Whether to verify the applied changes with data,
see :ref:`auxiliary-dynamic-methods <Auxiliary Methods>`.
:type with_data: Bool
"""
# NOTE(sdague): try block is here because it's impossible to debug
# where a failed data migration happens otherwise
try:
if with_data:
data = None
pre_upgrade = getattr(
self, "_pre_upgrade_%03d" % version, None)
if pre_upgrade:
data = pre_upgrade(self.migrate_engine)
self.migration_api.upgrade(self.migrate_engine,
self.REPOSITORY, version)
self.assertEqual(version,
self.migration_api.db_version(self.migrate_engine,
self.REPOSITORY))
if with_data:
check = getattr(self, "_check_%03d" % version, None)
if check:
check(self.migrate_engine, data)
except exc.DbMigrationError:
msg = _LE("Failed to migrate to version %(ver)s on engine %(eng)s")
LOG.error(msg, {"ver": version, "eng": self.migrate_engine})
raise
@six.add_metaclass(abc.ABCMeta)
class ModelsMigrationsSync(object):
"""A helper class for comparison of DB migration scripts and models.
It's intended to be inherited by test cases in target projects. They have
to provide implementations for methods used internally in the test (as
we have no way to implement them here).
test_models_sync() will run migration scripts for the engine provided and
then compare the given metadata to the one reflected from the database.
The difference between MODELS and MIGRATION scripts will be printed and
the test will fail if the difference is not empty. The return value is
really a list of actions, that should be performed in order to make the
current database schema state (i.e. migration scripts) consistent with
models definitions. It's left up to developers to analyze the output and
decide whether the models definitions or the migration scripts should be
modified to make them consistent.
Output::
[(
'add_table',
description of the table from models
),
(
'remove_table',
description of the table from database
),
(
'add_column',
schema,
table name,
column description from models
),
(
'remove_column',
schema,
table name,
column description from database
),
(
'add_index',
description of the index from models
),
(
'remove_index',
description of the index from database
),
(
'add_constraint',
description of constraint from models
),
(
'remove_constraint',
description of constraint from database
),
(
'modify_nullable',
schema,
table name,
column name,
{
'existing_type': type of the column from database,
'existing_server_default': default value from database
},
nullable from database,
nullable from models
),
(
'modify_type',
schema,
table name,
column name,
{
'existing_nullable': database nullable,
'existing_server_default': default value from database
},
database column type,
type of the column from models
),
(
'modify_default',
schema,
table name,
column name,
{
'existing_nullable': database nullable,
'existing_type': type of the column from database
},
connection column default value,
default from models
)]
Method include_object() can be overridden to exclude some tables from
comparison (e.g. migrate_repo).
"""
@abc.abstractmethod
def db_sync(self, engine):
"""Run migration scripts with the given engine instance.
This method must be implemented in subclasses and run migration scripts
for a DB the given engine is connected to.
"""
@abc.abstractmethod
def get_engine(self):
"""Return the engine instance to be used when running tests.
This method must be implemented in subclasses and return an engine
instance to be used when running tests.
"""
@abc.abstractmethod
def get_metadata(self):
"""Return the metadata instance to be used for schema comparison.
This method must be implemented in subclasses and return the metadata
instance attached to the BASE model.
"""
def include_object(self, object_, name, type_, reflected, compare_to):
"""Return True for objects that should be compared.
:param object_: a SchemaItem object such as a Table or Column object
:param name: the name of the object
:param type_: a string describing the type of object (e.g. "table")
:param reflected: True if the given object was produced based on
table reflection, False if it's from a local
MetaData object
:param compare_to: the object being compared against, if available,
else None
"""
return True
def compare_type(self, ctxt, insp_col, meta_col, insp_type, meta_type):
"""Return True if types are different, False if not.
Return None to allow the default implementation to compare these types.
:param ctxt: alembic MigrationContext instance
:param insp_col: reflected column
:param meta_col: column from model
:param insp_type: reflected column type
:param meta_type: column type from model
"""
# some backends (e.g. mysql) don't provide native boolean type
BOOLEAN_METADATA = (types.BOOLEAN, types.Boolean)
BOOLEAN_SQL = BOOLEAN_METADATA + (types.INTEGER, types.Integer)
if issubclass(type(meta_type), BOOLEAN_METADATA):
return not issubclass(type(insp_type), BOOLEAN_SQL)
return None # tells alembic to use the default comparison method
def compare_server_default(self, ctxt, insp_col, meta_col,
insp_def, meta_def, rendered_meta_def):
"""Compare default values between model and db table.
Return True if the defaults are different, False if not, or None to
allow the default implementation to compare these defaults.
:param ctxt: alembic MigrationContext instance
:param insp_col: reflected column
:param meta_col: column from model
:param insp_def: reflected column default value
:param meta_def: column default value from model
:param rendered_meta_def: rendered column default value (from model)
"""
return self._compare_server_default(ctxt.bind, meta_col, insp_def,
meta_def)
@utils.DialectFunctionDispatcher.dispatch_for_dialect("*")
def _compare_server_default(bind, meta_col, insp_def, meta_def):
pass
@_compare_server_default.dispatch_for('mysql')
def _compare_server_default(bind, meta_col, insp_def, meta_def):
if isinstance(meta_col.type, sqlalchemy.Boolean):
if meta_def is None or insp_def is None:
return meta_def != insp_def
return not (
isinstance(meta_def.arg, expr.True_) and insp_def == "'1'" or
isinstance(meta_def.arg, expr.False_) and insp_def == "'0'"
)
if isinstance(meta_col.type, sqlalchemy.Integer):
if meta_def is None or insp_def is None:
return meta_def != insp_def
return meta_def.arg != insp_def.split("'")[1]
@_compare_server_default.dispatch_for('postgresql')
def _compare_server_default(bind, meta_col, insp_def, meta_def):
if isinstance(meta_col.type, sqlalchemy.Enum):
if meta_def is None or insp_def is None:
return meta_def != insp_def
return insp_def != "'%s'::%s" % (meta_def.arg, meta_col.type.name)
elif isinstance(meta_col.type, sqlalchemy.String):
if meta_def is None or insp_def is None:
return meta_def != insp_def
return insp_def != "'%s'::character varying" % meta_def.arg
def _cleanup(self):
engine = self.get_engine()
with engine.begin() as conn:
inspector = reflection.Inspector.from_engine(engine)
metadata = schema.MetaData()
tbs = []
all_fks = []
for table_name in inspector.get_table_names():
fks = []
for fk in inspector.get_foreign_keys(table_name):
if not fk['name']:
continue
fks.append(
schema.ForeignKeyConstraint((), (), name=fk['name'])
)
table = schema.Table(table_name, metadata, *fks)
tbs.append(table)
all_fks.extend(fks)
for fkc in all_fks:
conn.execute(schema.DropConstraint(fkc))
for table in tbs:
conn.execute(schema.DropTable(table))
FKInfo = collections.namedtuple('fk_info', ['constrained_columns',
'referred_table',
'referred_columns'])
def check_foreign_keys(self, metadata, bind):
"""Compare foreign keys between model and db table.
:returns: a list that contains information about:
* whether a new key should be added or an existing one removed,
* name of that key,
* source table,
* referred table,
* constrained columns,
* referred columns
Output::
[('drop_key',
'testtbl_fk_check_fkey',
'testtbl',
fk_info(constrained_columns=(u'fk_check',),
referred_table=u'table',
referred_columns=(u'fk_check',)))]
"""
diff = []
insp = sqlalchemy.engine.reflection.Inspector.from_engine(bind)
# Get all tables from db
db_tables = insp.get_table_names()
# Get all tables from models
model_tables = metadata.tables
for table in db_tables:
if table not in model_tables:
continue
# Get all necessary information about key of current table from db
fk_db = dict((self._get_fk_info_from_db(i), i['name'])
for i in insp.get_foreign_keys(table))
fk_db_set = set(fk_db.keys())
# Get all necessary information about key of current table from
# models
fk_models = dict((self._get_fk_info_from_model(fk), fk)
for fk in model_tables[table].foreign_keys)
fk_models_set = set(fk_models.keys())
for key in (fk_db_set - fk_models_set):
diff.append(('drop_key', fk_db[key], table, key))
LOG.info(("Detected removed foreign key %(fk)r on "
"table %(table)r"), {'fk': fk_db[key],
'table': table})
for key in (fk_models_set - fk_db_set):
diff.append(('add_key', fk_models[key], table, key))
LOG.info((
"Detected added foreign key for column %(fk)r on table "
"%(table)r"), {'fk': fk_models[key].column.name,
'table': table})
return diff
def _get_fk_info_from_db(self, fk):
return self.FKInfo(tuple(fk['constrained_columns']),
fk['referred_table'],
tuple(fk['referred_columns']))
def _get_fk_info_from_model(self, fk):
return self.FKInfo((fk.parent.name,), fk.column.table.name,
(fk.column.name,))
def test_models_sync(self):
# recent versions of sqlalchemy and alembic are needed for running of
# this test, but we already have them in requirements
try:
pkg.require('sqlalchemy>=0.8.4', 'alembic>=0.6.2')
except (pkg.VersionConflict, pkg.DistributionNotFound) as e:
self.skipTest('sqlalchemy>=0.8.4 and alembic>=0.6.2 are required'
' to run this test: %s' % e)
# drop all tables after a test run
self.addCleanup(self._cleanup)
# run migration scripts
self.db_sync(self.get_engine())
with self.get_engine().connect() as conn:
opts = {
'include_object': self.include_object,
'compare_type': self.compare_type,
'compare_server_default': self.compare_server_default,
}
mc = alembic.migration.MigrationContext.configure(conn, opts=opts)
# compare schemas and fail with diff, if it's not empty
diff1 = alembic.autogenerate.compare_metadata(mc,
self.get_metadata())
diff2 = self.check_foreign_keys(self.get_metadata(),
self.get_engine())
diff = diff1 + diff2
if diff:
msg = pprint.pformat(diff, indent=2, width=20)
self.fail(
"Models and migration scripts aren't in sync:\n%s" % msg)
from oslo_db.sqlalchemy.test_migrations import * # noqa

File diff suppressed because it is too large

oslo_db/api.py Normal file
View File

@ -0,0 +1,229 @@
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
=================================
Multiple DB API backend support.
=================================
A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
"""
import logging
import threading
import time
from oslo.utils import importutils
import six
from oslo_db._i18n import _LE
from oslo_db import exception
from oslo_db import options
LOG = logging.getLogger(__name__)
def safe_for_db_retry(f):
"""Indicate api method as safe for re-connection to database.
Database connection retries will be enabled for the decorated api method.
Database connection failure can have many causes, which can be temporary.
In such cases retry may increase the likelihood of connection.
Usage::
@safe_for_db_retry
def api_method(self):
self.engine.connect()
:param f: database api method.
:type f: function.
"""
f.__dict__['enable_retry'] = True
return f
class wrap_db_retry(object):
"""Decorator class. Retry db.api methods, if DBConnectionError() raised.
Retry decorated db.api methods. If we enabled `use_db_reconnect`
in config, this decorator will be applied to all db.api functions,
marked with @safe_for_db_retry decorator.
Decorator catches DBConnectionError() and retries function in a
loop until it succeeds, or until maximum retries count will be reached.
Keyword arguments:
:param retry_interval: seconds between transaction retries
:type retry_interval: int
:param max_retries: max number of retries before an error is raised
:type max_retries: int
:param inc_retry_interval: whether to increase the retry interval after each retry
:type inc_retry_interval: bool
:param max_retry_interval: max interval value between retries
:type max_retry_interval: int
"""
def __init__(self, retry_interval, max_retries, inc_retry_interval,
max_retry_interval):
super(wrap_db_retry, self).__init__()
self.retry_interval = retry_interval
self.max_retries = max_retries
self.inc_retry_interval = inc_retry_interval
self.max_retry_interval = max_retry_interval
def __call__(self, f):
@six.wraps(f)
def wrapper(*args, **kwargs):
next_interval = self.retry_interval
remaining = self.max_retries
while True:
try:
return f(*args, **kwargs)
except exception.DBConnectionError as e:
if remaining == 0:
LOG.exception(_LE('DB exceeded retry limit.'))
raise exception.DBError(e)
if remaining != -1:
remaining -= 1
LOG.exception(_LE('DB connection error.'))
# NOTE(vsergeyev): We are using patched time module, so
# this effectively yields the execution
# context to another green thread.
time.sleep(next_interval)
if self.inc_retry_interval:
next_interval = min(
next_interval * 2,
self.max_retry_interval
)
return wrapper
class DBAPI(object):
"""Initialize the chosen DB API backend.
After initialization, API methods are available as normal attributes of
the ``DBAPI`` subclass. Database API methods are supposed to be called as
DBAPI instance methods.
:param backend_name: name of the backend to load
:type backend_name: str
:param backend_mapping: backend name -> module/class to load mapping
:type backend_mapping: dict
:default backend_mapping: None
:param lazy: load the DB backend lazily on the first DB API method call
:type lazy: bool
:default lazy: False
:keyword use_db_reconnect: whether to retry DB transactions on disconnect
:type use_db_reconnect: bool
:keyword retry_interval: seconds between transaction retries
:type retry_interval: int
:keyword inc_retry_interval: whether to increase the retry interval
:type inc_retry_interval: bool
:keyword max_retry_interval: max interval value between retries
:type max_retry_interval: int
:keyword max_retries: max number of retries before an error is raised
:type max_retries: int
"""
def __init__(self, backend_name, backend_mapping=None, lazy=False,
**kwargs):
self._backend = None
self._backend_name = backend_name
self._backend_mapping = backend_mapping or {}
self._lock = threading.Lock()
if not lazy:
self._load_backend()
self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
self.retry_interval = kwargs.get('retry_interval', 1)
self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
self.max_retry_interval = kwargs.get('max_retry_interval', 10)
self.max_retries = kwargs.get('max_retries', 20)
def _load_backend(self):
with self._lock:
if not self._backend:
# Import the untranslated name if we don't have a mapping
backend_path = self._backend_mapping.get(self._backend_name,
self._backend_name)
LOG.debug('Loading backend %(name)r from %(path)r',
{'name': self._backend_name,
'path': backend_path})
backend_mod = importutils.import_module(backend_path)
self._backend = backend_mod.get_backend()
def __getattr__(self, key):
if not self._backend:
self._load_backend()
attr = getattr(self._backend, key)
if not hasattr(attr, '__call__'):
return attr
# NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
# DB API methods, decorated with @safe_for_db_retry
# on disconnect.
if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
attr = wrap_db_retry(
retry_interval=self.retry_interval,
max_retries=self.max_retries,
inc_retry_interval=self.inc_retry_interval,
max_retry_interval=self.max_retry_interval)(attr)
return attr
@classmethod
def from_config(cls, conf, backend_mapping=None, lazy=False):
"""Initialize DBAPI instance given a config instance.
:param conf: oslo.config config instance
:type conf: oslo.config.cfg.ConfigOpts
:param backend_mapping: backend name -> module/class to load mapping
:type backend_mapping: dict
:param lazy: load the DB backend lazily on the first DB API method call
:type lazy: bool
"""
conf.register_opts(options.database_opts, 'database')
return cls(backend_name=conf.database.backend,
backend_mapping=backend_mapping,
lazy=lazy,
use_db_reconnect=conf.database.use_db_reconnect,
retry_interval=conf.database.db_retry_interval,
inc_retry_interval=conf.database.db_inc_retry_interval,
max_retry_interval=conf.database.db_max_retry_interval,
max_retries=conf.database.db_max_retries)
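For illustration only (not part of this commit), a minimal sketch of how a consuming service might load its backend through ``DBAPI``; the backend module path and method name here are hypothetical:

# Hypothetical consumer of oslo_db.api.DBAPI; 'myservice.db.sqlalchemy.api'
# and 'instance_get' are illustrative names, not part of oslo.db.
from oslo.config import cfg

from oslo_db import api as db_api

_BACKEND_MAPPING = {'sqlalchemy': 'myservice.db.sqlalchemy.api'}

CONF = cfg.CONF

# Registers the [database] options and reads backend/retry settings.
IMPL = db_api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING,
                                lazy=True)


def get_instance(instance_id):
    # Resolved against the loaded backend module; retried on disconnect
    # when use_db_reconnect is enabled and the backend method is marked
    # with @safe_for_db_retry.
    return IMPL.instance_get(instance_id)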

oslo_db/concurrency.py (new file)

@ -0,0 +1,81 @@
# Copyright 2014 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import logging
import threading
from oslo.config import cfg
from oslo_db._i18n import _LE
from oslo_db import api
LOG = logging.getLogger(__name__)
tpool_opts = [
cfg.BoolOpt('use_tpool',
default=False,
deprecated_name='dbapi_use_tpool',
deprecated_group='DEFAULT',
help='Enable the experimental use of thread pooling for '
'all DB API calls'),
]
class TpoolDbapiWrapper(object):
"""DB API wrapper class.
This wraps the oslo DB API with an option to use eventlet's thread
pooling. Since the CONF variable may not be loaded at the time this class
is instantiated, we must look at it on the first DB API call.
"""
def __init__(self, conf, backend_mapping):
self._db_api = None
self._backend_mapping = backend_mapping
self._conf = conf
self._conf.register_opts(tpool_opts, 'database')
self._lock = threading.Lock()
@property
def _api(self):
if not self._db_api:
with self._lock:
if not self._db_api:
db_api = api.DBAPI.from_config(
conf=self._conf, backend_mapping=self._backend_mapping)
if self._conf.database.use_tpool:
try:
from eventlet import tpool
except ImportError:
LOG.exception(_LE("'eventlet' is required for "
"TpoolDbapiWrapper."))
raise
self._db_api = tpool.Proxy(db_api)
else:
self._db_api = db_api
return self._db_api
def __getattr__(self, key):
return getattr(self._api, key)
def list_opts():
"""Returns a list of oslo.config options available in this module.
:returns: a list of (group_name, opts) tuples
"""
return [('database', copy.deepcopy(tpool_opts))]
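As a hedged usage sketch (the backend module path is hypothetical), the wrapper is typically instantiated once at import time and then used like a plain DB API object:

# Hypothetical wiring of TpoolDbapiWrapper; 'myservice.db.api' is an
# illustrative backend module path, not part of oslo.db.
from oslo.config import cfg

from oslo_db import concurrency

CONF = cfg.CONF
_BACKEND_MAPPING = {'sqlalchemy': 'myservice.db.api'}

IMPL = concurrency.TpoolDbapiWrapper(CONF, backend_mapping=_BACKEND_MAPPING)

# Attribute access is proxied: with [database]/use_tpool=True the call is
# dispatched through eventlet's thread pool, otherwise it is a direct call.
# result = IMPL.instance_get(42)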

oslo_db/exception.py (new file)

@ -0,0 +1,173 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""DB related custom exceptions.
Custom exceptions intended to determine the causes of specific database
errors. This module provides more generic exceptions than the
database-specific driver libraries, so users of oslo.db can catch them no
matter which database the application is using. Most of the exceptions are
wrappers. Wrapper exceptions take an original exception as a positional
argument and keep it for deeper debugging.
Example::
try:
statement(arg)
except sqlalchemy.exc.OperationalError as e:
raise DBDuplicateEntry(e)
This is useful for identifying more specific error cases later at
execution time, when you need to add extra information to an error
message. Wrapper exceptions take care of displaying the original error
message, so the low-level cause of an error is not lost. All the database
API exceptions are wrapped into the specific exceptions provided below.
Please use only database-related custom exceptions for database
manipulations in `try/except` statements. This is required for consistent
handling of database errors.
"""
import six
from oslo_db._i18n import _
class DBError(Exception):
"""Base exception for all custom database exceptions.
:kwarg inner_exception: an original exception which was wrapped with
DBError or its subclasses.
"""
def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
super(DBError, self).__init__(six.text_type(inner_exception))
class DBDuplicateEntry(DBError):
"""Duplicate entry at unique column error.
Raised on an attempt to write to a unique column the same entry as an
existing one. The :attr:`columns` attribute is available on an instance
of the exception and can be used for error handling::
try:
instance_type_ref.save()
except DBDuplicateEntry as e:
if 'colname' in e.columns:
# Handle error.
:kwarg columns: a list of unique columns for which a duplicate entry
write was attempted.
:type columns: list
:kwarg value: the value that was attempted to be written. The value will
be None if it cannot be extracted for a particular database backend. Only
MySQL and PostgreSQL 9.x are supported right now.
"""
def __init__(self, columns=None, inner_exception=None, value=None):
self.columns = columns or []
self.value = value
super(DBDuplicateEntry, self).__init__(inner_exception)
class DBReferenceError(DBError):
"""Foreign key violation error.
:param table: a table name in which the reference is directed.
:type table: str
:param constraint: a problematic constraint name.
:type constraint: str
:param key: a broken reference key name.
:type key: str
:param key_table: a table name which contains the key.
:type key_table: str
"""
def __init__(self, table, constraint, key, key_table,
inner_exception=None):
self.table = table
self.constraint = constraint
self.key = key
self.key_table = key_table
super(DBReferenceError, self).__init__(inner_exception)
class DBDeadlock(DBError):
"""Database dead lock error.
Deadlock is a situation that occurs when two or more different database
sessions have some data locked, and each database session requests a lock
on the data that another, different, session has already locked.
"""
def __init__(self, inner_exception=None):
super(DBDeadlock, self).__init__(inner_exception)
class DBInvalidUnicodeParameter(Exception):
"""Database unicode error.
Raised when a unicode parameter is passed to a database
without an encoding directive.
"""
message = _("Invalid Parameter: "
"Encoding directive wasn't provided.")
class DbMigrationError(DBError):
"""Wrapped migration specific exception.
Raised when migrations couldn't be completed successfully.
"""
def __init__(self, message=None):
super(DbMigrationError, self).__init__(message)
class DBConnectionError(DBError):
"""Wrapped connection specific exception.
Raised when a database connection fails.
"""
pass
class InvalidSortKey(Exception):
"""A sort key destined for database query usage is invalid."""
message = _("Sort key supplied was not valid.")
class ColumnError(Exception):
"""Error raised when no column or an invalid column is found."""
class BackendNotAvailable(Exception):
"""Error raised when a particular database backend is not available
within a test suite.
"""

oslo_db/options.py (new file)

@ -0,0 +1,220 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo.config import cfg
database_opts = [
cfg.StrOpt('sqlite_db',
deprecated_group='DEFAULT',
default='oslo.sqlite',
help='The file name to use with SQLite.'),
cfg.BoolOpt('sqlite_synchronous',
deprecated_group='DEFAULT',
default=True,
help='If True, SQLite uses synchronous mode.'),
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The back end to use for the database.'),
cfg.StrOpt('connection',
help='The SQLAlchemy connection string to use to connect to '
'the database.',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('slave_connection',
secret=True,
help='The SQLAlchemy connection string to use to connect to the'
' slave database.'),
cfg.StrOpt('mysql_sql_mode',
default='TRADITIONAL',
help='The SQL mode to be used for MySQL sessions. '
'This option, including the default, overrides any '
'server-set SQL mode. To use whatever SQL mode '
'is set by the server configuration, '
'set this to no value. Example: mysql_sql_mode='),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='Timeout before idle SQL connections are reaped.'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool.'),
cfg.IntOpt('max_pool_size',
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool.'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='Maximum number of database connection retries '
'during startup. Set to -1 to specify an infinite '
'retry count.'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='Interval between retries of opening a SQL connection.'),
cfg.IntOpt('max_overflow',
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with '
'SQLAlchemy.'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information: 0=None, '
'100=Everything.'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add Python stack traces to SQL as comment strings.'),
cfg.IntOpt('pool_timeout',
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with '
'SQLAlchemy.'),
cfg.BoolOpt('use_db_reconnect',
default=False,
help='Enable the experimental use of database reconnect '
'on connection lost.'),
cfg.IntOpt('db_retry_interval',
default=1,
help='Seconds between database connection retries.'),
cfg.BoolOpt('db_inc_retry_interval',
default=True,
help='If True, increases the interval between database '
'connection retries up to db_max_retry_interval.'),
cfg.IntOpt('db_max_retry_interval',
default=10,
help='If db_inc_retry_interval is set, the '
'maximum seconds between database connection retries.'),
cfg.IntOpt('db_max_retries',
default=20,
help='Maximum database connection retries before error is '
'raised. Set to -1 to specify an infinite retry '
'count.'),
]
def set_defaults(conf, connection=None, sqlite_db=None,
max_pool_size=None, max_overflow=None,
pool_timeout=None):
"""Set defaults for configuration variables.
Overrides default options values.
:param conf: Config instance in which to set default options. Using
instances instead of a global config object prevents conflicts between
option declarations.
:type conf: oslo.config.cfg.ConfigOpts instance.
:keyword connection: SQL connection string.
Valid SQLite URL forms are:
* sqlite:///:memory: (or, sqlite://)
* sqlite:///relative/path/to/file.db
* sqlite:////absolute/path/to/file.db
:type connection: str
:keyword sqlite_db: path to SQLite database file.
:type sqlite_db: str
:keyword max_pool_size: maximum connection pool size. The default of 5
will be used if the value of the parameter is `None`. This is the largest
number of connections that will be kept persistently in the pool. Note
that the pool begins with no connections; once this number of connections
is requested, that number of connections will remain.
:type max_pool_size: int
:default max_pool_size: None
:keyword max_overflow: The maximum overflow size of the pool. When the
number of checked-out connections reaches the size set in pool_size,
additional connections will be returned up to this limit. When those
additional connections are returned to the pool, they are disconnected and
discarded. It follows then that the total number of simultaneous
connections the pool will allow is pool_size + max_overflow, and the total
number of "sleeping" connections the pool will allow is pool_size.
max_overflow can be set to -1 to indicate no overflow limit; no limit
will be placed on the total number of concurrent connections. The default
of 10 will be used if the value of the parameter is `None`.
:type max_overflow: int
:default max_overflow: None
:keyword pool_timeout: the number of seconds to wait before giving up on
returning a connection. The default of 30 will be used if the value of
the parameter is `None`.
:type pool_timeout: int
:default pool_timeout: None
"""
conf.register_opts(database_opts, group='database')
if connection is not None:
conf.set_default('connection', connection, group='database')
if sqlite_db is not None:
conf.set_default('sqlite_db', sqlite_db, group='database')
if max_pool_size is not None:
conf.set_default('max_pool_size', max_pool_size, group='database')
if max_overflow is not None:
conf.set_default('max_overflow', max_overflow, group='database')
if pool_timeout is not None:
conf.set_default('pool_timeout', pool_timeout, group='database')
def list_opts():
"""Returns a list of oslo.config options available in the library.
The returned list includes all oslo.config options which may be registered
at runtime by the library.
Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by this library.
:returns: a list of (group_name, opts) tuples
"""
return [('database', copy.deepcopy(database_opts))]
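For example, a caller might override a few defaults before options are parsed; the values here are illustrative only:

# Hypothetical use of set_defaults(); the values are illustrative.
from oslo.config import cfg

from oslo_db import options

conf = cfg.ConfigOpts()
options.set_defaults(conf,
                     connection='sqlite://',  # in-memory SQLite
                     max_pool_size=10,
                     max_overflow=20,
                     pool_timeout=10)
# conf.database.connection is now 'sqlite://' unless overridden by the user.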

oslo_db/sqlalchemy/compat/__init__.py (new file)

@ -0,0 +1,30 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""compatiblity extensions for SQLAlchemy versions.
Elements within this module provide SQLAlchemy features that have been
added at some point but for which oslo.db provides a compatible versions
for previous SQLAlchemy versions.
"""
from oslo_db.sqlalchemy.compat import engine_connect as _e_conn
from oslo_db.sqlalchemy.compat import handle_error as _h_err
# trying to get: "from oslo_db.sqlalchemy import compat; compat.handle_error"
# flake8 won't let me import handle_error directly
engine_connect = _e_conn.engine_connect
handle_error = _h_err.handle_error
handle_connect_context = _h_err.handle_connect_context
__all__ = [
'engine_connect', 'handle_error',
'handle_connect_context']

oslo_db/sqlalchemy/compat/engine_connect.py

@ -20,7 +20,7 @ http://docs.sqlalchemy.org/en/rel_0_9/core/events.html.
from sqlalchemy.engine import Engine
from sqlalchemy import event
from oslo.db.sqlalchemy.compat import utils
from oslo_db.sqlalchemy.compat import utils
def engine_connect(engine, listener):

oslo_db/sqlalchemy/compat/handle_error.py

@ -24,7 +24,7 @
from sqlalchemy import event
from sqlalchemy import exc as sqla_exc
from oslo.db.sqlalchemy.compat import utils
from oslo_db.sqlalchemy.compat import utils
def handle_error(engine, listener):

oslo_db/sqlalchemy/compat/utils.py (new file)

@ -0,0 +1,26 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sqlalchemy
_SQLA_VERSION = tuple(
int(num) if re.match(r'^\d+$', num) else num
for num in sqlalchemy.__version__.split(".")
)
sqla_100 = _SQLA_VERSION >= (1, 0, 0)
sqla_097 = _SQLA_VERSION >= (0, 9, 7)
sqla_094 = _SQLA_VERSION >= (0, 9, 4)
sqla_090 = _SQLA_VERSION >= (0, 9, 0)
sqla_08 = _SQLA_VERSION >= (0, 8)
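These flags are intended to gate version-dependent code paths; a minimal, hypothetical example:

# Hypothetical gating on the SQLAlchemy version flags defined above.
from oslo_db.sqlalchemy.compat import utils

if utils.sqla_094:
    # SQLAlchemy >= 0.9.4 provides the needed feature natively.
    use_native_feature = True
else:
    # Fall back to an oslo_db-provided compatible version.
    use_native_feature = False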

oslo_db/sqlalchemy/exc_filters.py (new file)

@ -0,0 +1,358 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Define exception redefinitions for SQLAlchemy DBAPI exceptions."""
import collections
import logging
import re
from sqlalchemy import exc as sqla_exc
from oslo_db._i18n import _LE
from oslo_db import exception
from oslo_db.sqlalchemy import compat
LOG = logging.getLogger(__name__)
_registry = collections.defaultdict(
lambda: collections.defaultdict(
list
)
)
def filters(dbname, exception_type, regex):
"""Mark a function as receiving a filtered exception.
:param dbname: string database name, e.g. 'mysql'
:param exception_type: a SQLAlchemy database exception class, which
extends from :class:`sqlalchemy.exc.DBAPIError`.
:param regex: a string, or a tuple of strings, that will be processed
as matching regular expressions.
"""
def _receive(fn):
_registry[dbname][exception_type].extend(
(fn, re.compile(reg))
for reg in
((regex,) if not isinstance(regex, tuple) else regex)
)
return fn
return _receive
# NOTE(zzzeek) - for Postgresql, catch both OperationalError, as the
# actual error is
# psycopg2.extensions.TransactionRollbackError(OperationalError),
# as well as sqlalchemy.exc.DBAPIError, as SQLAlchemy will reraise it
# as this until issue #3075 is fixed.
@filters("mysql", sqla_exc.OperationalError, r"^.*\b1213\b.*Deadlock found.*")
@filters("mysql", sqla_exc.OperationalError,
r"^.*\b1205\b.*Lock wait timeout exceeded.*")
@filters("mysql", sqla_exc.InternalError, r"^.*\b1213\b.*Deadlock found.*")
@filters("postgresql", sqla_exc.OperationalError, r"^.*deadlock detected.*")
@filters("postgresql", sqla_exc.DBAPIError, r"^.*deadlock detected.*")
@filters("ibm_db_sa", sqla_exc.DBAPIError, r"^.*SQL0911N.*")
def _deadlock_error(operational_error, match, engine_name, is_disconnect):
"""Filter for MySQL or Postgresql deadlock error.
NOTE(comstud): In current versions of DB backends, Deadlock violation
messages follow the structure:
mysql+mysqldb:
(OperationalError) (1213, 'Deadlock found when trying to get lock; try '
'restarting transaction') <query_str> <query_args>
mysql+mysqlconnector:
(InternalError) 1213 (40001): Deadlock found when trying to get lock; try
restarting transaction
postgresql:
(TransactionRollbackError) deadlock detected <deadlock_details>
ibm_db_sa:
SQL0911N The current transaction has been rolled back because of a
deadlock or timeout <deadlock details>
"""
raise exception.DBDeadlock(operational_error)
@filters("mysql", sqla_exc.IntegrityError,
r"^.*\b1062\b.*Duplicate entry '(?P<value>[^']+)'"
r" for key '(?P<columns>[^']+)'.*$")
# NOTE(pkholkin): the first regex is suitable only for PostgreSQL 9.x versions
# the second regex is suitable for PostgreSQL 8.x versions
@filters("postgresql", sqla_exc.IntegrityError,
(r'^.*duplicate\s+key.*"(?P<columns>[^"]+)"\s*\n.*'
r'Key\s+\((?P<key>.*)\)=\((?P<value>.*)\)\s+already\s+exists.*$',
r"^.*duplicate\s+key.*\"(?P<columns>[^\"]+)\"\s*\n.*$"))
def _default_dupe_key_error(integrity_error, match, engine_name,
is_disconnect):
"""Filter for MySQL or Postgresql duplicate key error.
note(boris-42): In current versions of DB backends unique constraint
violation messages follow the structure:
postgres:
1 column - (IntegrityError) duplicate key value violates unique
constraint "users_c1_key"
N columns - (IntegrityError) duplicate key value violates unique
constraint "name_of_our_constraint"
mysql+mysqldb:
1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
'c1'")
N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
with -' for key 'name_of_our_constraint'")
mysql+mysqlconnector:
1 column - (IntegrityError) 1062 (23000): Duplicate entry 'value_of_c1' for
key 'c1'
N columns - (IntegrityError) 1062 (23000): Duplicate entry 'values
joined with -' for key 'name_of_our_constraint'
"""
columns = match.group('columns')
# note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
# where `t` is the table name and `c1`, `c2`
# are the columns in the UniqueConstraint.
uniqbase = "uniq_"
if not columns.startswith(uniqbase):
if engine_name == "postgresql":
columns = [columns[columns.index("_") + 1:columns.rindex("_")]]
else:
columns = [columns]
else:
columns = columns[len(uniqbase):].split("0")[1:]
value = match.groupdict().get('value')
raise exception.DBDuplicateEntry(columns, integrity_error, value)
@filters("sqlite", sqla_exc.IntegrityError,
(r"^.*columns?(?P<columns>[^)]+)(is|are)\s+not\s+unique$",
r"^.*UNIQUE\s+constraint\s+failed:\s+(?P<columns>.+)$",
r"^.*PRIMARY\s+KEY\s+must\s+be\s+unique.*$"))
def _sqlite_dupe_key_error(integrity_error, match, engine_name, is_disconnect):
"""Filter for SQLite duplicate key error.
note(boris-42): In current versions of DB backends unique constraint
violation messages follow the structure:
sqlite:
1 column - (IntegrityError) column c1 is not unique
N columns - (IntegrityError) column c1, c2, ..., N are not unique
sqlite since 3.7.16:
1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
sqlite since 3.8.2:
(IntegrityError) PRIMARY KEY must be unique
"""
columns = []
# NOTE(ochuprykov): We can get here by last filter in which there are no
# groups. Trying to access the substring that matched by
# the group will lead to IndexError. In this case just
# pass empty list to exception.DBDuplicateEntry
try:
columns = match.group('columns')
columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
except IndexError:
pass
raise exception.DBDuplicateEntry(columns, integrity_error)
@filters("sqlite", sqla_exc.IntegrityError,
r"(?i).*foreign key constraint failed")
@filters("postgresql", sqla_exc.IntegrityError,
r".*on table \"(?P<table>[^\"]+)\" violates "
"foreign key constraint \"(?P<constraint>[^\"]+)\"\s*\n"
"DETAIL: Key \((?P<key>.+)\)=\(.+\) "
"is not present in table "
"\"(?P<key_table>[^\"]+)\".")
@filters("mysql", sqla_exc.IntegrityError,
r".* 'Cannot add or update a child row: "
'a foreign key constraint fails \([`"].+[`"]\.[`"](?P<table>.+)[`"], '
'CONSTRAINT [`"](?P<constraint>.+)[`"] FOREIGN KEY '
'\([`"](?P<key>.+)[`"]\) REFERENCES [`"](?P<key_table>.+)[`"] ')
def _foreign_key_error(integrity_error, match, engine_name, is_disconnect):
"""Filter for foreign key errors."""
try:
table = match.group("table")
except IndexError:
table = None
try:
constraint = match.group("constraint")
except IndexError:
constraint = None
try:
key = match.group("key")
except IndexError:
key = None
try:
key_table = match.group("key_table")
except IndexError:
key_table = None
raise exception.DBReferenceError(table, constraint, key, key_table,
integrity_error)
@filters("ibm_db_sa", sqla_exc.IntegrityError, r"^.*SQL0803N.*$")
def _db2_dupe_key_error(integrity_error, match, engine_name, is_disconnect):
"""Filter for DB2 duplicate key errors.
N columns - (IntegrityError) SQL0803N One or more values in the INSERT
statement, UPDATE statement, or foreign key update caused by a
DELETE statement are not valid because the primary key, unique
constraint or unique index identified by "2" constrains table
"NOVA.KEY_PAIRS" from having duplicate values for the index
key.
"""
# NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
# columns so we have to omit that from the DBDuplicateEntry error.
raise exception.DBDuplicateEntry([], integrity_error)
@filters("mysql", sqla_exc.DBAPIError, r".*\b1146\b")
def _raise_mysql_table_doesnt_exist_asis(
error, match, engine_name, is_disconnect):
"""Raise MySQL error 1146 as is.
Raise MySQL error 1146 as is, so that it does not conflict with
the MySQL dialect's checking a table not existing.
"""
raise error
@filters("*", sqla_exc.OperationalError, r".*")
def _raise_operational_errors_directly_filter(operational_error,
match, engine_name,
is_disconnect):
"""Filter for all remaining OperationalError classes and apply.
Filter for all remaining OperationalError classes and apply
special rules.
"""
if is_disconnect:
# operational errors that represent disconnect
# should be wrapped
raise exception.DBConnectionError(operational_error)
else:
# NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now.
raise operational_error
@filters("mysql", sqla_exc.OperationalError, r".*\(.*(?:2002|2003|2006|2013)")
@filters("ibm_db_sa", sqla_exc.OperationalError, r".*(?:30081)")
def _is_db_connection_error(operational_error, match, engine_name,
is_disconnect):
"""Detect the exception as indicating a recoverable error on connect."""
raise exception.DBConnectionError(operational_error)
@filters("*", sqla_exc.DBAPIError, r".*")
def _raise_for_remaining_DBAPIError(error, match, engine_name, is_disconnect):
"""Filter for remaining DBAPIErrors.
Filter for remaining DBAPIErrors and wrap if they represent
a disconnect error.
"""
if is_disconnect:
raise exception.DBConnectionError(error)
else:
LOG.exception(
_LE('DBAPIError exception wrapped from %s'), error)
raise exception.DBError(error)
@filters('*', UnicodeEncodeError, r".*")
def _raise_for_unicode_encode(error, match, engine_name, is_disconnect):
raise exception.DBInvalidUnicodeParameter()
@filters("*", Exception, r".*")
def _raise_for_all_others(error, match, engine_name, is_disconnect):
LOG.exception(_LE('DB exception wrapped.'))
raise exception.DBError(error)
def handler(context):
"""Iterate through available filters and invoke those which match.
The first one which raises wins. The order in which the filters
are attempted is sorted by specificity - dialect name or "*",
exception class per method resolution order (``__mro__``).
Method resolution order is used so that filter rules indicating a
more specific exception class are attempted first.
"""
def _dialect_registries(engine):
if engine.dialect.name in _registry:
yield _registry[engine.dialect.name]
if '*' in _registry:
yield _registry['*']
for per_dialect in _dialect_registries(context.engine):
for exc in (
context.sqlalchemy_exception,
context.original_exception):
for super_ in exc.__class__.__mro__:
if super_ in per_dialect:
regexp_reg = per_dialect[super_]
for fn, regexp in regexp_reg:
match = regexp.match(exc.args[0])
if match:
try:
fn(
exc,
match,
context.engine.dialect.name,
context.is_disconnect)
except exception.DBConnectionError:
context.is_disconnect = True
raise
def register_engine(engine):
compat.handle_error(engine, handler)
def handle_connect_error(engine):
"""Handle connect error.
Provide a special context that will allow on-connect errors
to be treated within the filtering context.
This routine is dependent on SQLAlchemy version, as version 1.0.0
provides this functionality natively.
"""
with compat.handle_connect_context(handler, engine):
return engine.connect()
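As a usage sketch (in practice oslo_db's own engine creation is expected to perform this registration), the filters are attached to an engine via register_engine(); the URL and statements are illustrative:

# Hypothetical registration of the exception filters on a raw engine.
import sqlalchemy

from oslo_db import exception
from oslo_db.sqlalchemy import exc_filters

engine = sqlalchemy.create_engine('sqlite://')  # illustrative URL
exc_filters.register_engine(engine)

engine.execute("CREATE TABLE users (id INTEGER PRIMARY KEY)")
engine.execute("INSERT INTO users (id) VALUES (1)")
try:
    engine.execute("INSERT INTO users (id) VALUES (1)")  # duplicate key
except exception.DBDuplicateEntry:
    # The raw IntegrityError was matched by a filter and re-raised wrapped.
    pass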

oslo_db/sqlalchemy/migration.py (new file)

@ -0,0 +1,160 @@
# coding=utf-8
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Base on code in migrate/changeset/databases/sqlite.py which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from oslo_db._i18n import _
from oslo_db import exception
def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
"""Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version.
If None, the database will update to the latest
available version.
:param init_version: Initial database version
:param sanity_check: Require schema sanity checking for all tables
"""
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.DbMigrationError(
message=_("version should be an integer"))
current_version = db_version(engine, abs_path, init_version)
repository = _find_migrate_repo(abs_path)
if sanity_check:
_db_schema_sanity_check(engine)
if version is None or version > current_version:
return versioning_api.upgrade(engine, repository, version)
else:
return versioning_api.downgrade(engine, repository,
version)
def _db_schema_sanity_check(engine):
"""Ensure all database tables were created with required parameters.
:param engine: SQLAlchemy engine instance for a given database
"""
if engine.name == 'mysql':
onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
'from information_schema.TABLES '
'where TABLE_SCHEMA=%s and '
'TABLE_COLLATION NOT LIKE \'%%utf8%%\'')
# NOTE(morganfainberg): exclude the sqlalchemy-migrate and alembic
# versioning tables from the tables we need to verify utf8 status on.
# Non-standard table names are not supported.
EXCLUDED_TABLES = ['migrate_version', 'alembic_version']
table_names = [res[0] for res in
engine.execute(onlyutf8_sql, engine.url.database) if
res[0].lower() not in EXCLUDED_TABLES]
if len(table_names) > 0:
raise ValueError(_('Tables "%s" have non utf8 collation, '
'please make sure all tables are CHARSET=utf8'
) % ','.join(table_names))
def db_version(engine, abs_path, init_version):
"""Show the current version of the repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository
:param init_version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
try:
return versioning_api.db_version(engine, repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0 or 'alembic_version' in tables:
db_version_control(engine, abs_path, version=init_version)
return versioning_api.db_version(engine, repository)
else:
raise exception.DbMigrationError(
message=_(
"The database is not under version control, but has "
"tables. Please stamp the current version of the schema "
"manually."))
def db_version_control(engine, abs_path, version=None):
"""Mark a database as under this repository's version control.
Once a database is under version control, schema changes should
only be done via change scripts in this repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
versioning_api.version_control(engine, repository, version)
return version
def _find_migrate_repo(abs_path):
"""Get the project's change script repository
:param abs_path: Absolute path to migrate repository
"""
if not os.path.exists(abs_path):
raise exception.DbMigrationError("Path %s not found" % abs_path)
return Repository(abs_path)
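A hedged sketch of driving these helpers; the engine URL and repository path are hypothetical:

# Hypothetical invocation of the sqlalchemy-migrate helpers above.
import sqlalchemy

from oslo_db.sqlalchemy import migration

engine = sqlalchemy.create_engine('sqlite:////tmp/example.db')  # illustrative
MIGRATE_REPO = '/opt/myservice/db/migrate_repo'  # hypothetical repo path

# Upgrade to the latest available version (also sanity-checks MySQL tables).
migration.db_sync(engine, MIGRATE_REPO)
print(migration.db_version(engine, MIGRATE_REPO, init_version=0))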

oslo_db/sqlalchemy/migration_cli/ext_alembic.py

@ -16,8 +16,8 @ import alembic
from alembic import config as alembic_config
import alembic.migration as alembic_migration
from oslo.db.sqlalchemy.migration_cli import ext_base
from oslo.db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy.migration_cli import ext_base
from oslo_db.sqlalchemy import session as db_session
class AlembicExtension(ext_base.MigrationExtensionBase):

oslo_db/sqlalchemy/migration_cli/ext_migrate.py

@ -13,10 +13,10 @
import logging
import os
from oslo.db._i18n import _LE
from oslo.db.sqlalchemy import migration
from oslo.db.sqlalchemy.migration_cli import ext_base
from oslo.db.sqlalchemy import session as db_session
from oslo_db._i18n import _LE
from oslo_db.sqlalchemy import migration
from oslo_db.sqlalchemy.migration_cli import ext_base
from oslo_db.sqlalchemy import session as db_session
LOG = logging.getLogger(__name__)

oslo_db/sqlalchemy/models.py (new file)

@ -0,0 +1,128 @@
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""
import six
from oslo.utils import timeutils
from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper
class ModelBase(six.Iterator):
"""Base class for models."""
__table_initialized__ = False
def save(self, session):
"""Save this object."""
# NOTE(boris-42): This part of code should be look like:
# session.add(self)
# session.flush()
# But there is a bug in sqlalchemy and eventlet that
# raises NoneType exception if there is no running
# transaction and rollback is called. As long as
# sqlalchemy has this bug we have to create transaction
# explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def __contains__(self, key):
return hasattr(self, key)
def get(self, key, default=None):
return getattr(self, key, default)
@property
def _extra_keys(self):
"""Specifies custom fields
Subclasses can override this property to return a list
of custom fields that should be included in their dict
representation.
For reference check tests/db/sqlalchemy/test_models.py
"""
return []
def __iter__(self):
columns = list(dict(object_mapper(self).columns).keys())
# NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name'
# property for an Instance.
columns.extend(self._extra_keys)
return ModelIterator(self, iter(columns))
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in six.iteritems(values):
setattr(self, k, v)
def iteritems(self):
"""Make the model object behave like a dict.
Includes attributes from joins.
"""
local = dict(self)
joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
if not k[0] == '_'])
local.update(joined)
return six.iteritems(local)
class ModelIterator(ModelBase, six.Iterator):
def __init__(self, model, columns):
self.model = model
self.i = columns
def __iter__(self):
return self
# In Python 3, __next__() has replaced next().
def __next__(self):
n = six.advance_iterator(self.i)
return n, getattr(self.model, n)
class TimestampMixin(object):
created_at = Column(DateTime, default=lambda: timeutils.utcnow())
updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())
class SoftDeleteMixin(object):
deleted_at = Column(DateTime)
deleted = Column(Integer, default=0)
def soft_delete(self, session):
"""Mark this object as deleted."""
self.deleted = self.id
self.deleted_at = timeutils.utcnow()
self.save(session=session)
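For illustration, a hypothetical declarative model combining these mixins:

# Hypothetical model built on the base classes above; the table and
# column names are illustrative.
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

from oslo_db.sqlalchemy import models

BASE = declarative_base()


class Instance(BASE, models.ModelBase,
               models.TimestampMixin, models.SoftDeleteMixin):
    __tablename__ = 'instances'
    id = Column(Integer, primary_key=True)
    hostname = Column(String(255))

# instance = Instance(hostname='node1')
# instance.save(session)          # wraps add()/flush() in a transaction
# instance.soft_delete(session)   # sets deleted/deleted_at instead of DELETE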

oslo_db/sqlalchemy/provision.py (new file)

@ -0,0 +1,507 @@
# Copyright 2013 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provision test environment for specific DB backends"""
import abc
import argparse
import logging
import os
import random
import re
import string
import six
from six import moves
import sqlalchemy
from sqlalchemy.engine import url as sa_url
from oslo_db._i18n import _LI
from oslo_db import exception
from oslo_db.sqlalchemy import session
from oslo_db.sqlalchemy import utils
LOG = logging.getLogger(__name__)
class ProvisionedDatabase(object):
"""Represent a single database node that can be used for testing in
a serialized fashion.
``ProvisionedDatabase`` includes features for full lifecycle management
of a node, in a way that is context-specific. Depending on how the
test environment runs, ``ProvisionedDatabase`` should know if it needs
to create and drop databases or if it is making use of a database that
is maintained by an external process.
"""
def __init__(self, database_type):
self.backend = Backend.backend_for_database_type(database_type)
self.db_token = _random_ident()
self.backend.create_named_database(self.db_token)
self.engine = self.backend.provisioned_engine(self.db_token)
def dispose(self):
self.engine.dispose()
self.backend.drop_named_database(self.db_token)
class Backend(object):
"""Represent a particular database backend that may be provisionable.
The ``Backend`` object maintains a database type (e.g. database without
specific driver type, such as "sqlite", "postgresql", etc.),
a target URL, a base ``Engine`` for that URL object that can be used
to provision databases and a ``BackendImpl`` which knows how to perform
operations against this type of ``Engine``.
"""
backends_by_database_type = {}
def __init__(self, database_type, url):
self.database_type = database_type
self.url = url
self.verified = False
self.engine = None
self.impl = BackendImpl.impl(database_type)
Backend.backends_by_database_type[database_type] = self
@classmethod
def backend_for_database_type(cls, database_type):
"""Return and verify the ``Backend`` for the given database type.
Creates the engine if it does not already exist and raises
``BackendNotAvailable`` if it cannot be produced.
:return: a base ``Engine`` that allows provisioning of databases.
:raises: ``BackendNotAvailable``, if an engine for this backend
cannot be produced.
"""
try:
backend = cls.backends_by_database_type[database_type]
except KeyError:
raise exception.BackendNotAvailable(database_type)
else:
return backend._verify()
@classmethod
def all_viable_backends(cls):
"""Return an iterator of all ``Backend`` objects that are present
and provisionable.
"""
for backend in cls.backends_by_database_type.values():
try:
yield backend._verify()
except exception.BackendNotAvailable:
pass
def _verify(self):
"""Verify that this ``Backend`` is available and provisionable.
:return: this ``Backend``
:raises: ``BackendNotAvailable`` if the backend is not available.
"""
if not self.verified:
try:
eng = self._ensure_backend_available(self.url)
except exception.BackendNotAvailable:
raise
else:
self.engine = eng
finally:
self.verified = True
if self.engine is None:
raise exception.BackendNotAvailable(self.database_type)
return self
@classmethod
def _ensure_backend_available(cls, url):
url = sa_url.make_url(str(url))
try:
eng = sqlalchemy.create_engine(url)
except ImportError as i_e:
# SQLAlchemy performs an "import" of the DBAPI module
# within create_engine(). So if ibm_db_sa, cx_oracle etc.
# isn't installed, we get an ImportError here.
LOG.info(
_LI("The %(dbapi)s backend is unavailable: %(err)s"),
dict(dbapi=url.drivername, err=i_e))
raise exception.BackendNotAvailable("No DBAPI installed")
else:
try:
conn = eng.connect()
except sqlalchemy.exc.DBAPIError as d_e:
# upon connect, SQLAlchemy calls dbapi.connect(). This
# usually raises OperationalError and should always at
# least raise a SQLAlchemy-wrapped DBAPI Error.
LOG.info(
_LI("The %(dbapi)s backend is unavailable: %(err)s"),
dict(dbapi=url.drivername, err=d_e)
)
raise exception.BackendNotAvailable("Could not connect")
else:
conn.close()
return eng
def create_named_database(self, ident):
"""Create a database with the given name."""
self.impl.create_named_database(self.engine, ident)
def drop_named_database(self, ident, conditional=False):
"""Drop a database with the given name."""
self.impl.drop_named_database(
self.engine, ident,
conditional=conditional)
def database_exists(self, ident):
"""Return True if a database of the given name exists."""
return self.impl.database_exists(self.engine, ident)
def provisioned_engine(self, ident):
"""Given the URL of a particular database backend and the string
name of a particular 'database' within that backend, return
an Engine instance whose connections will refer directly to the
named database.
For hostname-based URLs, this typically involves switching just the
'database' portion of the URL with the given name and creating
an engine.
For URLs that instead deal with DSNs, the rules may be more custom;
for example, the engine may need to connect to the root URL and
then emit a command to switch to the named database.
"""
return self.impl.provisioned_engine(self.url, ident)
@classmethod
def _setup(cls):
"""Initial startup feature will scan the environment for configured
URLs and place them into the list of URLs we will use for provisioning.
This searches through OS_TEST_DBAPI_ADMIN_CONNECTION for URLs. If
not present, we set up URLs based on the "opportunistic" convention,
e.g. username+password = "openstack_citest".
The provisioning system will then use or discard these URLs as they
are requested, based on whether or not the target database is actually
found to be available.
"""
configured_urls = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', None)
if configured_urls:
configured_urls = configured_urls.split(";")
else:
configured_urls = [
impl.create_opportunistic_driver_url()
for impl in BackendImpl.all_impls()
]
for url_str in configured_urls:
url = sa_url.make_url(url_str)
m = re.match(r'([^+]+?)(?:\+(.+))?$', url.drivername)
database_type, drivertype = m.group(1, 2)
Backend(database_type, url)
@six.add_metaclass(abc.ABCMeta)
class BackendImpl(object):
"""Provide database-specific implementations of key provisioning
functions.
``BackendImpl`` is owned by a ``Backend`` instance which delegates
to it for all database-specific features.
"""
@classmethod
def all_impls(cls):
"""Return an iterator of all possible BackendImpl objects.
These are BackendImpls that are implemented, but not
necessarily provisionable.
"""
for database_type in cls.impl.reg:
if database_type == '*':
continue
yield BackendImpl.impl(database_type)
@utils.dispatch_for_dialect("*")
def impl(drivername):
"""Return a ``BackendImpl`` instance corresponding to the
given driver name.
This is a dispatched method which will refer to the constructor
of implementing subclasses.
"""
raise NotImplementedError(
"No provision impl available for driver: %s" % drivername)
def __init__(self, drivername):
self.drivername = drivername
@abc.abstractmethod
def create_opportunistic_driver_url(self):
"""Produce a string url known as the 'opportunistic' URL.
This URL is one that corresponds to an established OpenStack
convention for a pre-established database login, which, when
detected as available in the local environment, is automatically
used as a test platform for a specific type of driver.
"""
@abc.abstractmethod
def create_named_database(self, engine, ident):
"""Create a database with the given name."""
@abc.abstractmethod
def drop_named_database(self, engine, ident, conditional=False):
"""Drop a database with the given name."""
def provisioned_engine(self, base_url, ident):
"""Return a provisioned engine.
Given the URL of a particular database backend and the string
name of a particular 'database' within that backend, return
an Engine instance whose connections will refer directly to the
named database.
For hostname-based URLs, this typically involves switching just the
'database' portion of the URL with the given name and creating
an engine.
For URLs that instead deal with DSNs, the rules may be more custom;
for example, the engine may need to connect to the root URL and
then emit a command to switch to the named database.
"""
url = sa_url.make_url(str(base_url))
url.database = ident
return session.create_engine(
url,
logging_name="%s@%s" % (self.drivername, ident))
@BackendImpl.impl.dispatch_for("mysql")
class MySQLBackendImpl(BackendImpl):
def create_opportunistic_driver_url(self):
return "mysql://openstack_citest:openstack_citest@localhost/"
def create_named_database(self, engine, ident):
with engine.connect() as conn:
conn.execute("CREATE DATABASE %s" % ident)
def drop_named_database(self, engine, ident, conditional=False):
with engine.connect() as conn:
if not conditional or self.database_exists(conn, ident):
conn.execute("DROP DATABASE %s" % ident)
def database_exists(self, engine, ident):
return bool(engine.scalar("SHOW DATABASES LIKE '%s'" % ident))
@BackendImpl.impl.dispatch_for("sqlite")
class SQLiteBackendImpl(BackendImpl):
def create_opportunistic_driver_url(self):
return "sqlite://"
def create_named_database(self, engine, ident):
url = self._provisioned_database_url(engine.url, ident)
eng = sqlalchemy.create_engine(url)
eng.connect().close()
def provisioned_engine(self, base_url, ident):
return session.create_engine(
self._provisioned_database_url(base_url, ident))
def drop_named_database(self, engine, ident, conditional=False):
url = self._provisioned_database_url(engine.url, ident)
filename = url.database
if filename and (not conditional or os.access(filename, os.F_OK)):
os.remove(filename)
def database_exists(self, engine, ident):
url = self._provisioned_database_url(engine.url, ident)
filename = url.database
return not filename or os.access(filename, os.F_OK)
def _provisioned_database_url(self, base_url, ident):
if base_url.database:
return sa_url.make_url("sqlite:////tmp/%s.db" % ident)
else:
return base_url
@BackendImpl.impl.dispatch_for("postgresql")
class PostgresqlBackendImpl(BackendImpl):
def create_opportunistic_driver_url(self):
return "postgresql://openstack_citest:openstack_citest"\
"@localhost/postgres"
def create_named_database(self, engine, ident):
with engine.connect().execution_options(
isolation_level="AUTOCOMMIT") as conn:
conn.execute("CREATE DATABASE %s" % ident)
def drop_named_database(self, engine, ident, conditional=False):
with engine.connect().execution_options(
isolation_level="AUTOCOMMIT") as conn:
self._close_out_database_users(conn, ident)
if conditional:
conn.execute("DROP DATABASE IF EXISTS %s" % ident)
else:
conn.execute("DROP DATABASE %s" % ident)
def database_exists(self, engine, ident):
return bool(
engine.scalar(
sqlalchemy.text(
"select datname from pg_database "
"where datname=:name"), name=ident)
)
def _close_out_database_users(self, conn, ident):
"""Attempt to guarantee a database can be dropped.
Optional feature which guarantees no connections with our
username are attached to the DB we're going to drop.
This method has caveats; for one, the 'pid' column was named
'procpid' prior to Postgresql 9.2. But more critically,
prior to 9.2 this operation required superuser permissions,
even if the connections we're closing are under the same username
as us. In more recent versions this restriction has been
lifted for same-user connections.
"""
if conn.dialect.server_version_info >= (9, 2):
conn.execute(
sqlalchemy.text(
"select pg_terminate_backend(pid) "
"from pg_stat_activity "
"where usename=current_user and "
"pid != pg_backend_pid() "
"and datname=:dname"
), dname=ident)
def _random_ident():
return ''.join(
random.choice(string.ascii_lowercase)
for i in moves.range(10))
def _echo_cmd(args):
idents = [_random_ident() for i in moves.range(args.instances_count)]
print("\n".join(idents))
def _create_cmd(args):
idents = [_random_ident() for i in moves.range(args.instances_count)]
for backend in Backend.all_viable_backends():
for ident in idents:
backend.create_named_database(ident)
print("\n".join(idents))
def _drop_cmd(args):
for backend in Backend.all_viable_backends():
for ident in args.instances:
backend.drop_named_database(ident, args.conditional)
Backend._setup()
def main(argv=None):
"""Command line interface to create/drop databases.
::create: Create test database with random names.
::drop: Drop database created by previous command.
::echo: create random names and display them; don't create.
"""
parser = argparse.ArgumentParser(
description='Controller to handle database creation and dropping'
' commands.',
epilog='Typically called by the test runner, e.g. shell script, '
'testr runner via .testr.conf, or other system.')
subparsers = parser.add_subparsers(
help='Subcommands to manipulate temporary test databases.')
create = subparsers.add_parser(
'create',
help='Create temporary test databases.')
create.set_defaults(which=_create_cmd)
create.add_argument(
'instances_count',
type=int,
help='Number of databases to create.')
drop = subparsers.add_parser(
'drop',
help='Drop temporary test databases.')
drop.set_defaults(which=_drop_cmd)
drop.add_argument(
'instances',
nargs='+',
help='List of database names to be dropped.')
drop.add_argument(
'--conditional',
action="store_true",
help="Check if database exists first before dropping"
)
echo = subparsers.add_parser(
'echo',
help="Create random database names and display only."
)
echo.set_defaults(which=_echo_cmd)
echo.add_argument(
'instances_count',
type=int,
help='Number of identifiers to create.')
args = parser.parse_args(argv)
cmd = args.which
cmd(args)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,847 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
`model_query()` will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
.. note:: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and `reservation_rollback()`.
Examples:
.. code-block:: python
def get_foo(context, foo):
return (model_query(context, models.Foo).
filter_by(foo=foo).
first())
def update_foo(context, id, newfoo):
(model_query(context, models.Foo).
filter_by(id=id).
update({'foo': newfoo}))
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keep all the reads and writes within
the context managed by a single session. In this way, the session's
`__exit__` handler will take care of calling `flush()` and `commit()` for
you. If using this approach, you should not explicitly call `flush()` or
`commit()`. Any error within the context of the session will cause the
session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
raised in `session`'s `__exit__` handler, and any try/except within the
context managed by `session` will not be triggered. And catching other
non-database errors in the session will not trigger the ROLLBACK, so
exception handlers should always be outside the session, unless the
developer wants to do a partial commit on purpose. If the connection is
dropped before this is possible, the database will implicitly roll back the
transaction.
.. note:: Statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call `model.save()`:
.. code-block:: python
def create_many_foo(context, foos):
session = sessionmaker()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = sessionmaker()
with session.begin():
foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id).
first())
(model_query(context, models.Bar, session).
filter_by(id=foo_ref['bar_id']).
update({'bar': newbar}))
.. note:: `update_bar` is a trivially simple example of using
``with session.begin``. Whereas `create_many_foo` is a good example of
when a transaction is needed, it is always best to use as few queries as
possible.
The two queries in `update_bar` can be better expressed using a single query
which avoids the need for an explicit transaction. It can be expressed like
so:
.. code-block:: python
def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id).
filter_by(id=foo_id).
limit(1).
subquery())
(model_query(context, models.Bar).
filter_by(id=subq.as_scalar()).
update({'bar': newbar}))
For reference, this emits approximately the following SQL statement:
.. code-block:: sql
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
.. note:: `create_duplicate_foo` is a trivially simple example of catching an
exception while using ``with session.begin``. Here we create two
instances with the same primary key; the resulting exception must be
caught outside of the context managed by the session:
.. code-block:: python
def create_duplicate_foo(context):
foo1 = models.Foo()
foo2 = models.Foo()
foo1.id = foo2.id = 1
session = sessionmaker()
try:
with session.begin():
session.add(foo1)
session.add(foo2)
except exception.DBDuplicateEntry as e:
handle_error(e)
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call `session.begin()` on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the above means
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
.. code-block:: python
def myfunc(foo):
session = sessionmaker()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = sessionmaker()
with session.begin(subtransaction=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your ``with session.begin()`` block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid ``with_lockmode('UPDATE')`` when possible.
In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use
``INSERT .. ON DUPLICATE KEY UPDATE``.
However, this cannot be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
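A sketch of the SELECT-then-lock idea (a hypothetical helper, not part of
this library; note the two statements are still subject to a race between
them):
.. code-block:: python
    def update_foo_if_single_match(context, foo, newfoo):
        session = sessionmaker()
        with session.begin():
            count = (model_query(context, models.Foo, session).
                     filter_by(foo=foo).
                     count())
            if count == 1:
                # Exactly one row matches, so FOR UPDATE locks a row
                # rather than a gap at the end of the index.
                row = (model_query(context, models.Foo, session).
                       filter_by(foo=foo).
                       with_lockmode('update').
                       first())
                row.foo = newfoo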
Enabling soft deletes:
* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
to your model class. For example:
.. code-block:: python
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:
`model.soft_delete()` and `query.soft_delete()`.
The `model.soft_delete()` method works with a single already-fetched entry.
`query.soft_delete()` makes only one db request for all entries that
correspond to the query.
* In almost all cases you should use `query.soft_delete()`. Some examples:
.. code-block:: python
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = sessionmaker()
with session.begin(subtransactions=True):
count = (model_query(BarModel).
find(some_condition).
soft_delete(synchronize_session=True))
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where `model.soft_delete()` is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
.. code-block:: python
def soft_delete_bar_model():
session = sessionmaker()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to a query
and then soft delete them, you should use the `query.soft_delete()` method:
.. code-block:: python
def soft_delete_multi_models():
session = sessionmaker()
with session.begin():
query = (model_query(BarModel, session=session).
find(some_condition))
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using `model.soft_delete()`, as in the following
example, is very inefficient.
.. code-block:: python
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import itertools
import logging
import re
import time
from oslo.utils import timeutils
import six
import sqlalchemy.orm
from sqlalchemy import pool
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import select
from oslo_db._i18n import _LW
from oslo_db import exception
from oslo_db import options
from oslo_db.sqlalchemy import compat
from oslo_db.sqlalchemy import exc_filters
from oslo_db.sqlalchemy import utils
LOG = logging.getLogger(__name__)
def _thread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
execute instead of time.sleep(0).
Force a context switch. With common database backends (e.g. MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that eventlet cannot monkey patch.
"""
time.sleep(0)
def _connect_ping_listener(connection, branch):
"""Ping the server at connection startup.
Ping the server at transaction begin and transparently reconnect
if a disconnect exception occurs.
"""
if branch:
return
# turn off "close with result". This can also be accomplished
# by branching the connection, however just setting the flag is
# more performant and also doesn't get involved with some
# connection-invalidation awkwardness that occurs (see
# https://bitbucket.org/zzzeek/sqlalchemy/issue/3215/)
save_should_close_with_result = connection.should_close_with_result
connection.should_close_with_result = False
try:
# run a SELECT 1. use a core select() so that
# any details like that needed by Oracle, DB2 etc. are handled.
connection.scalar(select([1]))
except exception.DBConnectionError:
# catch DBConnectionError, which is raised by the filter
# system.
# disconnect detected. The connection is now
# "invalid", but the pool should be ready to return
# new connections assuming they are good now.
# run the select again to re-validate the Connection.
connection.scalar(select([1]))
finally:
connection.should_close_with_result = save_should_close_with_result
def _setup_logging(connection_debug=0):
"""setup_logging function maps SQL debug level to Python log level.
Connection_debug is a verbosity of SQL debugging information.
0=None(default value),
1=Processed only messages with WARNING level or higher
50=Processed only messages with INFO level or higher
100=Processed only messages with DEBUG level
"""
if connection_debug >= 0:
logger = logging.getLogger('sqlalchemy.engine')
if connection_debug >= 100:
logger.setLevel(logging.DEBUG)
elif connection_debug >= 50:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
idle_timeout=3600,
connection_debug=0, max_pool_size=None, max_overflow=None,
pool_timeout=None, sqlite_synchronous=True,
connection_trace=False, max_retries=10, retry_interval=10,
thread_checkin=True, logging_name=None):
"""Return a new SQLAlchemy engine."""
url = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
"pool_recycle": idle_timeout,
'convert_unicode': True,
'connect_args': {},
'logging_name': logging_name
}
_setup_logging(connection_debug)
_init_connection_args(
url, engine_args,
sqlite_fk=sqlite_fk,
max_pool_size=max_pool_size,
max_overflow=max_overflow,
pool_timeout=pool_timeout
)
engine = sqlalchemy.create_engine(url, **engine_args)
_init_events(
engine,
mysql_sql_mode=mysql_sql_mode,
sqlite_synchronous=sqlite_synchronous,
sqlite_fk=sqlite_fk,
thread_checkin=thread_checkin,
connection_trace=connection_trace
)
# register alternate exception handler
exc_filters.register_engine(engine)
# register engine connect handler
compat.engine_connect(engine, _connect_ping_listener)
# initial connect + test
_test_connection(engine, max_retries, retry_interval)
return engine
@utils.dispatch_for_dialect('*', multiple=True)
def _init_connection_args(
url, engine_args,
max_pool_size=None, max_overflow=None, pool_timeout=None, **kw):
pool_class = url.get_dialect().get_pool_class(url)
if issubclass(pool_class, pool.QueuePool):
if max_pool_size is not None:
engine_args['pool_size'] = max_pool_size
if max_overflow is not None:
engine_args['max_overflow'] = max_overflow
if pool_timeout is not None:
engine_args['pool_timeout'] = pool_timeout
@_init_connection_args.dispatch_for("sqlite")
def _init_connection_args(url, engine_args, **kw):
pool_class = url.get_dialect().get_pool_class(url)
# singletonthreadpool is used for :memory: connections;
# replace it with StaticPool.
if issubclass(pool_class, pool.SingletonThreadPool):
engine_args["poolclass"] = pool.StaticPool
engine_args['connect_args']['check_same_thread'] = False
@_init_connection_args.dispatch_for("postgresql")
def _init_connection_args(url, engine_args, **kw):
if 'client_encoding' not in url.query:
# Set encoding using engine_args instead of connect_args since
# it's supported for PostgreSQL 8.*. More details at:
# http://docs.sqlalchemy.org/en/rel_0_9/dialects/postgresql.html
engine_args['client_encoding'] = 'utf8'
@_init_connection_args.dispatch_for("mysql")
def _init_connection_args(url, engine_args, **kw):
if 'charset' not in url.query:
engine_args['connect_args']['charset'] = 'utf8'
@_init_connection_args.dispatch_for("mysql+mysqlconnector")
def _init_connection_args(url, engine_args, **kw):
# mysqlconnector engine (<1.0) incorrectly defaults to
# raise_on_warnings=True
# https://bitbucket.org/zzzeek/sqlalchemy/issue/2515
if 'raise_on_warnings' not in url.query:
engine_args['connect_args']['raise_on_warnings'] = False
@_init_connection_args.dispatch_for("mysql+mysqldb")
@_init_connection_args.dispatch_for("mysql+oursql")
def _init_connection_args(url, engine_args, **kw):
# These drivers require use_unicode=0 to avoid a performance drop due
# to the internal usage of Python unicode objects in the driver
# http://docs.sqlalchemy.org/en/rel_0_9/dialects/mysql.html
if 'use_unicode' not in url.query:
engine_args['connect_args']['use_unicode'] = 0
@utils.dispatch_for_dialect('*', multiple=True)
def _init_events(engine, thread_checkin=True, connection_trace=False, **kw):
"""Set up event listeners for all database backends."""
if connection_trace:
_add_trace_comments(engine)
if thread_checkin:
sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
@_init_events.dispatch_for("mysql")
def _init_events(engine, mysql_sql_mode=None, **kw):
"""Set up event listeners for MySQL."""
if mysql_sql_mode is not None:
@sqlalchemy.event.listens_for(engine, "connect")
def _set_session_sql_mode(dbapi_con, connection_rec):
cursor = dbapi_con.cursor()
cursor.execute("SET SESSION sql_mode = %s", [mysql_sql_mode])
@sqlalchemy.event.listens_for(engine, "first_connect")
def _check_effective_sql_mode(dbapi_con, connection_rec):
if mysql_sql_mode is not None:
_set_session_sql_mode(dbapi_con, connection_rec)
cursor = dbapi_con.cursor()
cursor.execute("SHOW VARIABLES LIKE 'sql_mode'")
realmode = cursor.fetchone()
if realmode is None:
LOG.warning(_LW('Unable to detect effective SQL mode'))
else:
realmode = realmode[1]
LOG.debug('MySQL server mode set to %s', realmode)
if 'TRADITIONAL' not in realmode.upper() and \
'STRICT_ALL_TABLES' not in realmode.upper():
LOG.warning(
_LW(
"MySQL SQL mode is '%s', "
"consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
realmode)
@_init_events.dispatch_for("sqlite")
def _init_events(engine, sqlite_synchronous=True, sqlite_fk=False, **kw):
"""Set up event listeners for SQLite.
This includes several settings made on connections as they are
created, as well as transactional control extensions.
"""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
@sqlalchemy.event.listens_for(engine, "connect")
def _sqlite_connect_events(dbapi_con, con_record):
# Add REGEXP functionality on SQLite connections
dbapi_con.create_function('regexp', 2, regexp)
if not sqlite_synchronous:
# Switch sqlite connections to non-synchronous mode
dbapi_con.execute("PRAGMA synchronous = OFF")
# Disable pysqlite's emitting of the BEGIN statement entirely.
# Also stops it from emitting COMMIT before any DDL.
# below, we emit BEGIN ourselves.
# see http://docs.sqlalchemy.org/en/rel_0_9/dialects/\
# sqlite.html#serializable-isolation-savepoints-transactional-ddl
dbapi_con.isolation_level = None
if sqlite_fk:
# Ensures that the foreign key constraints are enforced in SQLite.
dbapi_con.execute('pragma foreign_keys=ON')
@sqlalchemy.event.listens_for(engine, "begin")
def _sqlite_emit_begin(conn):
# emit our own BEGIN, checking for existing
# transactional state
if 'in_transaction' not in conn.info:
conn.execute("BEGIN")
conn.info['in_transaction'] = True
@sqlalchemy.event.listens_for(engine, "rollback")
@sqlalchemy.event.listens_for(engine, "commit")
def _sqlite_end_transaction(conn):
# remove transactional marker
conn.info.pop('in_transaction', None)
def _test_connection(engine, max_retries, retry_interval):
if max_retries == -1:
attempts = itertools.count()
else:
attempts = six.moves.range(max_retries)
# See: http://legacy.python.org/dev/peps/pep-3110/#semantic-changes for
# why we are not using 'de' directly (it can be removed from the local
# scope).
de_ref = None
for attempt in attempts:
try:
return exc_filters.handle_connect_error(engine)
except exception.DBConnectionError as de:
msg = _LW('SQL connection failed. %s attempts left.')
LOG.warning(msg, max_retries - attempt)
time.sleep(retry_interval)
de_ref = de
else:
if de_ref is not None:
six.reraise(type(de_ref), de_ref)
class Query(sqlalchemy.orm.query.Query):
"""Subclass of sqlalchemy.query with soft_delete() method."""
def soft_delete(self, synchronize_session='evaluate'):
return self.update({'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()},
synchronize_session=synchronize_session)
class Session(sqlalchemy.orm.session.Session):
"""Custom Session class to avoid SqlAlchemy Session monkey patching."""
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
return sqlalchemy.orm.sessionmaker(bind=engine,
class_=Session,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
query_cls=Query)
def _add_trace_comments(engine):
"""Add trace comments.
Augment statements with a trace of the immediate calling code
for a given statement.
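An augmented statement might look like this (paths and names are
illustrative)::
    SELECT * FROM foo -- File: myapp/db/api.py (42) get_foo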
"""
import os
import sys
import traceback
target_paths = set([
os.path.dirname(sys.modules['oslo_db'].__file__),
os.path.dirname(sys.modules['sqlalchemy'].__file__)
])
@sqlalchemy.event.listens_for(engine, "before_cursor_execute", retval=True)
def before_cursor_execute(conn, cursor, statement, parameters, context,
executemany):
# NOTE(zzzeek) - if different steps per DB dialect are desirable
# here, switch out on engine.name for now.
stack = traceback.extract_stack()
our_line = None
for idx, (filename, line, method, function) in enumerate(stack):
for tgt in target_paths:
if filename.startswith(tgt):
our_line = idx
break
if our_line:
break
if our_line:
trace = "; ".join(
"File: %s (%s) %s" % (
line[0], line[1], line[2]
)
# include three lines of context.
for line in stack[our_line - 3:our_line]
)
statement = "%s -- %s" % (statement, trace)
return statement, parameters
class EngineFacade(object):
"""A helper class for removing of global engine instances from oslo.db.
As a library, oslo.db can't decide where to store/when to create engine
and sessionmaker instances, so this must be left for a target application.
On the other hand, in order to simplify the adoption of oslo.db changes,
we'll provide a helper class, which creates engine and sessionmaker
on its instantiation and provides get_engine()/get_session() methods
that are compatible with corresponding utility functions that currently
exist in target projects, e.g. in Nova.
engine/sessionmaker instances will still be global (and they are meant to
be global), but they will be stored in the app context, rather than in the
oslo.db context.
Note: use of this helper is completely optional, and you are encouraged to
integrate engine/sessionmaker instances into your apps in any way you like
(e.g. one might want to bind a session to a request context). Two important
things to remember:
1. An Engine instance is effectively a pool of DB connections, so it's
meant to be shared (and it's thread-safe).
2. A Session instance is not meant to be shared and represents a DB
transactional context (i.e. it's not thread-safe). sessionmaker is
a factory of sessions.
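A minimal usage sketch (the connection URL is illustrative)::
    facade = EngineFacade('sqlite://')
    engine = facade.get_engine()
    session = facade.get_session()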
"""
def __init__(self, sql_connection, slave_connection=None,
sqlite_fk=False, autocommit=True,
expire_on_commit=False, **kwargs):
"""Initialize engine and sessionmaker instances.
:param sql_connection: the connection string for the database to use
:type sql_connection: string
:param slave_connection: the connection string for the 'slave' database
to use. If not provided, the master database
will be used for all operations. Note: this
is meant to be used for offloading of read
operations to asynchronously replicated slaves
to reduce the load on the master database.
:type slave_connection: string
:param sqlite_fk: enable foreign keys in SQLite
:type sqlite_fk: bool
:param autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:param expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
Keyword arguments:
:keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions.
(defaults to TRADITIONAL)
:keyword idle_timeout: timeout before idle sql connections are reaped
(defaults to 3600)
:keyword connection_debug: verbosity of SQL debugging information.
-1=Off, 0=None, 100=Everything (defaults
to 0)
:keyword max_pool_size: maximum number of SQL connections to keep open
in a pool (defaults to SQLAlchemy settings)
:keyword max_overflow: if set, use this value for max_overflow with
sqlalchemy (defaults to SQLAlchemy settings)
:keyword pool_timeout: if set, use this value for pool_timeout with
sqlalchemy (defaults to SQLAlchemy settings)
:keyword sqlite_synchronous: if True, SQLite uses synchronous mode
(defaults to True)
:keyword connection_trace: add python stack traces to SQL as comment
strings (defaults to False)
:keyword max_retries: maximum db connection retries during startup.
(setting -1 implies an infinite retry count)
(defaults to 10)
:keyword retry_interval: interval between retries of opening a sql
connection (defaults to 10)
:keyword thread_checkin: boolean that indicates that between each
engine checkin event a sleep(0) will occur to
allow other greenthreads to run (defaults to
True)
"""
super(EngineFacade, self).__init__()
engine_kwargs = {
'sqlite_fk': sqlite_fk,
'mysql_sql_mode': kwargs.get('mysql_sql_mode', 'TRADITIONAL'),
'idle_timeout': kwargs.get('idle_timeout', 3600),
'connection_debug': kwargs.get('connection_debug', 0),
'max_pool_size': kwargs.get('max_pool_size'),
'max_overflow': kwargs.get('max_overflow'),
'pool_timeout': kwargs.get('pool_timeout'),
'sqlite_synchronous': kwargs.get('sqlite_synchronous', True),
'connection_trace': kwargs.get('connection_trace', False),
'max_retries': kwargs.get('max_retries', 10),
'retry_interval': kwargs.get('retry_interval', 10),
'thread_checkin': kwargs.get('thread_checkin', True)
}
maker_kwargs = {
'autocommit': autocommit,
'expire_on_commit': expire_on_commit
}
self._engine = create_engine(sql_connection=sql_connection,
**engine_kwargs)
self._session_maker = get_maker(engine=self._engine,
**maker_kwargs)
if slave_connection:
self._slave_engine = create_engine(sql_connection=slave_connection,
**engine_kwargs)
self._slave_session_maker = get_maker(engine=self._slave_engine,
**maker_kwargs)
else:
self._slave_engine = None
self._slave_session_maker = None
def get_engine(self, use_slave=False):
"""Get the engine instance (note, that it's shared).
:param use_slave: if possible, use 'slave' database for this engine.
If the connection string for the slave database
wasn't provided, 'master' engine will be returned.
(defaults to False)
:type use_slave: bool
"""
if use_slave and self._slave_engine:
return self._slave_engine
return self._engine
def get_session(self, use_slave=False, **kwargs):
"""Get a Session instance.
:param use_slave: if possible, use 'slave' database connection for
this session. If the connection string for the
slave database wasn't provided, a session bound
to the 'master' engine will be returned.
(defaults to False)
:type use_slave: bool
Keyword arguments will be passed to a sessionmaker instance as is (if
passed, they will override the ones used when the sessionmaker instance
was created). See SQLAlchemy Session docs for details.
"""
if use_slave and self._slave_session_maker:
return self._slave_session_maker(**kwargs)
return self._session_maker(**kwargs)
@classmethod
def from_config(cls, conf,
sqlite_fk=False, autocommit=True, expire_on_commit=False):
"""Initialize EngineFacade using oslo.config config instance options.
:param conf: oslo.config config instance
:type conf: oslo.config.cfg.ConfigOpts
:param sqlite_fk: enable foreign keys in SQLite
:type sqlite_fk: bool
:param autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:param expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
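Example (a sketch; ``conf`` is the application's ConfigOpts instance)::
    facade = EngineFacade.from_config(conf)
    session = facade.get_session()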
"""
conf.register_opts(options.database_opts, 'database')
return cls(sql_connection=conf.database.connection,
slave_connection=conf.database.slave_connection,
sqlite_fk=sqlite_fk,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
mysql_sql_mode=conf.database.mysql_sql_mode,
idle_timeout=conf.database.idle_timeout,
connection_debug=conf.database.connection_debug,
max_pool_size=conf.database.max_pool_size,
max_overflow=conf.database.max_overflow,
pool_timeout=conf.database.pool_timeout,
sqlite_synchronous=conf.database.sqlite_synchronous,
connection_trace=conf.database.connection_trace,
max_retries=conf.database.max_retries,
retry_interval=conf.database.retry_interval)

View File

@ -0,0 +1,127 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
try:
from oslotest import base as test_base
except ImportError:
raise NameError('Oslotest is not installed. Please add oslotest to your'
' test-requirements')
import six
from oslo_db import exception
from oslo_db.sqlalchemy import provision
from oslo_db.sqlalchemy import session
from oslo_db.sqlalchemy import utils
class DbFixture(fixtures.Fixture):
"""Basic database fixture.
Allows to run tests on various db backends, such as SQLite, MySQL and
PostgreSQL. By default use sqlite backend. To override default backend
uri set env variable OS_TEST_DBAPI_CONNECTION with database admin
credentials for specific backend.
"""
DRIVER = "sqlite"
# these names are deprecated, and are not used by DbFixture.
# they are here for backwards compatibility with test suites that
# are referring to them directly.
DBNAME = PASSWORD = USERNAME = 'openstack_citest'
def __init__(self, test):
super(DbFixture, self).__init__()
self.test = test
def setUp(self):
super(DbFixture, self).setUp()
try:
self.provision = provision.ProvisionedDatabase(self.DRIVER)
self.addCleanup(self.provision.dispose)
except exception.BackendNotAvailable:
msg = '%s backend is not available.' % self.DRIVER
return self.test.skip(msg)
else:
self.test.engine = self.provision.engine
self.addCleanup(setattr, self.test, 'engine', None)
self.test.sessionmaker = session.get_maker(self.test.engine)
self.addCleanup(setattr, self.test, 'sessionmaker', None)
class DbTestCase(test_base.BaseTestCase):
"""Base class for testing of DB code.
Using `DbFixture`. Intended to be the main database test case to use all
the tests on a given backend with user defined uri. Backend specific
tests should be decorated with `backend_specific` decorator.
"""
FIXTURE = DbFixture
def setUp(self):
super(DbTestCase, self).setUp()
self.useFixture(self.FIXTURE(self))
class OpportunisticTestCase(DbTestCase):
"""Placeholder for backwards compatibility."""
ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']
def backend_specific(*dialects):
"""Decorator to skip backend specific tests on inappropriate engines.
::dialects: list of dialect names under which the test will be run.
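Example (a sketch; assumes a method on a `DbTestCase` subclass)::
    @backend_specific('mysql', 'postgresql')
    def test_mysql_pg_only(self):
        pass  # skipped unless the engine is MySQL or PostgreSQL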
"""
def wrap(f):
@six.wraps(f)
def ins_wrap(self):
if not set(dialects).issubset(ALLOWED_DIALECTS):
raise ValueError(
"Please use allowed dialects: %s" % ALLOWED_DIALECTS)
if self.engine.name not in dialects:
msg = ('The test "%s" can be run '
'only on %s. Current engine is %s.')
args = (utils.get_callable_name(f), ' '.join(dialects),
self.engine.name)
self.skip(msg % args)
else:
return f(self)
return ins_wrap
return wrap
class MySQLOpportunisticFixture(DbFixture):
DRIVER = 'mysql'
class PostgreSQLOpportunisticFixture(DbFixture):
DRIVER = 'postgresql'
class MySQLOpportunisticTestCase(OpportunisticTestCase):
FIXTURE = MySQLOpportunisticFixture
class PostgreSQLOpportunisticTestCase(OpportunisticTestCase):
FIXTURE = PostgreSQLOpportunisticFixture

View File

@ -0,0 +1,613 @@
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import logging
import pprint
import alembic
import alembic.autogenerate
import alembic.migration
import pkg_resources as pkg
import six
import sqlalchemy
from sqlalchemy.engine import reflection
import sqlalchemy.exc
from sqlalchemy import schema
import sqlalchemy.sql.expression as expr
import sqlalchemy.types as types
from oslo_db._i18n import _LE
from oslo_db import exception as exc
from oslo_db.sqlalchemy import utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class WalkVersionsMixin(object):
"""Test mixin to check upgrade and downgrade ability of migration.
This is only suitable for testing of migrate_ migration scripts. An
abstract class mixin. `INIT_VERSION`, `REPOSITORY` and `migration_api`
attributes must be implemented in subclasses.
.. _auxiliary-dynamic-methods: Auxiliary Methods
Auxiliary Methods:
`migrate_up` and `migrate_down` instance methods of the class can be
used with auxiliary methods named `_pre_upgrade_<revision_id>`,
`_check_<revision_id>`, `_post_downgrade_<revision_id>`. These methods
are intended to check the applied changes for correctness of data
operations. They should be implemented for every particular revision
which you want to check with data. Implementation recommendations for
`_pre_upgrade_<revision_id>`, `_check_<revision_id>` and
`_post_downgrade_<revision_id>`:
* `_pre_upgrade_<revision_id>`: provide data appropriate for the
next revision; the id of the revision that is about to be applied
should be used.
* `_check_<revision_id>`: Insert, select, delete operations
with newly applied changes. The data provided by
`_pre_upgrade_<revision_id>` will be used.
* `_post_downgrade_<revision_id>`: check for the absence of
(i.e. the inability to use) the changes provided by the reverted revision.
Execution order of auxiliary methods when revision is upgrading:
`_pre_upgrade_###` => `upgrade` => `_check_###`
Execution order of auxiliary methods when revision is downgrading:
`downgrade` => `_post_downgrade_###`
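A sketch of the auxiliary methods for a hypothetical revision ``006``::
    def _pre_upgrade_006(self, engine):
        # seed the data that revision 006 will operate on
        return {'id': 1}
    def _check_006(self, engine, data):
        # verify schema and data after upgrading to 006
        pass
    def _post_downgrade_006(self, engine):
        # verify that the changes from 006 are gone
        pass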
.. _migrate: https://sqlalchemy-migrate.readthedocs.org/en/latest/
"""
@abc.abstractproperty
def INIT_VERSION(self):
"""Initial version of a migration repository.
Can be different from 0 if migrations were squashed.
:rtype: int
"""
pass
@abc.abstractproperty
def REPOSITORY(self):
"""Allows basic manipulation with migration repository.
:returns: `migrate.versioning.repository.Repository` subclass.
"""
pass
@abc.abstractproperty
def migration_api(self):
"""Provides API for upgrading, downgrading and version manipulations.
:returns: `migrate.api` or overloaded analog.
"""
pass
@abc.abstractproperty
def migrate_engine(self):
"""Provides engine instance.
Should be the same instance as used when migrations are applied. In
most cases, the `engine` attribute provided by the test class in a
`setUp` method will work.
Example of implementation:
def migrate_engine(self):
return self.engine
:returns: sqlalchemy engine instance
"""
pass
def _walk_versions(self, snake_walk=False, downgrade=True):
"""Check if migration upgrades and downgrades successfully.
DEPRECATED: this function is deprecated and will be removed from
oslo.db in a few releases. Please use walk_versions() method instead.
"""
self.walk_versions(snake_walk, downgrade)
def _migrate_down(self, version, with_data=False):
"""Migrate down to a previous version of the db.
DEPRECATED: this function is deprecated and will be removed from
oslo.db in a few releases. Please use migrate_down() method instead.
"""
return self.migrate_down(version, with_data)
def _migrate_up(self, version, with_data=False):
"""Migrate up to a new version of the db.
DEPRECATED: this function is deprecated and will be removed from
oslo.db in a few releases. Please use migrate_up() method instead.
"""
self.migrate_up(version, with_data)
def walk_versions(self, snake_walk=False, downgrade=True):
"""Check if migration upgrades and downgrades successfully.
Determine the latest version script from the repo, then
upgrade from 1 through to the latest, with no data
in the databases. This just checks that the schema itself
upgrades successfully.
`walk_versions` calls `migrate_up` and `migrate_down` with
`with_data` argument to check changes with data, but these methods
can be called without any extra check outside of `walk_versions`
method.
:param snake_walk: enables checking that each individual migration can
be upgraded/downgraded by itself.
If we have ordered migrations 123abc, 456def, 789ghi and we run
upgrading with the `snake_walk` argument set to `True`, the
migrations will be applied in the following order:
`123abc => 456def => 123abc =>
456def => 789ghi => 456def => 789ghi`
:type snake_walk: bool
:param downgrade: Check downgrade behavior if True.
:type downgrade: bool
"""
# Place the database under version control
self.migration_api.version_control(self.migrate_engine,
self.REPOSITORY,
self.INIT_VERSION)
self.assertEqual(self.INIT_VERSION,
self.migration_api.db_version(self.migrate_engine,
self.REPOSITORY))
LOG.debug('latest version is %s', self.REPOSITORY.latest)
versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
for version in versions:
# upgrade -> downgrade -> upgrade
self.migrate_up(version, with_data=True)
if snake_walk:
downgraded = self.migrate_down(version - 1, with_data=True)
if downgraded:
self.migrate_up(version)
if downgrade:
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
for version in reversed(versions):
# downgrade -> upgrade -> downgrade
downgraded = self.migrate_down(version - 1)
if snake_walk and downgraded:
self.migrate_up(version)
self.migrate_down(version - 1)
def migrate_down(self, version, with_data=False):
"""Migrate down to a previous version of the db.
:param version: id of revision to downgrade.
:type version: str
:keyword with_data: Whether to verify the absence of changes from
migration(s) being downgraded, see
:ref:`auxiliary-dynamic-methods <Auxiliary Methods>`.
:type with_data: Bool
"""
try:
self.migration_api.downgrade(self.migrate_engine,
self.REPOSITORY, version)
except NotImplementedError:
# NOTE(sirp): some migrations, namely release-level
# migrations, don't support a downgrade.
return False
self.assertEqual(version, self.migration_api.db_version(
self.migrate_engine, self.REPOSITORY))
# NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
# version). So if we have any downgrade checks, they need to be run for
# the previous (higher numbered) migration.
if with_data:
post_downgrade = getattr(
self, "_post_downgrade_%03d" % (version + 1), None)
if post_downgrade:
post_downgrade(self.migrate_engine)
return True
def migrate_up(self, version, with_data=False):
"""Migrate up to a new version of the db.
:param version: id of revision to upgrade.
:type version: str
:keyword with_data: Whether to verify the applied changes with data,
see :ref:`auxiliary-dynamic-methods <Auxiliary Methods>`.
:type with_data: Bool
"""
# NOTE(sdague): try block is here because it's impossible to debug
# where a failed data migration happens otherwise
try:
if with_data:
data = None
pre_upgrade = getattr(
self, "_pre_upgrade_%03d" % version, None)
if pre_upgrade:
data = pre_upgrade(self.migrate_engine)
self.migration_api.upgrade(self.migrate_engine,
self.REPOSITORY, version)
self.assertEqual(version,
self.migration_api.db_version(self.migrate_engine,
self.REPOSITORY))
if with_data:
check = getattr(self, "_check_%03d" % version, None)
if check:
check(self.migrate_engine, data)
except exc.DbMigrationError:
msg = _LE("Failed to migrate to version %(ver)s on engine %(eng)s")
LOG.error(msg, {"ver": version, "eng": self.migrate_engine})
raise
@six.add_metaclass(abc.ABCMeta)
class ModelsMigrationsSync(object):
"""A helper class for comparison of DB migration scripts and models.
It's intended to be inherited by test cases in target projects. They have
to provide implementations for methods used internally in the test (as
we have no way to implement them here).
test_model_sync() will run migration scripts for the engine provided and
then compare the given metadata to the one reflected from the database.
The difference between MODELS and MIGRATION scripts will be printed, and
the test will fail if the difference is not empty. The output is
effectively a list of actions that should be performed in order to make
the current database schema state (i.e. the migration scripts) consistent
with the models definitions. It's left up to developers to analyze the
output and decide whether the models definitions or the migration scripts
should be modified to make them consistent.
Output::
[(
'add_table',
description of the table from models
),
(
'remove_table',
description of the table from database
),
(
'add_column',
schema,
table name,
column description from models
),
(
'remove_column',
schema,
table name,
column description from database
),
(
'add_index',
description of the index from models
),
(
'remove_index',
description of the index from database
),
(
'add_constraint',
description of constraint from models
),
(
'remove_constraint',
description of constraint from database
),
(
'modify_nullable',
schema,
table name,
column name,
{
'existing_type': type of the column from database,
'existing_server_default': default value from database
},
nullable from database,
nullable from models
),
(
'modify_type',
schema,
table name,
column name,
{
'existing_nullable': database nullable,
'existing_server_default': default value from database
},
database column type,
type of the column from models
),
(
'modify_default',
schema,
table name,
column name,
{
'existing_nullable': database nullable,
'existing_type': type of the column from database
},
connection column default value,
default from models
)]
Method include_object() can be overridden to exclude some tables from
comparison (e.g. migrate_repo).
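A minimal subclass sketch (``migration`` and ``models`` stand in for the
target project's own modules)::
    class ModelsSyncMySQL(ModelsMigrationsSync,
                          test_base.MySQLOpportunisticTestCase):
        def db_sync(self, engine):
            migration.db_sync(engine)
        def get_engine(self):
            return self.engine
        def get_metadata(self):
            return models.BASE.metadata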
"""
@abc.abstractmethod
def db_sync(self, engine):
"""Run migration scripts with the given engine instance.
This method must be implemented in subclasses and run migration scripts
for a DB the given engine is connected to.
"""
@abc.abstractmethod
def get_engine(self):
"""Return the engine instance to be used when running tests.
This method must be implemented in subclasses and return an engine
instance to be used when running tests.
"""
@abc.abstractmethod
def get_metadata(self):
"""Return the metadata instance to be used for schema comparison.
This method must be implemented in subclasses and return the metadata
instance attached to the BASE model.
"""
def include_object(self, object_, name, type_, reflected, compare_to):
"""Return True for objects that should be compared.
:param object_: a SchemaItem object such as a Table or Column object
:param name: the name of the object
:param type_: a string describing the type of object (e.g. "table")
:param reflected: True if the given object was produced based on
table reflection, False if it's from a local
MetaData object
:param compare_to: the object being compared against, if available,
else None
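An override might, for example, skip sqlalchemy-migrate's bookkeeping
table (a sketch)::
    def include_object(self, object_, name, type_, reflected, compare_to):
        if type_ == 'table' and name == 'migrate_version':
            return False
        return True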
"""
return True
def compare_type(self, ctxt, insp_col, meta_col, insp_type, meta_type):
"""Return True if types are different, False if not.
Return None to allow the default implementation to compare these types.
:param ctxt: alembic MigrationContext instance
:param insp_col: reflected column
:param meta_col: column from model
:param insp_type: reflected column type
:param meta_type: column type from model
"""
# some backends (e.g. mysql) don't provide native boolean type
BOOLEAN_METADATA = (types.BOOLEAN, types.Boolean)
BOOLEAN_SQL = BOOLEAN_METADATA + (types.INTEGER, types.Integer)
if issubclass(type(meta_type), BOOLEAN_METADATA):
return not issubclass(type(insp_type), BOOLEAN_SQL)
return None # tells alembic to use the default comparison method
def compare_server_default(self, ctxt, ins_col, meta_col,
insp_def, meta_def, rendered_meta_def):
"""Compare default values between model and db table.
Return True if the defaults are different, False if not, or None to
allow the default implementation to compare these defaults.
:param ctxt: alembic MigrationContext instance
:param insp_col: reflected column
:param meta_col: column from model
:param insp_def: reflected column default value
:param meta_def: column default value from model
:param rendered_meta_def: rendered column default value (from model)
"""
return self._compare_server_default(ctxt.bind, meta_col, insp_def,
meta_def)
@utils.DialectFunctionDispatcher.dispatch_for_dialect("*")
def _compare_server_default(bind, meta_col, insp_def, meta_def):
pass
@_compare_server_default.dispatch_for('mysql')
def _compare_server_default(bind, meta_col, insp_def, meta_def):
if isinstance(meta_col.type, sqlalchemy.Boolean):
if meta_def is None or insp_def is None:
return meta_def != insp_def
return not (
isinstance(meta_def.arg, expr.True_) and insp_def == "'1'" or
isinstance(meta_def.arg, expr.False_) and insp_def == "'0'"
)
if isinstance(meta_col.type, sqlalchemy.Integer):
if meta_def is None or insp_def is None:
return meta_def != insp_def
return meta_def.arg != insp_def.split("'")[1]
@_compare_server_default.dispatch_for('postgresql')
def _compare_server_default(bind, meta_col, insp_def, meta_def):
if isinstance(meta_col.type, sqlalchemy.Enum):
if meta_def is None or insp_def is None:
return meta_def != insp_def
return insp_def != "'%s'::%s" % (meta_def.arg, meta_col.type.name)
elif isinstance(meta_col.type, sqlalchemy.String):
if meta_def is None or insp_def is None:
return meta_def != insp_def
return insp_def != "'%s'::character varying" % meta_def.arg
def _cleanup(self):
engine = self.get_engine()
with engine.begin() as conn:
inspector = reflection.Inspector.from_engine(engine)
metadata = schema.MetaData()
tbs = []
all_fks = []
for table_name in inspector.get_table_names():
fks = []
for fk in inspector.get_foreign_keys(table_name):
if not fk['name']:
continue
fks.append(
schema.ForeignKeyConstraint((), (), name=fk['name'])
)
table = schema.Table(table_name, metadata, *fks)
tbs.append(table)
all_fks.extend(fks)
for fkc in all_fks:
conn.execute(schema.DropConstraint(fkc))
for table in tbs:
conn.execute(schema.DropTable(table))
FKInfo = collections.namedtuple('fk_info', ['constrained_columns',
'referred_table',
'referred_columns'])
def check_foreign_keys(self, metadata, bind):
"""Compare foreign keys between model and db table.
:returns: a list that contains information about:
* whether a new key should be added or an existing one removed,
* name of that key,
* source table,
* referred table,
* constrained columns,
* referred columns
Output::
[('drop_key',
'testtbl_fk_check_fkey',
'testtbl',
fk_info(constrained_columns=(u'fk_check',),
referred_table=u'table',
referred_columns=(u'fk_check',)))]
"""
diff = []
insp = sqlalchemy.engine.reflection.Inspector.from_engine(bind)
# Get all tables from db
db_tables = insp.get_table_names()
# Get all tables from models
model_tables = metadata.tables
for table in db_tables:
if table not in model_tables:
continue
# Get all necessary information about key of current table from db
fk_db = dict((self._get_fk_info_from_db(i), i['name'])
for i in insp.get_foreign_keys(table))
fk_db_set = set(fk_db.keys())
# Get all necessary information about key of current table from
# models
fk_models = dict((self._get_fk_info_from_model(fk), fk)
for fk in model_tables[table].foreign_keys)
fk_models_set = set(fk_models.keys())
for key in (fk_db_set - fk_models_set):
diff.append(('drop_key', fk_db[key], table, key))
LOG.info(("Detected removed foreign key %(fk)r on "
"table %(table)r"), {'fk': fk_db[key],
'table': table})
for key in (fk_models_set - fk_db_set):
diff.append(('add_key', fk_models[key], table, key))
LOG.info((
"Detected added foreign key for column %(fk)r on table "
"%(table)r"), {'fk': fk_models[key].column.name,
'table': table})
return diff
def _get_fk_info_from_db(self, fk):
return self.FKInfo(tuple(fk['constrained_columns']),
fk['referred_table'],
tuple(fk['referred_columns']))
def _get_fk_info_from_model(self, fk):
return self.FKInfo((fk.parent.name,), fk.column.table.name,
(fk.column.name,))
def test_models_sync(self):
# recent versions of sqlalchemy and alembic are needed to run this
# test, but we already have them in requirements
try:
pkg.require('sqlalchemy>=0.8.4', 'alembic>=0.6.2')
except (pkg.VersionConflict, pkg.DistributionNotFound) as e:
self.skipTest('sqlalchemy>=0.8.4 and alembic>=0.6.2 are required'
' to run this test: %s' % e)
# drop all tables after a test run
self.addCleanup(self._cleanup)
# run migration scripts
self.db_sync(self.get_engine())
with self.get_engine().connect() as conn:
opts = {
'include_object': self.include_object,
'compare_type': self.compare_type,
'compare_server_default': self.compare_server_default,
}
mc = alembic.migration.MigrationContext.configure(conn, opts=opts)
# compare schemas and fail with diff, if it's not empty
diff1 = alembic.autogenerate.compare_metadata(mc,
self.get_metadata())
diff2 = self.check_foreign_keys(self.get_metadata(),
self.get_engine())
diff = diff1 + diff2
if diff:
msg = pprint.pformat(diff, indent=2, width=20)
self.fail(
"Models and migration scripts aren't in sync:\n%s" % msg)

1012 oslo_db/sqlalchemy/utils.py Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,53 @@
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
import testtools
_TRUE_VALUES = ('true', '1', 'yes')
# FIXME(dhellmann) Update this to use oslo.test library
class TestCase(testtools.TestCase):
"""Test case base class for all unit tests."""
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.log_fixture = self.useFixture(fixtures.FakeLogger())

View File

@ -0,0 +1,68 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the compatibility layer for the engine_connect() event.
This event is added as of SQLAlchemy 0.9.0; oslo.db provides a compatibility
layer for prior SQLAlchemy versions.
"""
import mock
from oslotest import base as test_base
import sqlalchemy as sqla
from oslo.db.sqlalchemy import compat
class EngineConnectTest(test_base.BaseTestCase):
def setUp(self):
super(EngineConnectTest, self).setUp()
self.engine = engine = sqla.create_engine("sqlite://")
self.addCleanup(engine.dispose)
def test_connect_event(self):
engine = self.engine
listener = mock.Mock()
compat.engine_connect(engine, listener)
conn = engine.connect()
self.assertEqual(
listener.mock_calls,
[mock.call(conn, False)]
)
conn.close()
conn2 = engine.connect()
conn2.close()
self.assertEqual(
listener.mock_calls,
[mock.call(conn, False), mock.call(conn2, False)]
)
def test_branch(self):
engine = self.engine
listener = mock.Mock()
compat.engine_connect(engine, listener)
conn = engine.connect()
branched = conn.connect()
conn.close()
self.assertEqual(
listener.mock_calls,
[mock.call(conn, False), mock.call(branched, True)]
)

View File

@ -0,0 +1,833 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test exception filters applied to engines."""
import contextlib
import itertools
import mock
from oslotest import base as oslo_test_base
import six
import sqlalchemy as sqla
from sqlalchemy.orm import mapper
from oslo.db import exception
from oslo.db.sqlalchemy import compat
from oslo.db.sqlalchemy import exc_filters
from oslo.db.sqlalchemy import test_base
from oslo_db.sqlalchemy import session as private_session
from oslo_db.tests.old_import_api import utils as test_utils
_TABLE_NAME = '__tmp__test__tmp__'
class _SQLAExceptionMatcher(object):
def assertInnerException(
self,
matched, exception_type, message, sql=None, params=None):
exc = matched.inner_exception
self.assertSQLAException(exc, exception_type, message, sql, params)
def assertSQLAException(
self,
exc, exception_type, message, sql=None, params=None):
if isinstance(exception_type, (type, tuple)):
self.assertTrue(issubclass(exc.__class__, exception_type))
else:
self.assertEqual(exc.__class__.__name__, exception_type)
self.assertEqual(str(exc.orig).lower(), message.lower())
if sql is not None:
self.assertEqual(exc.statement, sql)
if params is not None:
self.assertEqual(exc.params, params)
class TestsExceptionFilter(_SQLAExceptionMatcher, oslo_test_base.BaseTestCase):
class Error(Exception):
"""DBAPI base error.
This exception and subclasses are used in a mock context
within these tests.
"""
class OperationalError(Error):
pass
class InterfaceError(Error):
pass
class InternalError(Error):
pass
class IntegrityError(Error):
pass
class ProgrammingError(Error):
pass
class TransactionRollbackError(OperationalError):
"""Special psycopg2-only error class.
SQLAlchemy has an issue with this per issue #3075:
https://bitbucket.org/zzzeek/sqlalchemy/issue/3075/
"""
def setUp(self):
super(TestsExceptionFilter, self).setUp()
self.engine = sqla.create_engine("sqlite://")
exc_filters.register_engine(self.engine)
self.engine.connect().close() # initialize
@contextlib.contextmanager
def _dbapi_fixture(self, dialect_name):
engine = self.engine
with test_utils.nested(
mock.patch.object(engine.dialect.dbapi,
"Error",
self.Error),
mock.patch.object(engine.dialect, "name", dialect_name),
):
yield
@contextlib.contextmanager
def _fixture(self, dialect_name, exception, is_disconnect=False):
def do_execute(self, cursor, statement, parameters, **kw):
raise exception
engine = self.engine
# ensure the engine has done its initial checks against the
# DB as we are going to be removing its ability to execute a
# statement
self.engine.connect().close()
with test_utils.nested(
mock.patch.object(engine.dialect, "do_execute", do_execute),
# replace the whole DBAPI rather than patching "Error"
# as some DBAPIs might not be patchable (?)
mock.patch.object(engine.dialect,
"dbapi",
mock.Mock(Error=self.Error)),
mock.patch.object(engine.dialect, "name", dialect_name),
mock.patch.object(engine.dialect,
"is_disconnect",
lambda *args: is_disconnect)
):
yield
def _run_test(self, dialect_name, statement, raises, expected,
is_disconnect=False, params=()):
with self._fixture(dialect_name, raises, is_disconnect=is_disconnect):
with self.engine.connect() as conn:
matched = self.assertRaises(
expected, conn.execute, statement, params
)
return matched
class TestFallthroughsAndNonDBAPI(TestsExceptionFilter):
def test_generic_dbapi(self):
matched = self._run_test(
"mysql", "select you_made_a_programming_error",
self.ProgrammingError("Error 123, you made a mistake"),
exception.DBError
)
self.assertInnerException(
matched,
"ProgrammingError",
"Error 123, you made a mistake",
'select you_made_a_programming_error', ())
def test_generic_dbapi_disconnect(self):
matched = self._run_test(
"mysql", "select the_db_disconnected",
self.InterfaceError("connection lost"),
exception.DBConnectionError,
is_disconnect=True
)
self.assertInnerException(
matched,
"InterfaceError", "connection lost",
"select the_db_disconnected", ()),
def test_operational_dbapi_disconnect(self):
matched = self._run_test(
"mysql", "select the_db_disconnected",
self.OperationalError("connection lost"),
exception.DBConnectionError,
is_disconnect=True
)
self.assertInnerException(
matched,
"OperationalError", "connection lost",
"select the_db_disconnected", ()),
def test_operational_error_asis(self):
"""Test operational errors.
test that SQLAlchemy OperationalErrors that aren't disconnects
are passed through without wrapping.
"""
matched = self._run_test(
"mysql", "select some_operational_error",
self.OperationalError("some op error"),
sqla.exc.OperationalError
)
self.assertSQLAException(
matched,
"OperationalError", "some op error"
)
def test_unicode_encode(self):
# intentionally generate a UnicodeEncodeError, as its
# constructor is quite complicated and seems to be non-public
# or at least not documented anywhere.
uee_ref = None
try:
six.u('\u2435').encode('ascii')
except UnicodeEncodeError as uee:
# Python3.x added new scoping rules here (sadly)
# http://legacy.python.org/dev/peps/pep-3110/#semantic-changes
uee_ref = uee
self._run_test(
"postgresql", six.u('select \u2435'),
uee_ref,
exception.DBInvalidUnicodeParameter
)
def test_garden_variety(self):
matched = self._run_test(
"mysql", "select some_thing_that_breaks",
AttributeError("mysqldb has an attribute error"),
exception.DBError
)
self.assertEqual("mysqldb has an attribute error", matched.args[0])
class TestReferenceErrorSQLite(_SQLAExceptionMatcher, test_base.DbTestCase):
def setUp(self):
super(TestReferenceErrorSQLite, self).setUp()
meta = sqla.MetaData(bind=self.engine)
table_1 = sqla.Table(
"resource_foo", meta,
sqla.Column("id", sqla.Integer, primary_key=True),
sqla.Column("foo", sqla.Integer),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
table_1.create()
self.table_2 = sqla.Table(
"resource_entity", meta,
sqla.Column("id", sqla.Integer, primary_key=True),
sqla.Column("foo_id", sqla.Integer,
sqla.ForeignKey("resource_foo.id", name="foo_fkey")),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
self.table_2.create()
def test_raise(self):
self.engine.execute("PRAGMA foreign_keys = ON;")
matched = self.assertRaises(
exception.DBReferenceError,
self.engine.execute,
self.table_2.insert({'id': 1, 'foo_id': 2})
)
self.assertInnerException(
matched,
"IntegrityError",
"FOREIGN KEY constraint failed",
'INSERT INTO resource_entity (id, foo_id) VALUES (?, ?)',
(1, 2)
)
self.assertIsNone(matched.table)
self.assertIsNone(matched.constraint)
self.assertIsNone(matched.key)
self.assertIsNone(matched.key_table)
class TestReferenceErrorPostgreSQL(TestReferenceErrorSQLite,
test_base.PostgreSQLOpportunisticTestCase):
def test_raise(self):
params = {'id': 1, 'foo_id': 2}
matched = self.assertRaises(
exception.DBReferenceError,
self.engine.execute,
self.table_2.insert(params)
)
self.assertInnerException(
matched,
"IntegrityError",
"insert or update on table \"resource_entity\" "
"violates foreign key constraint \"foo_fkey\"\nDETAIL: Key "
"(foo_id)=(2) is not present in table \"resource_foo\".\n",
"INSERT INTO resource_entity (id, foo_id) VALUES (%(id)s, "
"%(foo_id)s)",
params,
)
self.assertEqual("resource_entity", matched.table)
self.assertEqual("foo_fkey", matched.constraint)
self.assertEqual("foo_id", matched.key)
self.assertEqual("resource_foo", matched.key_table)
class TestReferenceErrorMySQL(TestReferenceErrorSQLite,
test_base.MySQLOpportunisticTestCase):
def test_raise(self):
matched = self.assertRaises(
exception.DBReferenceError,
self.engine.execute,
self.table_2.insert({'id': 1, 'foo_id': 2})
)
self.assertInnerException(
matched,
"IntegrityError",
"(1452, 'Cannot add or update a child row: a "
"foreign key constraint fails (`{0}`.`resource_entity`, "
"CONSTRAINT `foo_fkey` FOREIGN KEY (`foo_id`) REFERENCES "
"`resource_foo` (`id`))')".format(self.engine.url.database),
"INSERT INTO resource_entity (id, foo_id) VALUES (%s, %s)",
(1, 2)
)
self.assertEqual("resource_entity", matched.table)
self.assertEqual("foo_fkey", matched.constraint)
self.assertEqual("foo_id", matched.key)
self.assertEqual("resource_foo", matched.key_table)
def test_raise_ansi_quotes(self):
self.engine.execute("SET SESSION sql_mode = 'ANSI';")
matched = self.assertRaises(
exception.DBReferenceError,
self.engine.execute,
self.table_2.insert({'id': 1, 'foo_id': 2})
)
self.assertInnerException(
matched,
"IntegrityError",
'(1452, \'Cannot add or update a child row: a '
'foreign key constraint fails ("{0}"."resource_entity", '
'CONSTRAINT "foo_fkey" FOREIGN KEY ("foo_id") REFERENCES '
'"resource_foo" ("id"))\')'.format(self.engine.url.database),
"INSERT INTO resource_entity (id, foo_id) VALUES (%s, %s)",
(1, 2)
)
self.assertEqual("resource_entity", matched.table)
self.assertEqual("foo_fkey", matched.constraint)
self.assertEqual("foo_id", matched.key)
self.assertEqual("resource_foo", matched.key_table)
class TestDuplicate(TestsExceptionFilter):
def _run_dupe_constraint_test(self, dialect_name, message,
expected_columns=['a', 'b'],
expected_value=None):
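# The duplicate-entry filter parses the backend-specific message with
# regexps and exposes the offending columns/value on the raised
# DBDuplicateEntry; that parsed data is what we assert on below.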
matched = self._run_test(
dialect_name, "insert into table some_values",
self.IntegrityError(message),
exception.DBDuplicateEntry
)
self.assertEqual(expected_columns, matched.columns)
self.assertEqual(expected_value, matched.value)
def _not_dupe_constraint_test(self, dialect_name, statement, message,
expected_cls):
matched = self._run_test(
dialect_name, statement,
self.IntegrityError(message),
expected_cls
)
self.assertInnerException(
matched,
"IntegrityError",
str(self.IntegrityError(message)),
statement
)
def test_sqlite(self):
self._run_dupe_constraint_test("sqlite", 'column a, b are not unique')
def test_sqlite_3_7_16_or_3_8_2_and_higher(self):
self._run_dupe_constraint_test(
"sqlite",
'UNIQUE constraint failed: tbl.a, tbl.b')
def test_sqlite_dupe_primary_key(self):
self._run_dupe_constraint_test(
"sqlite",
"PRIMARY KEY must be unique 'insert into t values(10)'",
expected_columns=[])
def test_mysql_mysqldb(self):
self._run_dupe_constraint_test(
"mysql",
'(1062, "Duplicate entry '
'\'2-3\' for key \'uniq_tbl0a0b\'")', expected_value='2-3')
def test_mysql_mysqlconnector(self):
self._run_dupe_constraint_test(
"mysql",
'1062 (23000): Duplicate entry '
'\'2-3\' for key \'uniq_tbl0a0b\'', expected_value='2-3')
def test_postgresql(self):
self._run_dupe_constraint_test(
'postgresql',
'duplicate key value violates unique constraint '
'"uniq_tbl0a0b"'
'\nDETAIL: Key (a, b)=(2, 3) already exists.\n',
expected_value='2, 3'
)
def test_mysql_single(self):
self._run_dupe_constraint_test(
"mysql",
"1062 (23000): Duplicate entry '2' for key 'b'",
expected_columns=['b'],
expected_value='2'
)
def test_postgresql_single(self):
self._run_dupe_constraint_test(
'postgresql',
'duplicate key value violates unique constraint "uniq_tbl0b"\n'
'DETAIL: Key (b)=(2) already exists.\n',
expected_columns=['b'],
expected_value='2'
)
def test_unsupported_backend(self):
self._not_dupe_constraint_test(
"nonexistent", "insert into table some_values",
self.IntegrityError("constraint violation"),
exception.DBError
)
def test_ibm_db_sa(self):
self._run_dupe_constraint_test(
'ibm_db_sa',
'SQL0803N One or more values in the INSERT statement, UPDATE '
'statement, or foreign key update caused by a DELETE statement are'
' not valid because the primary key, unique constraint or unique '
'index identified by "2" constrains table "NOVA.KEY_PAIRS" from '
'having duplicate values for the index key.',
expected_columns=[]
)
def test_ibm_db_sa_notadupe(self):
self._not_dupe_constraint_test(
'ibm_db_sa',
'ALTER TABLE instance_types ADD CONSTRAINT '
'uniq_name_x_deleted UNIQUE (name, deleted)',
'SQL0542N The column named "NAME" cannot be a column of a '
'primary key or unique key constraint because it can contain null '
'values.',
exception.DBError
)
class TestDeadlock(TestsExceptionFilter):
statement = ('SELECT quota_usages.created_at AS '
'quota_usages_created_at FROM quota_usages '
'WHERE quota_usages.project_id = %(project_id_1)s '
'AND quota_usages.deleted = %(deleted_1)s FOR UPDATE')
params = {
'project_id_1': '8891d4478bbf48ad992f050cdf55e9b5',
'deleted_1': 0
}
def _run_deadlock_detect_test(
self, dialect_name, message,
orig_exception_cls=TestsExceptionFilter.OperationalError):
self._run_test(
dialect_name, self.statement,
orig_exception_cls(message),
exception.DBDeadlock,
params=self.params
)
def _not_deadlock_test(
self, dialect_name, message,
expected_cls, expected_dbapi_cls,
orig_exception_cls=TestsExceptionFilter.OperationalError):
matched = self._run_test(
dialect_name, self.statement,
orig_exception_cls(message),
expected_cls,
params=self.params
)
if isinstance(matched, exception.DBError):
matched = matched.inner_exception
self.assertEqual(matched.orig.__class__.__name__, expected_dbapi_cls)
def test_mysql_mysqldb_deadlock(self):
self._run_deadlock_detect_test(
"mysql",
"(1213, 'Deadlock found when trying "
"to get lock; try restarting "
"transaction')"
)
def test_mysql_mysqldb_galera_deadlock(self):
self._run_deadlock_detect_test(
"mysql",
"(1205, 'Lock wait timeout exceeded; "
"try restarting transaction')"
)
def test_mysql_mysqlconnector_deadlock(self):
self._run_deadlock_detect_test(
"mysql",
"1213 (40001): Deadlock found when trying to get lock; try "
"restarting transaction",
orig_exception_cls=self.InternalError
)
def test_mysql_not_deadlock(self):
self._not_deadlock_test(
"mysql",
"(1005, 'some other error')",
sqla.exc.OperationalError, # note OperationalErrors are sent through
"OperationalError",
)
def test_postgresql_deadlock(self):
self._run_deadlock_detect_test(
"postgresql",
"deadlock detected",
orig_exception_cls=self.TransactionRollbackError
)
def test_postgresql_not_deadlock(self):
self._not_deadlock_test(
"postgresql",
'relation "fake" does not exist',
# can be either depending on #3075
(exception.DBError, sqla.exc.OperationalError),
"TransactionRollbackError",
orig_exception_cls=self.TransactionRollbackError
)
def test_ibm_db_sa_deadlock(self):
self._run_deadlock_detect_test(
"ibm_db_sa",
"SQL0911N The current transaction has been "
"rolled back because of a deadlock or timeout",
# use the lowest class because I don't know what actual error
# class DB2's driver would raise for this
orig_exception_cls=self.Error
)
def test_ibm_db_sa_not_deadlock(self):
self._not_deadlock_test(
"ibm_db_sa",
"SQL01234B Some other error.",
exception.DBError,
"Error",
orig_exception_cls=self.Error
)
class IntegrationTest(test_base.DbTestCase):
"""Test an actual error-raising round trips against the database."""
def setUp(self):
super(IntegrationTest, self).setUp()
meta = sqla.MetaData()
self.test_table = sqla.Table(
_TABLE_NAME, meta,
sqla.Column('id', sqla.Integer,
primary_key=True, nullable=False),
sqla.Column('counter', sqla.Integer,
nullable=False),
sqla.UniqueConstraint('counter',
name='uniq_counter'))
self.test_table.create(self.engine)
self.addCleanup(self.test_table.drop, self.engine)
class Foo(object):
def __init__(self, counter):
self.counter = counter
mapper(Foo, self.test_table)
self.Foo = Foo
def test_flush_wrapper_duplicate_entry(self):
"""test a duplicate entry exception."""
_session = self.sessionmaker()
with _session.begin():
foo = self.Foo(counter=1)
_session.add(foo)
_session.begin()
self.addCleanup(_session.rollback)
foo = self.Foo(counter=1)
_session.add(foo)
self.assertRaises(exception.DBDuplicateEntry, _session.flush)
def test_autoflush_wrapper_duplicate_entry(self):
"""Test a duplicate entry exception raised.
test a duplicate entry exception raised via query.all() -> autoflush
"""
_session = self.sessionmaker()
with _session.begin():
foo = self.Foo(counter=1)
_session.add(foo)
_session.begin()
self.addCleanup(_session.rollback)
foo = self.Foo(counter=1)
_session.add(foo)
self.assertTrue(_session.autoflush)
self.assertRaises(exception.DBDuplicateEntry,
_session.query(self.Foo).all)
def test_flush_wrapper_plain_integrity_error(self):
"""test a plain integrity error wrapped as DBError."""
_session = self.sessionmaker()
with _session.begin():
foo = self.Foo(counter=1)
_session.add(foo)
_session.begin()
self.addCleanup(_session.rollback)
foo = self.Foo(counter=None)
_session.add(foo)
self.assertRaises(exception.DBError, _session.flush)
def test_flush_wrapper_operational_error(self):
"""test an operational error from flush() raised as-is."""
_session = self.sessionmaker()
with _session.begin():
foo = self.Foo(counter=1)
_session.add(foo)
_session.begin()
self.addCleanup(_session.rollback)
foo = self.Foo(counter=sqla.func.imfake(123))
_session.add(foo)
matched = self.assertRaises(sqla.exc.OperationalError, _session.flush)
self.assertTrue("no such function" in str(matched))
def test_query_wrapper_operational_error(self):
"""test an operational error from query.all() raised as-is."""
_session = self.sessionmaker()
_session.begin()
self.addCleanup(_session.rollback)
q = _session.query(self.Foo).filter(
self.Foo.counter == sqla.func.imfake(123))
matched = self.assertRaises(sqla.exc.OperationalError, q.all)
self.assertTrue("no such function" in str(matched))
class TestDBDisconnected(TestsExceptionFilter):
@contextlib.contextmanager
def _fixture(
self,
dialect_name, exception, num_disconnects, is_disconnect=True):
engine = self.engine
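# Wire up oslo.db's ping listener via the compat layer: on connect it
# issues a trivial "SELECT 1" ping and transparently retries once with
# a fresh connection if the database reports a disconnect.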
compat.engine_connect(engine, private_session._connect_ping_listener)
real_do_execute = engine.dialect.do_execute
counter = itertools.count(1)
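# fake_do_execute raises the given DBAPI exception for the first
# num_disconnects executions, then falls through to the real
# do_execute so the reconnect path can complete.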
def fake_do_execute(self, *arg, **kw):
if next(counter) > num_disconnects:
return real_do_execute(self, *arg, **kw)
else:
raise exception
with self._dbapi_fixture(dialect_name):
with test_utils.nested(
mock.patch.object(engine.dialect,
"do_execute",
fake_do_execute),
mock.patch.object(engine.dialect,
"is_disconnect",
mock.Mock(return_value=is_disconnect))
):
yield
def _test_ping_listener_disconnected(
self, dialect_name, exc_obj, is_disconnect=True):
with self._fixture(dialect_name, exc_obj, 1, is_disconnect):
conn = self.engine.connect()
with conn.begin():
self.assertEqual(conn.scalar(sqla.select([1])), 1)
self.assertFalse(conn.closed)
self.assertFalse(conn.invalidated)
self.assertTrue(conn.in_transaction())
with self._fixture(dialect_name, exc_obj, 2, is_disconnect):
self.assertRaises(
exception.DBConnectionError,
self.engine.connect
)
# test implicit execution
with self._fixture(dialect_name, exc_obj, 1):
self.assertEqual(self.engine.scalar(sqla.select([1])), 1)
def test_mysql_ping_listener_disconnected(self):
for code in [2006, 2013, 2014, 2045, 2055]:
self._test_ping_listener_disconnected(
"mysql",
self.OperationalError('%d MySQL server has gone away' % code)
)
def test_mysql_ping_listener_disconnected_regex_only(self):
# intentionally set the is_disconnect flag to False
# in the "sqlalchemy" layer to make sure the regexp
# on _is_db_connection_error is catching
for code in [2002, 2003, 2006, 2013]:
self._test_ping_listener_disconnected(
"mysql",
self.OperationalError('%d MySQL server has gone away' % code),
is_disconnect=False
)
def test_db2_ping_listener_disconnected(self):
self._test_ping_listener_disconnected(
"ibm_db_sa",
self.OperationalError(
'SQL30081N: DB2 Server connection is no longer active')
)
def test_db2_ping_listener_disconnected_regex_only(self):
self._test_ping_listener_disconnected(
"ibm_db_sa",
self.OperationalError(
'SQL30081N: DB2 Server connection is no longer active'),
is_disconnect=False
)
class TestDBConnectRetry(TestsExceptionFilter):
def _run_test(self, dialect_name, exception, count, retries):
counter = itertools.count()
engine = self.engine
# empty out the connection pool
engine.dispose()
connect_fn = engine.dialect.connect
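# cant_connect raises for the first `count` connection attempts and
# then delegates to the real dialect.connect, so the retry loop in
# _test_connection can be exercised deterministically.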
def cant_connect(*arg, **kw):
if next(counter) < count:
raise exception
else:
return connect_fn(*arg, **kw)
with self._dbapi_fixture(dialect_name):
with mock.patch.object(engine.dialect, "connect", cant_connect):
return private_session._test_connection(engine, retries, .01)
def test_connect_no_retries(self):
conn = self._run_test(
"mysql",
self.OperationalError("Error: (2003) something wrong"),
2, 0
)
# didn't connect because nothing was tried
self.assertIsNone(conn)
def test_connect_infinite_retries(self):
conn = self._run_test(
"mysql",
self.OperationalError("Error: (2003) something wrong"),
2, -1
)
# conn is good
self.assertEqual(conn.scalar(sqla.select([1])), 1)
def test_connect_retry_past_failure(self):
conn = self._run_test(
"mysql",
self.OperationalError("Error: (2003) something wrong"),
2, 3
)
# conn is good
self.assertEqual(conn.scalar(sqla.select([1])), 1)
def test_connect_retry_not_candidate_exception(self):
self.assertRaises(
sqla.exc.OperationalError, # remember, we pass OperationalErrors
# through at the moment :)
self._run_test,
"mysql",
self.OperationalError("Error: (2015) I can't connect period"),
2, 3
)
def test_connect_retry_stops_in_failure(self):
self.assertRaises(
exception.DBConnectionError,
self._run_test,
"mysql",
self.OperationalError("Error: (2003) something wrong"),
3, 2
)
def test_db2_error_positive(self):
conn = self._run_test(
"ibm_db_sa",
self.OperationalError("blah blah -30081 blah blah"),
2, -1
)
# conn is good
self.assertEqual(conn.scalar(sqla.select([1])), 1)
def test_db2_error_negative(self):
self.assertRaises(
sqla.exc.OperationalError,
self._run_test,
"ibm_db_sa",
self.OperationalError("blah blah -39981 blah blah"),
2, 3
)

View File

@@ -0,0 +1,194 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the compatibility layer for the handle_error() event.
This event is added as of SQLAlchemy 0.9.7; oslo.db provides a compatibility
layer for prior SQLAlchemy versions.
"""
import mock
from oslotest import base as test_base
import sqlalchemy as sqla
from sqlalchemy.sql import column
from sqlalchemy.sql import literal
from sqlalchemy.sql import select
from sqlalchemy.types import Integer
from sqlalchemy.types import TypeDecorator
from oslo.db.sqlalchemy import compat
from oslo.db.sqlalchemy.compat import utils
from oslo_db.tests.old_import_api import utils as test_utils
class MyException(Exception):
pass
class ExceptionReraiseTest(test_base.BaseTestCase):
def setUp(self):
super(ExceptionReraiseTest, self).setUp()
self.engine = engine = sqla.create_engine("sqlite://")
self.addCleanup(engine.dispose)
def _fixture(self):
engine = self.engine
def err(context):
if "ERROR ONE" in str(context.statement):
raise MyException("my exception")
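# On SQLAlchemy >= 0.9.7 compat.handle_error() registers err() on the
# native handle_error event; on older versions it emulates that event,
# which is the compatibility behavior these tests exercise.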
compat.handle_error(engine, err)
def test_exception_event_altered(self):
self._fixture()
with mock.patch.object(self.engine.dialect.execution_ctx_cls,
"handle_dbapi_exception") as patched:
matchee = self.assertRaises(
MyException,
self.engine.execute, "SELECT 'ERROR ONE' FROM I_DONT_EXIST"
)
self.assertEqual(1, patched.call_count)
self.assertEqual("my exception", matchee.args[0])
def test_exception_event_non_altered(self):
self._fixture()
with mock.patch.object(self.engine.dialect.execution_ctx_cls,
"handle_dbapi_exception") as patched:
self.assertRaises(
sqla.exc.DBAPIError,
self.engine.execute, "SELECT 'ERROR TWO' FROM I_DONT_EXIST"
)
self.assertEqual(1, patched.call_count)
def test_is_disconnect_not_interrupted(self):
self._fixture()
with test_utils.nested(
mock.patch.object(
self.engine.dialect.execution_ctx_cls,
"handle_dbapi_exception"
),
mock.patch.object(
self.engine.dialect, "is_disconnect",
lambda *args: True
)
) as (handle_dbapi_exception, is_disconnect):
with self.engine.connect() as conn:
self.assertRaises(
MyException,
conn.execute, "SELECT 'ERROR ONE' FROM I_DONT_EXIST"
)
self.assertEqual(1, handle_dbapi_exception.call_count)
self.assertTrue(conn.invalidated)
def test_no_is_disconnect_not_invalidated(self):
self._fixture()
with test_utils.nested(
mock.patch.object(
self.engine.dialect.execution_ctx_cls,
"handle_dbapi_exception"
),
mock.patch.object(
self.engine.dialect, "is_disconnect",
lambda *args: False
)
) as (handle_dbapi_exception, is_disconnect):
with self.engine.connect() as conn:
self.assertRaises(
MyException,
conn.execute, "SELECT 'ERROR ONE' FROM I_DONT_EXIST"
)
self.assertEqual(1, handle_dbapi_exception.call_count)
self.assertFalse(conn.invalidated)
def test_exception_event_ad_hoc_context(self):
engine = self.engine
nope = MyException("nope")
class MyType(TypeDecorator):
impl = Integer
def process_bind_param(self, value, dialect):
raise nope
listener = mock.Mock(return_value=None)
compat.handle_error(engine, listener)
self.assertRaises(
sqla.exc.StatementError,
engine.execute,
select([1]).where(column('foo') == literal('bar', MyType))
)
ctx = listener.mock_calls[0][1][0]
self.assertTrue(ctx.statement.startswith("SELECT 1 "))
self.assertIs(ctx.is_disconnect, False)
self.assertIs(ctx.original_exception, nope)
def _test_alter_disconnect(self, orig_error, evt_value):
engine = self.engine
def evt(ctx):
ctx.is_disconnect = evt_value
compat.handle_error(engine, evt)
# if we are under sqla 0.9.7, and we are expecting to take
# an "is disconnect" exception and make it not a disconnect,
# that isn't supported because the wrapped handler has already
# done the invalidation.
expect_failure = not utils.sqla_097 and orig_error and not evt_value
with mock.patch.object(engine.dialect,
"is_disconnect",
mock.Mock(return_value=orig_error)):
with engine.connect() as c:
conn_rec = c.connection._connection_record
try:
c.execute("SELECT x FROM nonexistent")
assert False
except sqla.exc.StatementError as st:
self.assertFalse(expect_failure)
# check the exception's invalidation flag
self.assertEqual(st.connection_invalidated, evt_value)
# check the Connection object's invalidation flag
self.assertEqual(c.invalidated, evt_value)
# this is the ConnectionRecord object; it's invalidated
# when its .connection member is None
self.assertEqual(conn_rec.connection is None, evt_value)
except NotImplementedError as ne:
self.assertTrue(expect_failure)
self.assertEqual(
str(ne),
"Can't reset 'disconnect' status of exception once it "
"is set with this version of SQLAlchemy")
def test_alter_disconnect_to_true(self):
self._test_alter_disconnect(False, True)
self._test_alter_disconnect(True, True)
def test_alter_disconnect_to_false(self):
self._test_alter_disconnect(True, False)
self._test_alter_disconnect(False, False)

View File

@@ -28,7 +28,7 @@ class MockWithCmp(mock.MagicMock):
self.__lt__ = lambda self, other: self.order < other.order
@mock.patch(('oslo.db.sqlalchemy.migration_cli.'
@mock.patch(('oslo_db.sqlalchemy.migration_cli.'
'ext_alembic.alembic.command'))
class TestAlembicExtension(test_base.BaseTestCase):

View File

@@ -0,0 +1,174 @@
# Copyright 2013 Mirantis Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
import tempfile
from migrate import exceptions as migrate_exception
from migrate.versioning import api as versioning_api
import mock
import sqlalchemy
from oslo.db import exception as db_exception
from oslo.db.sqlalchemy import migration
from oslo.db.sqlalchemy import test_base
from oslo_db.sqlalchemy import migration as private_migration
from oslo_db.tests.old_import_api import utils as test_utils
class TestMigrationCommon(test_base.DbTestCase):
def setUp(self):
super(TestMigrationCommon, self).setUp()
migration._REPOSITORY = None
self.path = tempfile.mkdtemp('test_migration')
self.path1 = tempfile.mkdtemp('test_migration')
self.return_value = '/home/openstack/migrations'
self.return_value1 = '/home/extension/migrations'
self.init_version = 1
self.test_version = 123
self.patcher_repo = mock.patch.object(private_migration, 'Repository')
self.repository = self.patcher_repo.start()
self.repository.side_effect = [self.return_value, self.return_value1]
self.mock_api_db = mock.patch.object(versioning_api, 'db_version')
self.mock_api_db_version = self.mock_api_db.start()
self.mock_api_db_version.return_value = self.test_version
def tearDown(self):
os.rmdir(self.path)
self.mock_api_db.stop()
self.patcher_repo.stop()
super(TestMigrationCommon, self).tearDown()
def test_db_version_control(self):
with test_utils.nested(
mock.patch('oslo_db.sqlalchemy.migration._find_migrate_repo'),
mock.patch.object(versioning_api, 'version_control'),
) as (mock_find_repo, mock_version_control):
mock_find_repo.return_value = self.return_value
version = migration.db_version_control(
self.engine, self.path, self.test_version)
self.assertEqual(version, self.test_version)
mock_version_control.assert_called_once_with(
self.engine, self.return_value, self.test_version)
def test_db_version_return(self):
ret_val = migration.db_version(self.engine, self.path,
self.init_version)
self.assertEqual(ret_val, self.test_version)
def test_db_version_raise_not_controlled_error_first(self):
patcher = mock.patch.object(private_migration, 'db_version_control')
with patcher as mock_ver:
self.mock_api_db_version.side_effect = [
migrate_exception.DatabaseNotControlledError('oups'),
self.test_version]
ret_val = migration.db_version(self.engine, self.path,
self.init_version)
self.assertEqual(ret_val, self.test_version)
mock_ver.assert_called_once_with(self.engine, self.path,
version=self.init_version)
def test_db_version_raise_not_controlled_error_tables(self):
with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
self.mock_api_db_version.side_effect = \
migrate_exception.DatabaseNotControlledError('oups')
my_meta = mock.MagicMock()
my_meta.tables = {'a': 1, 'b': 2}
mock_meta.return_value = my_meta
self.assertRaises(
db_exception.DbMigrationError, migration.db_version,
self.engine, self.path, self.init_version)
@mock.patch.object(versioning_api, 'version_control')
def test_db_version_raise_not_controlled_error_no_tables(self, mock_vc):
with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
self.mock_api_db_version.side_effect = (
migrate_exception.DatabaseNotControlledError('oups'),
self.init_version)
my_meta = mock.MagicMock()
my_meta.tables = {}
mock_meta.return_value = my_meta
migration.db_version(self.engine, self.path, self.init_version)
mock_vc.assert_called_once_with(self.engine, self.return_value1,
self.init_version)
def test_db_sync_wrong_version(self):
self.assertRaises(db_exception.DbMigrationError,
migration.db_sync, self.engine, self.path, 'foo')
def test_db_sync_upgrade(self):
init_ver = 55
with test_utils.nested(
mock.patch('oslo_db.sqlalchemy.migration._find_migrate_repo'),
mock.patch.object(versioning_api, 'upgrade')
) as (mock_find_repo, mock_upgrade):
mock_find_repo.return_value = self.return_value
self.mock_api_db_version.return_value = self.test_version - 1
migration.db_sync(self.engine, self.path, self.test_version,
init_ver)
mock_upgrade.assert_called_once_with(
self.engine, self.return_value, self.test_version)
def test_db_sync_downgrade(self):
with test_utils.nested(
mock.patch('oslo_db.sqlalchemy.migration._find_migrate_repo'),
mock.patch.object(versioning_api, 'downgrade')
) as (mock_find_repo, mock_downgrade):
mock_find_repo.return_value = self.return_value
self.mock_api_db_version.return_value = self.test_version + 1
migration.db_sync(self.engine, self.path, self.test_version)
mock_downgrade.assert_called_once_with(
self.engine, self.return_value, self.test_version)
def test_db_sync_sanity_called(self):
with test_utils.nested(
mock.patch('oslo_db.sqlalchemy.migration._find_migrate_repo'),
mock.patch('oslo_db.sqlalchemy.migration._db_schema_sanity_check'),
mock.patch.object(versioning_api, 'downgrade')
) as (mock_find_repo, mock_sanity, mock_downgrade):
mock_find_repo.return_value = self.return_value
migration.db_sync(self.engine, self.path, self.test_version)
mock_sanity.assert_called_once_with(self.engine)
def test_db_sync_sanity_skipped(self):
with test_utils.nested(
mock.patch('oslo_db.sqlalchemy.migration._find_migrate_repo'),
mock.patch('oslo_db.sqlalchemy.migration._db_schema_sanity_check'),
mock.patch.object(versioning_api, 'downgrade')
) as (mock_find_repo, mock_sanity, mock_downgrade):
mock_find_repo.return_value = self.return_value
migration.db_sync(self.engine, self.path, self.test_version,
sanity_check=False)
self.assertFalse(mock_sanity.called)

View File

@@ -15,7 +15,7 @@ from oslo.config import cfg
from oslo.config import fixture as config
from oslo.db import options
from tests import utils as test_utils
from oslo_db.tests.old_import_api import utils as test_utils
class DbApiOptionsTestCase(test_utils.BaseTestCase):

View File

@@ -0,0 +1,554 @@
# coding=utf-8
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for SQLAlchemy specific code."""
import logging
import fixtures
import mock
from oslo.config import cfg
from oslotest import base as oslo_test
import sqlalchemy
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import Integer, String
from sqlalchemy.ext.declarative import declarative_base
from oslo.db import exception
from oslo.db.sqlalchemy import models
from oslo.db.sqlalchemy import session
from oslo.db.sqlalchemy import test_base
from oslo_db import options as db_options
from oslo_db.sqlalchemy import session as private_session
BASE = declarative_base()
_TABLE_NAME = '__tmp__test__tmp__'
_REGEXP_TABLE_NAME = _TABLE_NAME + "regexp"
class RegexpTable(BASE, models.ModelBase):
__tablename__ = _REGEXP_TABLE_NAME
id = Column(Integer, primary_key=True)
bar = Column(String(255))
class RegexpFilterTestCase(test_base.DbTestCase):
def setUp(self):
super(RegexpFilterTestCase, self).setUp()
meta = MetaData()
meta.bind = self.engine
test_table = Table(_REGEXP_TABLE_NAME, meta,
Column('id', Integer, primary_key=True,
nullable=False),
Column('bar', String(255)))
test_table.create()
self.addCleanup(test_table.drop)
def _test_regexp_filter(self, regexp, expected):
_session = self.sessionmaker()
with _session.begin():
for i in ['10', '20', u'♥']:
tbl = RegexpTable()
tbl.update({'bar': i})
tbl.save(session=_session)
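# Build the filter with SQLAlchemy's generic .op() support; oslo.db's
# create_engine() installs a REGEXP function for SQLite, so the same
# operator works there as well as on backends with native REGEXP.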
regexp_op = RegexpTable.bar.op('REGEXP')(regexp)
result = _session.query(RegexpTable).filter(regexp_op).all()
self.assertEqual([r.bar for r in result], expected)
def test_regexp_filter(self):
self._test_regexp_filter('10', ['10'])
def test_regexp_filter_nomatch(self):
self._test_regexp_filter('11', [])
def test_regexp_filter_unicode(self):
self._test_regexp_filter(u'♥', [u'♥'])
def test_regexp_filter_unicode_nomatch(self):
self._test_regexp_filter(u'♦', [])
class SQLiteSavepointTest(test_base.DbTestCase):
def setUp(self):
super(SQLiteSavepointTest, self).setUp()
meta = MetaData()
self.test_table = Table(
"test_table", meta,
Column('id', Integer, primary_key=True),
Column('data', String(10)))
self.test_table.create(self.engine)
self.addCleanup(self.test_table.drop, self.engine)
def test_plain_transaction(self):
conn = self.engine.connect()
trans = conn.begin()
conn.execute(
self.test_table.insert(),
{'data': 'data 1'}
)
self.assertEqual(
[(1, 'data 1')],
self.engine.execute(
self.test_table.select().
order_by(self.test_table.c.id)
).fetchall()
)
trans.rollback()
self.assertEqual(
0,
self.engine.scalar(self.test_table.count())
)
def test_savepoint_middle(self):
with self.engine.begin() as conn:
conn.execute(
self.test_table.insert(),
{'data': 'data 1'}
)
savepoint = conn.begin_nested()
conn.execute(
self.test_table.insert(),
{'data': 'data 2'}
)
savepoint.rollback()
conn.execute(
self.test_table.insert(),
{'data': 'data 3'}
)
self.assertEqual(
[(1, 'data 1'), (2, 'data 3')],
self.engine.execute(
self.test_table.select().
order_by(self.test_table.c.id)
).fetchall()
)
def test_savepoint_beginning(self):
with self.engine.begin() as conn:
savepoint = conn.begin_nested()
conn.execute(
self.test_table.insert(),
{'data': 'data 1'}
)
savepoint.rollback()
conn.execute(
self.test_table.insert(),
{'data': 'data 2'}
)
self.assertEqual(
[(1, 'data 2')],
self.engine.execute(
self.test_table.select().
order_by(self.test_table.c.id)
).fetchall()
)
class FakeDBAPIConnection():
def cursor(self):
return FakeCursor()
class FakeCursor():
def execute(self, sql):
pass
class FakeConnectionProxy():
pass
class FakeConnectionRec():
pass
class OperationalError(Exception):
pass
class ProgrammingError(Exception):
pass
class FakeDB2Engine(object):
class Dialect():
def is_disconnect(self, e, *args):
expected_error = ('SQL30081N: DB2 Server connection is no longer '
'active')
return (str(e) == expected_error)
dialect = Dialect()
name = 'ibm_db_sa'
def dispose(self):
pass
class MySQLModeTestCase(test_base.MySQLOpportunisticTestCase):
def __init__(self, *args, **kwargs):
super(MySQLModeTestCase, self).__init__(*args, **kwargs)
# By default, run in empty SQL mode.
# Subclasses override this with specific modes.
self.mysql_mode = ''
def setUp(self):
super(MySQLModeTestCase, self).setUp()
self.engine = session.create_engine(self.engine.url,
mysql_sql_mode=self.mysql_mode)
self.connection = self.engine.connect()
meta = MetaData()
meta.bind = self.engine
self.test_table = Table(_TABLE_NAME + "mode", meta,
Column('id', Integer, primary_key=True),
Column('bar', String(255)))
self.test_table.create()
self.addCleanup(self.test_table.drop)
self.addCleanup(self.connection.close)
def _test_string_too_long(self, value):
with self.connection.begin():
self.connection.execute(self.test_table.insert(),
bar=value)
result = self.connection.execute(self.test_table.select())
return result.fetchone()['bar']
def test_string_too_long(self):
value = 'a' * 512
# String is too long.
# With no SQL mode set, this gets truncated.
self.assertNotEqual(value,
self._test_string_too_long(value))
class MySQLStrictAllTablesModeTestCase(MySQLModeTestCase):
"Test data integrity enforcement in MySQL STRICT_ALL_TABLES mode."
def __init__(self, *args, **kwargs):
super(MySQLStrictAllTablesModeTestCase, self).__init__(*args, **kwargs)
self.mysql_mode = 'STRICT_ALL_TABLES'
def test_string_too_long(self):
value = 'a' * 512
# String is too long.
# With STRICT_ALL_TABLES or TRADITIONAL mode set, this is an error.
self.assertRaises(exception.DBError,
self._test_string_too_long, value)
class MySQLTraditionalModeTestCase(MySQLStrictAllTablesModeTestCase):
"""Test data integrity enforcement in MySQL TRADITIONAL mode.
Since TRADITIONAL includes STRICT_ALL_TABLES, this inherits all
STRICT_ALL_TABLES mode tests.
"""
def __init__(self, *args, **kwargs):
super(MySQLTraditionalModeTestCase, self).__init__(*args, **kwargs)
self.mysql_mode = 'TRADITIONAL'
class EngineFacadeTestCase(oslo_test.BaseTestCase):
def setUp(self):
super(EngineFacadeTestCase, self).setUp()
self.facade = session.EngineFacade('sqlite://')
def test_get_engine(self):
eng1 = self.facade.get_engine()
eng2 = self.facade.get_engine()
self.assertIs(eng1, eng2)
def test_get_session(self):
ses1 = self.facade.get_session()
ses2 = self.facade.get_session()
self.assertIsNot(ses1, ses2)
def test_get_session_arguments_override_default_settings(self):
ses = self.facade.get_session(autocommit=False, expire_on_commit=True)
self.assertFalse(ses.autocommit)
self.assertTrue(ses.expire_on_commit)
@mock.patch('oslo_db.sqlalchemy.session.get_maker')
@mock.patch('oslo_db.sqlalchemy.session.create_engine')
def test_creation_from_config(self, create_engine, get_maker):
conf = cfg.ConfigOpts()
conf.register_opts(db_options.database_opts, group='database')
overrides = {
'connection': 'sqlite:///:memory:',
'slave_connection': None,
'connection_debug': 100,
'max_pool_size': 10,
'mysql_sql_mode': 'TRADITIONAL',
}
for optname, optvalue in overrides.items():
conf.set_override(optname, optvalue, group='database')
session.EngineFacade.from_config(conf,
autocommit=False,
expire_on_commit=True)
create_engine.assert_called_once_with(
sql_connection='sqlite:///:memory:',
connection_debug=100,
max_pool_size=10,
mysql_sql_mode='TRADITIONAL',
sqlite_fk=False,
idle_timeout=mock.ANY,
retry_interval=mock.ANY,
max_retries=mock.ANY,
max_overflow=mock.ANY,
connection_trace=mock.ANY,
sqlite_synchronous=mock.ANY,
pool_timeout=mock.ANY,
thread_checkin=mock.ANY,
)
get_maker.assert_called_once_with(engine=create_engine(),
autocommit=False,
expire_on_commit=True)
def test_slave_connection(self):
paths = self.create_tempfiles([('db.master', ''), ('db.slave', '')],
ext='')
master_path = 'sqlite:///' + paths[0]
slave_path = 'sqlite:///' + paths[1]
facade = session.EngineFacade(
sql_connection=master_path,
slave_connection=slave_path
)
master = facade.get_engine()
self.assertEqual(master_path, str(master.url))
slave = facade.get_engine(use_slave=True)
self.assertEqual(slave_path, str(slave.url))
master_session = facade.get_session()
self.assertEqual(master_path, str(master_session.bind.url))
slave_session = facade.get_session(use_slave=True)
self.assertEqual(slave_path, str(slave_session.bind.url))
def test_slave_connection_string_not_provided(self):
master_path = 'sqlite:///' + self.create_tempfiles(
[('db.master', '')], ext='')[0]
facade = session.EngineFacade(sql_connection=master_path)
master = facade.get_engine()
slave = facade.get_engine(use_slave=True)
self.assertIs(master, slave)
self.assertEqual(master_path, str(master.url))
master_session = facade.get_session()
self.assertEqual(master_path, str(master_session.bind.url))
slave_session = facade.get_session(use_slave=True)
self.assertEqual(master_path, str(slave_session.bind.url))
class SQLiteConnectTest(oslo_test.BaseTestCase):
def _fixture(self, **kw):
return session.create_engine("sqlite://", **kw)
def test_sqlite_fk_listener(self):
engine = self._fixture(sqlite_fk=True)
self.assertEqual(
engine.scalar("pragma foreign_keys"),
1
)
engine = self._fixture(sqlite_fk=False)
self.assertEqual(
engine.scalar("pragma foreign_keys"),
0
)
def test_sqlite_synchronous_listener(self):
engine = self._fixture()
# "The default setting is synchronous=FULL." (e.g. 2)
# http://www.sqlite.org/pragma.html#pragma_synchronous
self.assertEqual(
engine.scalar("pragma synchronous"),
2
)
engine = self._fixture(sqlite_synchronous=False)
self.assertEqual(
engine.scalar("pragma synchronous"),
0
)
class MysqlConnectTest(test_base.MySQLOpportunisticTestCase):
def _fixture(self, sql_mode):
return session.create_engine(self.engine.url, mysql_sql_mode=sql_mode)
def _assert_sql_mode(self, engine, sql_mode_present, sql_mode_non_present):
mode = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone()[1]
self.assertTrue(
sql_mode_present in mode
)
if sql_mode_non_present:
self.assertTrue(
sql_mode_non_present not in mode
)
def test_set_mode_traditional(self):
engine = self._fixture(sql_mode='TRADITIONAL')
self._assert_sql_mode(engine, "TRADITIONAL", "ANSI")
def test_set_mode_ansi(self):
engine = self._fixture(sql_mode='ANSI')
self._assert_sql_mode(engine, "ANSI", "TRADITIONAL")
def test_set_mode_no_mode(self):
# If _mysql_set_mode_callback is called with sql_mode=None, then
# the SQL mode is NOT set on the connection.
expected = self.engine.execute(
"SHOW VARIABLES LIKE 'sql_mode'").fetchone()[1]
engine = self._fixture(sql_mode=None)
self._assert_sql_mode(engine, expected, None)
def test_fail_detect_mode(self):
# If "SHOW VARIABLES LIKE 'sql_mode'" results in no row, then
# we get a log indicating can't detect the mode.
log = self.useFixture(fixtures.FakeLogger(level=logging.WARN))
mysql_conn = self.engine.raw_connection()
self.addCleanup(mysql_conn.close)
mysql_conn.detach()
mysql_cursor = mysql_conn.cursor()
def execute(statement, parameters=()):
if "SHOW VARIABLES LIKE 'sql_mode'" in statement:
statement = "SHOW VARIABLES LIKE 'i_dont_exist'"
return mysql_cursor.execute(statement, parameters)
test_engine = sqlalchemy.create_engine(self.engine.url,
_initialize=False)
with mock.patch.object(
test_engine.pool, '_creator',
mock.Mock(
return_value=mock.Mock(
cursor=mock.Mock(
return_value=mock.Mock(
execute=execute,
fetchone=mysql_cursor.fetchone,
fetchall=mysql_cursor.fetchall
)
)
)
)
):
private_session._init_events.dispatch_on_drivername("mysql")(
test_engine
)
test_engine.raw_connection()
self.assertIn('Unable to detect effective SQL mode',
log.output)
def test_logs_real_mode(self):
# If "SHOW VARIABLES LIKE 'sql_mode'" results in a value, then
# we get a log with the value.
log = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
engine = self._fixture(sql_mode='TRADITIONAL')
actual_mode = engine.execute(
"SHOW VARIABLES LIKE 'sql_mode'").fetchone()[1]
self.assertIn('MySQL server mode set to %s' % actual_mode,
log.output)
def test_warning_when_not_traditional(self):
# If "SHOW VARIABLES LIKE 'sql_mode'" results in a value that doesn't
# include 'TRADITIONAL', then a warning is logged.
log = self.useFixture(fixtures.FakeLogger(level=logging.WARN))
self._fixture(sql_mode='ANSI')
self.assertIn("consider enabling TRADITIONAL or STRICT_ALL_TABLES",
log.output)
def test_no_warning_when_traditional(self):
# If "SHOW VARIABLES LIKE 'sql_mode'" results in a value that includes
# 'TRADITIONAL', then no warning is logged.
log = self.useFixture(fixtures.FakeLogger(level=logging.WARN))
self._fixture(sql_mode='TRADITIONAL')
self.assertNotIn("consider enabling TRADITIONAL or STRICT_ALL_TABLES",
log.output)
def test_no_warning_when_strict_all_tables(self):
# If "SHOW VARIABLES LIKE 'sql_mode'" results in a value that includes
# 'STRICT_ALL_TABLES', then no warning is logged.
log = self.useFixture(fixtures.FakeLogger(level=logging.WARN))
self._fixture(sql_mode='TRADITIONAL')
self.assertNotIn("consider enabling TRADITIONAL or STRICT_ALL_TABLES",
log.output)
# NOTE(dhellmann): This test no longer works as written. The code in
# oslo_db.sqlalchemy.session filters out lines from modules under
# oslo_db, and now this test is under oslo_db, so the test filename
# does not appear in the context for the error message. LP #1405376
# class PatchStacktraceTest(test_base.DbTestCase):
# def test_trace(self):
# engine = self.engine
# private_session._add_trace_comments(engine)
# conn = engine.connect()
# with mock.patch.object(engine.dialect, "do_execute") as mock_exec:
# conn.execute("select * from table")
# call = mock_exec.mock_calls[0]
# # we're the caller, see that we're in there
# self.assertTrue("tests/sqlalchemy/test_sqlalchemy.py" in call[1][1])
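#
# A rough sketch (not oslo_db's actual code) of the filtering the note
# above describes -- frames whose file path lies under the oslo_db
# package are skipped when picking the caller to name in the comment:
#
#     import traceback
#     frames = [f for f in traceback.extract_stack()
#               if 'oslo_db' not in f[0]]
#
# Since this test file now lives under oslo_db, it is filtered out and
# the assertion above can no longer match.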

File diff suppressed because it is too large

View File

@@ -21,7 +21,7 @@ from oslo.utils import importutils
from oslo.db import api
from oslo.db import exception
from tests import utils as test_utils
from oslo_db.tests.old_import_api import utils as test_utils
sqla = importutils.try_import('sqlalchemy')
if not sqla:
@@ -66,7 +66,7 @@ class DBAPI(object):
class DBAPITestCase(test_utils.BaseTestCase):
def test_dbapi_full_path_module_method(self):
dbapi = api.DBAPI('tests.test_api')
dbapi = api.DBAPI('oslo_db.tests.test_api')
result = dbapi.api_class_call1(1, 2, kwarg1='meow')
expected = ((1, 2), {'kwarg1': 'meow'})
self.assertEqual(expected, result)
@@ -75,7 +75,7 @@ class DBAPITestCase(test_utils.BaseTestCase):
self.assertRaises(ImportError, api.DBAPI, 'tests.unit.db.not_existent')
def test_dbapi_lazy_loading(self):
dbapi = api.DBAPI('tests.test_api', lazy=True)
dbapi = api.DBAPI('oslo_db.tests.test_api', lazy=True)
self.assertIsNone(dbapi._backend)
dbapi.api_class_call1(1, 'abc')

View File

@@ -18,7 +18,7 @@ import sys
import mock
from oslo.db import concurrency
from tests import utils as test_utils
from oslo_db.tests.old_import_api import utils as test_utils
FAKE_BACKEND_MAPPING = {'sqlalchemy': 'fake.db.sqlalchemy.api'}
@@ -47,7 +47,7 @@ class TpoolDbapiWrapperTestCase(test_utils.BaseTestCase):
sys.modules['eventlet'] = self.eventlet
self.addCleanup(sys.modules.pop, 'eventlet', None)
@mock.patch('oslo.db.api.DBAPI')
@mock.patch('oslo_db.api.DBAPI')
def test_db_api_common(self, mock_db_api):
# test context:
# CONF.database.use_tpool == False
@@ -73,7 +73,7 @@ class TpoolDbapiWrapperTestCase(test_utils.BaseTestCase):
self.assertFalse(self.eventlet.tpool.Proxy.called)
self.assertEqual(1, mock_db_api.from_config.call_count)
@mock.patch('oslo.db.api.DBAPI')
@mock.patch('oslo_db.api.DBAPI')
def test_db_api_config_change(self, mock_db_api):
# test context:
# CONF.database.use_tpool == True
@@ -94,7 +94,7 @@ class TpoolDbapiWrapperTestCase(test_utils.BaseTestCase):
self.eventlet.tpool.Proxy.assert_called_once_with(fake_db_api)
self.assertEqual(self.db_api._db_api, self.proxy)
@mock.patch('oslo.db.api.DBAPI')
@mock.patch('oslo_db.api.DBAPI')
def test_db_api_without_installed_eventlet(self, mock_db_api):
# test context:
# CONF.database.use_tpool == True

View File

@@ -0,0 +1,61 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import imp
import os
import warnings
import mock
from oslotest import base as test_base
import six
class DeprecationWarningTest(test_base.BaseTestCase):
@mock.patch('warnings.warn')
def test_warning(self, mock_warn):
import oslo.db
imp.reload(oslo.db)
self.assertTrue(mock_warn.called)
args = mock_warn.call_args
self.assertIn('oslo_db', args[0][0])
self.assertIn('deprecated', args[0][0])
self.assertTrue(issubclass(args[0][1], DeprecationWarning))
def test_real_warning(self):
with warnings.catch_warnings(record=True) as warning_msgs:
warnings.resetwarnings()
warnings.simplefilter('always', DeprecationWarning)
import oslo.db
# Use a separate function to get the stack level correct
# so we know the message points back to this file. This
# corresponds to an import or reload, which isn't working
# inside the test under Python 3.3. That may be due to a
# difference in the import implementation not triggering
# warnings properly when the module is reloaded, or
# because the warnings module is mostly implemented in C
# and something isn't cleanly resetting the global state
# used to track whether a warning needs to be
# emitted. Whatever the cause, we definitely see the
# warnings.warn() being invoked on a reload (see the test
# above) and warnings are reported on the console when we
# run the tests. A simpler test script run outside of
# testr does correctly report the warnings.
def foo():
oslo.db.deprecated()
foo()
self.assertEqual(1, len(warning_msgs))
msg = warning_msgs[0]
self.assertIn('oslo_db', six.text_type(msg.message))
self.assertEqual('test_warning.py', os.path.basename(msg.filename))

View File

@@ -12,7 +12,7 @@
"""Test the compatibility layer for the engine_connect() event.
This event is added as of SQLAlchemy 0.9.0; oslo.db provides a compatibility
This event is added as of SQLAlchemy 0.9.0; oslo_db provides a compatibility
layer for prior SQLAlchemy versions.
"""
@@ -21,7 +21,7 @@ import mock
from oslotest import base as test_base
import sqlalchemy as sqla
from oslo.db.sqlalchemy.compat import engine_connect
from oslo_db.sqlalchemy.compat import engine_connect
class EngineConnectTest(test_base.BaseTestCase):

View File

@@ -21,12 +21,12 @@ import six
import sqlalchemy as sqla
from sqlalchemy.orm import mapper
from oslo.db import exception
from oslo.db.sqlalchemy import compat
from oslo.db.sqlalchemy import exc_filters
from oslo.db.sqlalchemy import session
from oslo.db.sqlalchemy import test_base
from tests import utils as test_utils
from oslo_db import exception
from oslo_db.sqlalchemy import compat
from oslo_db.sqlalchemy import exc_filters
from oslo_db.sqlalchemy import session
from oslo_db.sqlalchemy import test_base
from oslo_db.tests import utils as test_utils
_TABLE_NAME = '__tmp__test__tmp__'

View File

@@ -12,7 +12,7 @@
"""Test the compatibility layer for the handle_error() event.
This event is added as of SQLAlchemy 0.9.7; oslo.db provides a compatibility
This event is added as of SQLAlchemy 0.9.7; oslo_db provides a compatibility
layer for prior SQLAlchemy versions.
"""
@@ -26,9 +26,9 @@ from sqlalchemy.sql import select
from sqlalchemy.types import Integer
from sqlalchemy.types import TypeDecorator
from oslo.db.sqlalchemy.compat import handle_error
from oslo.db.sqlalchemy.compat import utils
from tests import utils as test_utils
from oslo_db.sqlalchemy.compat import handle_error
from oslo_db.sqlalchemy.compat import utils
from oslo_db.tests import utils as test_utils
class MyException(Exception):

View File

@@ -0,0 +1,222 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslotest import base as test_base
from oslo_db.sqlalchemy.migration_cli import ext_alembic
from oslo_db.sqlalchemy.migration_cli import ext_migrate
from oslo_db.sqlalchemy.migration_cli import manager
class MockWithCmp(mock.MagicMock):
order = 0
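# The migration manager sorts its extensions; giving each mock an
# `order` attribute and a matching __lt__ (below) lets these fakes be
# ordered the way real migration extensions are.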
def __init__(self, *args, **kwargs):
super(MockWithCmp, self).__init__(*args, **kwargs)
self.__lt__ = lambda self, other: self.order < other.order
@mock.patch(('oslo_db.sqlalchemy.migration_cli.'
'ext_alembic.alembic.command'))
class TestAlembicExtension(test_base.BaseTestCase):
def setUp(self):
self.migration_config = {'alembic_ini_path': '.',
'db_url': 'sqlite://'}
self.alembic = ext_alembic.AlembicExtension(self.migration_config)
super(TestAlembicExtension, self).setUp()
def test_check_enabled_true(self, command):
"""Check enabled returns True
Verifies that enabled returns True on a non-empty
alembic_ini_path conf variable.
"""
self.assertTrue(self.alembic.enabled)
def test_check_enabled_false(self, command):
"""Check enabled returns False
Verifies that enabled returns False on an empty alembic_ini_path variable.
"""
self.migration_config['alembic_ini_path'] = ''
alembic = ext_alembic.AlembicExtension(self.migration_config)
self.assertFalse(alembic.enabled)
def test_upgrade_none(self, command):
self.alembic.upgrade(None)
command.upgrade.assert_called_once_with(self.alembic.config, 'head')
def test_upgrade_normal(self, command):
self.alembic.upgrade('131daa')
command.upgrade.assert_called_once_with(self.alembic.config, '131daa')
def test_downgrade_none(self, command):
self.alembic.downgrade(None)
command.downgrade.assert_called_once_with(self.alembic.config, 'base')
def test_downgrade_int(self, command):
self.alembic.downgrade(111)
command.downgrade.assert_called_once_with(self.alembic.config, 'base')
def test_downgrade_normal(self, command):
self.alembic.downgrade('131daa')
command.downgrade.assert_called_once_with(
self.alembic.config, '131daa')
def test_revision(self, command):
self.alembic.revision(message='test', autogenerate=True)
command.revision.assert_called_once_with(
self.alembic.config, message='test', autogenerate=True)
def test_stamp(self, command):
self.alembic.stamp('stamp')
command.stamp.assert_called_once_with(
self.alembic.config, revision='stamp')
def test_version(self, command):
version = self.alembic.version()
self.assertIsNone(version)
@mock.patch(('oslo_db.sqlalchemy.migration_cli.'
'ext_migrate.migration'))
class TestMigrateExtension(test_base.BaseTestCase):
def setUp(self):
self.migration_config = {'migration_repo_path': '.',
'db_url': 'sqlite://'}
self.migrate = ext_migrate.MigrateExtension(self.migration_config)
super(TestMigrateExtension, self).setUp()
def test_check_enabled_true(self, migration):
self.assertTrue(self.migrate.enabled)
def test_check_enabled_false(self, migration):
self.migration_config['migration_repo_path'] = ''
migrate = ext_migrate.MigrateExtension(self.migration_config)
self.assertFalse(migrate.enabled)
def test_upgrade_head(self, migration):
self.migrate.upgrade('head')
migration.db_sync.assert_called_once_with(
self.migrate.engine, self.migrate.repository, None, init_version=0)
def test_upgrade_normal(self, migration):
self.migrate.upgrade(111)
migration.db_sync.assert_called_once_with(
mock.ANY, self.migrate.repository, 111, init_version=0)
def test_downgrade_init_version_from_base(self, migration):
self.migrate.downgrade('base')
migration.db_sync.assert_called_once_with(
self.migrate.engine, self.migrate.repository, mock.ANY,
init_version=mock.ANY)
def test_downgrade_init_version_from_none(self, migration):
self.migrate.downgrade(None)
migration.db_sync.assert_called_once_with(
self.migrate.engine, self.migrate.repository, mock.ANY,
init_version=mock.ANY)
def test_downgrade_normal(self, migration):
self.migrate.downgrade(101)
migration.db_sync.assert_called_once_with(
self.migrate.engine, self.migrate.repository, 101, init_version=0)
def test_version(self, migration):
self.migrate.version()
migration.db_version.assert_called_once_with(
self.migrate.engine, self.migrate.repository, init_version=0)
def test_change_init_version(self, migration):
self.migration_config['init_version'] = 101
migrate = ext_migrate.MigrateExtension(self.migration_config)
migrate.downgrade(None)
migration.db_sync.assert_called_once_with(
migrate.engine,
self.migrate.repository,
self.migration_config['init_version'],
init_version=self.migration_config['init_version'])
class TestMigrationManager(test_base.BaseTestCase):
def setUp(self):
self.migration_config = {'alembic_ini_path': '.',
'migrate_repo_path': '.',
'db_url': 'sqlite://'}
self.migration_manager = manager.MigrationManager(
self.migration_config)
self.ext = mock.Mock()
self.migration_manager._manager.extensions = [self.ext]
super(TestMigrationManager, self).setUp()
def test_manager_update(self):
self.migration_manager.upgrade('head')
self.ext.obj.upgrade.assert_called_once_with('head')
def test_manager_update_revision_none(self):
self.migration_manager.upgrade(None)
self.ext.obj.upgrade.assert_called_once_with(None)
def test_downgrade_normal_revision(self):
self.migration_manager.downgrade('111abcd')
self.ext.obj.downgrade.assert_called_once_with('111abcd')
def test_version(self):
self.migration_manager.version()
self.ext.obj.version.assert_called_once_with()
def test_revision_message_autogenerate(self):
self.migration_manager.revision('test', True)
self.ext.obj.revision.assert_called_once_with('test', True)
def test_revision_only_message(self):
self.migration_manager.revision('test', False)
self.ext.obj.revision.assert_called_once_with('test', False)
def test_stamp(self):
self.migration_manager.stamp('stamp')
self.ext.obj.stamp.assert_called_once_with('stamp')
class TestMigrationRightOrder(test_base.BaseTestCase):
def setUp(self):
self.migration_config = {'alembic_ini_path': '.',
'migrate_repo_path': '.',
'db_url': 'sqlite://'}
self.migration_manager = manager.MigrationManager(
self.migration_config)
self.first_ext = MockWithCmp()
self.first_ext.obj.order = 1
self.first_ext.obj.upgrade.return_value = 100
self.first_ext.obj.downgrade.return_value = 0
self.second_ext = MockWithCmp()
self.second_ext.obj.order = 2
self.second_ext.obj.upgrade.return_value = 200
self.second_ext.obj.downgrade.return_value = 100
self.migration_manager._manager.extensions = [self.first_ext,
self.second_ext]
super(TestMigrationRightOrder, self).setUp()
def test_upgrade_right_order(self):
results = self.migration_manager.upgrade(None)
self.assertEqual(results, [100, 200])
def test_downgrade_right_order(self):
results = self.migration_manager.downgrade(None)
self.assertEqual(results, [100, 0])

View File

@@ -22,10 +22,10 @@ from migrate.versioning import api as versioning_api
import mock
import sqlalchemy
from oslo.db import exception as db_exception
from oslo.db.sqlalchemy import migration
from oslo.db.sqlalchemy import test_base
from tests import utils as test_utils
from oslo_db import exception as db_exception
from oslo_db.sqlalchemy import migration
from oslo_db.sqlalchemy import test_base
from oslo_db.tests import utils as test_utils
class TestMigrationCommon(test_base.DbTestCase):


@ -0,0 +1,309 @@
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from oslotest import base as test
import six
import sqlalchemy as sa
import sqlalchemy.ext.declarative as sa_decl
from oslo_db import exception as exc
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_migrations as migrate
class TestWalkVersions(test.BaseTestCase, migrate.WalkVersionsMixin):
migration_api = mock.MagicMock()
REPOSITORY = mock.MagicMock()
engine = mock.MagicMock()
INIT_VERSION = 4
@property
def migrate_engine(self):
return self.engine
def test_migrate_up(self):
self.migration_api.db_version.return_value = 141
self.migrate_up(141)
self.migration_api.upgrade.assert_called_with(
self.engine, self.REPOSITORY, 141)
self.migration_api.db_version.assert_called_with(
self.engine, self.REPOSITORY)
def test_migrate_up_fail(self):
version = 141
self.migration_api.db_version.return_value = version
expected_output = (u"Failed to migrate to version %(version)s on "
"engine %(engine)s\n" %
{'version': version, 'engine': self.engine})
with mock.patch.object(self.migration_api,
'upgrade',
side_effect=exc.DbMigrationError):
log = self.useFixture(fixtures.FakeLogger())
self.assertRaises(exc.DbMigrationError, self.migrate_up, version)
self.assertEqual(expected_output, log.output)
def test_migrate_up_with_data(self):
test_value = {"a": 1, "b": 2}
self.migration_api.db_version.return_value = 141
self._pre_upgrade_141 = mock.MagicMock()
self._pre_upgrade_141.return_value = test_value
self._check_141 = mock.MagicMock()
self.migrate_up(141, True)
self._pre_upgrade_141.assert_called_with(self.engine)
self._check_141.assert_called_with(self.engine, test_value)
def test_migrate_down(self):
self.migration_api.db_version.return_value = 42
self.assertTrue(self.migrate_down(42))
self.migration_api.db_version.assert_called_with(
self.engine, self.REPOSITORY)
def test_migrate_down_not_implemented(self):
with mock.patch.object(self.migration_api,
'downgrade',
side_effect=NotImplementedError):
            self.assertFalse(self.migrate_down(42))
def test_migrate_down_with_data(self):
self._post_downgrade_043 = mock.MagicMock()
self.migration_api.db_version.return_value = 42
self.migrate_down(42, True)
self._post_downgrade_043.assert_called_with(self.engine)
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_up')
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_down')
def test_walk_versions_all_default(self, migrate_up, migrate_down):
self.REPOSITORY.latest = 20
self.migration_api.db_version.return_value = self.INIT_VERSION
self.walk_versions()
self.migration_api.version_control.assert_called_with(
self.engine, self.REPOSITORY, self.INIT_VERSION)
self.migration_api.db_version.assert_called_with(
self.engine, self.REPOSITORY)
versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
upgraded = [mock.call(v, with_data=True)
for v in versions]
self.assertEqual(self.migrate_up.call_args_list, upgraded)
downgraded = [mock.call(v - 1) for v in reversed(versions)]
self.assertEqual(self.migrate_down.call_args_list, downgraded)
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_up')
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_down')
def test_walk_versions_all_true(self, migrate_up, migrate_down):
self.REPOSITORY.latest = 20
self.migration_api.db_version.return_value = self.INIT_VERSION
self.walk_versions(snake_walk=True, downgrade=True)
versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
upgraded = []
for v in versions:
upgraded.append(mock.call(v, with_data=True))
upgraded.append(mock.call(v))
upgraded.extend([mock.call(v) for v in reversed(versions)])
self.assertEqual(upgraded, self.migrate_up.call_args_list)
downgraded_1 = [mock.call(v - 1, with_data=True) for v in versions]
downgraded_2 = []
for v in reversed(versions):
downgraded_2.append(mock.call(v - 1))
downgraded_2.append(mock.call(v - 1))
downgraded = downgraded_1 + downgraded_2
self.assertEqual(self.migrate_down.call_args_list, downgraded)
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_up')
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_down')
def test_walk_versions_true_false(self, migrate_up, migrate_down):
self.REPOSITORY.latest = 20
self.migration_api.db_version.return_value = self.INIT_VERSION
self.walk_versions(snake_walk=True, downgrade=False)
versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
upgraded = []
for v in versions:
upgraded.append(mock.call(v, with_data=True))
upgraded.append(mock.call(v))
self.assertEqual(upgraded, self.migrate_up.call_args_list)
downgraded = [mock.call(v - 1, with_data=True) for v in versions]
self.assertEqual(self.migrate_down.call_args_list, downgraded)
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_up')
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_down')
def test_walk_versions_all_false(self, migrate_up, migrate_down):
self.REPOSITORY.latest = 20
self.migration_api.db_version.return_value = self.INIT_VERSION
self.walk_versions(snake_walk=False, downgrade=False)
versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
upgraded = [mock.call(v, with_data=True) for v in versions]
self.assertEqual(upgraded, self.migrate_up.call_args_list)
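# The call patterns asserted above pin down the walk algorithm. A compact
# sketch reconstructed from those expected call lists, not copied from
# oslo_db.sqlalchemy.test_migrations, whose real walk_versions also checks
# whether each downgrade actually succeeded before stepping forward again:
def _walk_versions_sketch(versions, migrate_up, migrate_down,
                          snake_walk=False, downgrade=True):
    for version in versions:
        migrate_up(version, with_data=True)
        if snake_walk:
            # Step back one revision, then forward again ("snake walk").
            migrate_down(version - 1, with_data=True)
            migrate_up(version)
    if downgrade:
        for version in reversed(versions):
            migrate_down(version - 1)
            if snake_walk:
                migrate_up(version)
                migrate_down(version - 1)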
class ModelsMigrationSyncMixin(test.BaseTestCase):
def setUp(self):
super(ModelsMigrationSyncMixin, self).setUp()
self.metadata = sa.MetaData()
self.metadata_migrations = sa.MetaData()
sa.Table(
'testtbl', self.metadata_migrations,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('spam', sa.String(10), nullable=False),
sa.Column('eggs', sa.DateTime),
sa.Column('foo', sa.Boolean,
server_default=sa.sql.expression.true()),
sa.Column('bool_wo_default', sa.Boolean),
sa.Column('bar', sa.Numeric(10, 5)),
sa.Column('defaulttest', sa.Integer, server_default='5'),
sa.Column('defaulttest2', sa.String(8), server_default=''),
sa.Column('defaulttest3', sa.String(5), server_default="test"),
sa.Column('defaulttest4', sa.Enum('first', 'second',
name='testenum'),
server_default="first"),
sa.Column('fk_check', sa.String(36), nullable=False),
sa.UniqueConstraint('spam', 'eggs', name='uniq_cons'),
)
BASE = sa_decl.declarative_base(metadata=self.metadata)
class TestModel(BASE):
__tablename__ = 'testtbl'
__table_args__ = (
sa.UniqueConstraint('spam', 'eggs', name='uniq_cons'),
)
id = sa.Column('id', sa.Integer, primary_key=True)
spam = sa.Column('spam', sa.String(10), nullable=False)
eggs = sa.Column('eggs', sa.DateTime)
foo = sa.Column('foo', sa.Boolean,
server_default=sa.sql.expression.true())
fk_check = sa.Column('fk_check', sa.String(36), nullable=False)
bool_wo_default = sa.Column('bool_wo_default', sa.Boolean)
defaulttest = sa.Column('defaulttest',
sa.Integer, server_default='5')
defaulttest2 = sa.Column('defaulttest2', sa.String(8),
server_default='')
defaulttest3 = sa.Column('defaulttest3', sa.String(5),
server_default="test")
defaulttest4 = sa.Column('defaulttest4', sa.Enum('first', 'second',
name='testenum'),
server_default="first")
bar = sa.Column('bar', sa.Numeric(10, 5))
class ModelThatShouldNotBeCompared(BASE):
__tablename__ = 'testtbl2'
id = sa.Column('id', sa.Integer, primary_key=True)
spam = sa.Column('spam', sa.String(10), nullable=False)
def get_metadata(self):
return self.metadata
def get_engine(self):
return self.engine
def db_sync(self, engine):
self.metadata_migrations.create_all(bind=engine)
def include_object(self, object_, name, type_, reflected, compare_to):
if type_ == 'table':
return name == 'testtbl'
else:
return True
def _test_models_not_sync(self):
self.metadata_migrations.clear()
sa.Table(
'table', self.metadata_migrations,
sa.Column('fk_check', sa.String(36), nullable=False),
sa.PrimaryKeyConstraint('fk_check'),
mysql_engine='InnoDB'
)
sa.Table(
'testtbl', self.metadata_migrations,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('spam', sa.String(8), nullable=True),
sa.Column('eggs', sa.DateTime),
sa.Column('foo', sa.Boolean,
server_default=sa.sql.expression.false()),
sa.Column('bool_wo_default', sa.Boolean, unique=True),
sa.Column('bar', sa.BigInteger),
sa.Column('defaulttest', sa.Integer, server_default='7'),
sa.Column('defaulttest2', sa.String(8), server_default=''),
sa.Column('defaulttest3', sa.String(5), server_default="fake"),
sa.Column('defaulttest4',
sa.Enum('first', 'second', name='testenum'),
server_default="first"),
sa.Column('fk_check', sa.String(36), nullable=False),
sa.UniqueConstraint('spam', 'foo', name='uniq_cons'),
sa.ForeignKeyConstraint(['fk_check'], ['table.fk_check']),
mysql_engine='InnoDB'
)
msg = six.text_type(self.assertRaises(AssertionError,
self.test_models_sync))
        # NOTE(I159): Check that the table and the columns are mentioned.
        # The log is not valid JSON, so we can't parse it and check it
        # for full compliance, and we have no guarantee of the ordering
        # of the log items, so we can't use a regexp either.
self.assertTrue(msg.startswith(
'Models and migration scripts aren\'t in sync:'))
self.assertIn('testtbl', msg)
self.assertIn('spam', msg)
self.assertIn('eggs', msg) # test that the unique constraint is added
self.assertIn('foo', msg)
self.assertIn('bar', msg)
self.assertIn('bool_wo_default', msg)
self.assertIn('defaulttest', msg)
self.assertIn('defaulttest3', msg)
self.assertIn('drop_key', msg)
class ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin,
migrate.ModelsMigrationsSync,
test_base.MySQLOpportunisticTestCase):
def test_models_not_sync(self):
self._test_models_not_sync()
class ModelsMigrationsSyncPsql(ModelsMigrationSyncMixin,
migrate.ModelsMigrationsSync,
test_base.PostgreSQLOpportunisticTestCase):
def test_models_not_sync(self):
self._test_models_not_sync()
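Behind these opportunistic test classes, ModelsMigrationsSync reduces to a metadata comparison: build the schema with db_sync(), then diff it against the declarative models. A rough sketch of that comparison using alembic, which the mixin relies on; the real mixin also threads through filtering hooks such as the include_object() override shown above:

from alembic.autogenerate import compare_metadata
from alembic.migration import MigrationContext


def diff_models_vs_migrations(engine, models_metadata):
    """Return schema differences; an empty list means 'in sync'."""
    with engine.connect() as conn:
        mc = MigrationContext.configure(conn)
        return compare_metadata(mc, models_metadata)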


@ -0,0 +1,146 @@
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslotest import base as oslo_test
from sqlalchemy import Column
from sqlalchemy import Integer, String
from sqlalchemy.ext.declarative import declarative_base
from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy import test_base
BASE = declarative_base()
class ModelBaseTest(test_base.DbTestCase):
def setUp(self):
super(ModelBaseTest, self).setUp()
self.mb = models.ModelBase()
self.ekm = ExtraKeysModel()
def test_modelbase_has_dict_methods(self):
dict_methods = ('__getitem__',
'__setitem__',
'__contains__',
'get',
'update',
'save',
'iteritems')
for method in dict_methods:
self.assertTrue(hasattr(models.ModelBase, method),
"Method %s() is not found" % method)
def test_modelbase_is_iterable(self):
self.assertTrue(issubclass(models.ModelBase, collections.Iterable))
def test_modelbase_set(self):
self.mb['world'] = 'hello'
self.assertEqual(self.mb['world'], 'hello')
def test_modelbase_update(self):
h = {'a': '1', 'b': '2'}
self.mb.update(h)
for key in h.keys():
self.assertEqual(self.mb[key], h[key])
def test_modelbase_contains(self):
mb = models.ModelBase()
h = {'a': '1', 'b': '2'}
mb.update(h)
for key in h.keys():
# Test 'in' syntax (instead of using .assertIn)
self.assertTrue(key in mb)
self.assertFalse('non-existent-key' in mb)
def test_modelbase_iteritems(self):
h = {'a': '1', 'b': '2'}
expected = {
'id': None,
'smth': None,
'name': 'NAME',
'a': '1',
'b': '2',
}
self.ekm.update(h)
self.assertEqual(dict(self.ekm.iteritems()), expected)
def test_modelbase_iter(self):
expected = {
'id': None,
'smth': None,
'name': 'NAME',
}
i = iter(self.ekm)
found_items = 0
while True:
r = next(i, None)
if r is None:
break
self.assertEqual(expected[r[0]], r[1])
found_items += 1
self.assertEqual(len(expected), found_items)
def test_modelbase_several_iters(self):
mb = ExtraKeysModel()
it1 = iter(mb)
it2 = iter(mb)
self.assertFalse(it1 is it2)
self.assertEqual(dict(it1), dict(mb))
self.assertEqual(dict(it2), dict(mb))
def test_extra_keys_empty(self):
"""Test verifies that by default extra_keys return empty list."""
self.assertEqual(self.mb._extra_keys, [])
def test_extra_keys_defined(self):
"""Property _extra_keys will return list with attributes names."""
self.assertEqual(self.ekm._extra_keys, ['name'])
def test_model_with_extra_keys(self):
data = dict(self.ekm)
self.assertEqual(data, {'smth': None,
'id': None,
'name': 'NAME'})
class ExtraKeysModel(BASE, models.ModelBase):
__tablename__ = 'test_model'
id = Column(Integer, primary_key=True)
smth = Column(String(255))
@property
def name(self):
return 'NAME'
@property
def _extra_keys(self):
return ['name']
class TimestampMixinTest(oslo_test.BaseTestCase):
def test_timestampmixin_attr(self):
methods = ('created_at',
'updated_at')
for method in methods:
self.assertTrue(hasattr(models.TimestampMixin, method),
"Method %s() is not found" % method)


@ -0,0 +1,127 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from oslo.config import fixture as config
from oslo_db import options
from oslo_db.tests import utils as test_utils
class DbApiOptionsTestCase(test_utils.BaseTestCase):
def setUp(self):
super(DbApiOptionsTestCase, self).setUp()
config_fixture = self.useFixture(config.Config())
self.conf = config_fixture.conf
self.conf.register_opts(options.database_opts, group='database')
self.config = config_fixture.config
def test_deprecated_session_parameters(self):
path = self.create_tempfiles([["tmp", b"""[DEFAULT]
sql_connection=x://y.z
sql_min_pool_size=10
sql_max_pool_size=20
sql_max_retries=30
sql_retry_interval=40
sql_max_overflow=50
sql_connection_debug=60
sql_connection_trace=True
"""]])[0]
self.conf(['--config-file', path])
self.assertEqual(self.conf.database.connection, 'x://y.z')
self.assertEqual(self.conf.database.min_pool_size, 10)
self.assertEqual(self.conf.database.max_pool_size, 20)
self.assertEqual(self.conf.database.max_retries, 30)
self.assertEqual(self.conf.database.retry_interval, 40)
self.assertEqual(self.conf.database.max_overflow, 50)
self.assertEqual(self.conf.database.connection_debug, 60)
self.assertEqual(self.conf.database.connection_trace, True)
def test_session_parameters(self):
path = self.create_tempfiles([["tmp", b"""[database]
connection=x://y.z
min_pool_size=10
max_pool_size=20
max_retries=30
retry_interval=40
max_overflow=50
connection_debug=60
connection_trace=True
pool_timeout=7
"""]])[0]
self.conf(['--config-file', path])
self.assertEqual(self.conf.database.connection, 'x://y.z')
self.assertEqual(self.conf.database.min_pool_size, 10)
self.assertEqual(self.conf.database.max_pool_size, 20)
self.assertEqual(self.conf.database.max_retries, 30)
self.assertEqual(self.conf.database.retry_interval, 40)
self.assertEqual(self.conf.database.max_overflow, 50)
self.assertEqual(self.conf.database.connection_debug, 60)
self.assertEqual(self.conf.database.connection_trace, True)
self.assertEqual(self.conf.database.pool_timeout, 7)
def test_dbapi_database_deprecated_parameters(self):
path = self.create_tempfiles([['tmp', b'[DATABASE]\n'
b'sql_connection=fake_connection\n'
b'sql_idle_timeout=100\n'
b'sql_min_pool_size=99\n'
b'sql_max_pool_size=199\n'
b'sql_max_retries=22\n'
b'reconnect_interval=17\n'
b'sqlalchemy_max_overflow=101\n'
b'sqlalchemy_pool_timeout=5\n'
]])[0]
self.conf(['--config-file', path])
self.assertEqual(self.conf.database.connection, 'fake_connection')
self.assertEqual(self.conf.database.idle_timeout, 100)
self.assertEqual(self.conf.database.min_pool_size, 99)
self.assertEqual(self.conf.database.max_pool_size, 199)
self.assertEqual(self.conf.database.max_retries, 22)
self.assertEqual(self.conf.database.retry_interval, 17)
self.assertEqual(self.conf.database.max_overflow, 101)
self.assertEqual(self.conf.database.pool_timeout, 5)
def test_dbapi_database_deprecated_parameters_sql(self):
path = self.create_tempfiles([['tmp', b'[sql]\n'
b'connection=test_sql_connection\n'
b'idle_timeout=99\n'
]])[0]
self.conf(['--config-file', path])
self.assertEqual(self.conf.database.connection, 'test_sql_connection')
self.assertEqual(self.conf.database.idle_timeout, 99)
def test_deprecated_dbapi_parameters(self):
path = self.create_tempfiles([['tmp', b'[DEFAULT]\n'
b'db_backend=test_123\n'
]])[0]
self.conf(['--config-file', path])
self.assertEqual(self.conf.database.backend, 'test_123')
def test_dbapi_parameters(self):
path = self.create_tempfiles([['tmp', b'[database]\n'
b'backend=test_123\n'
]])[0]
self.conf(['--config-file', path])
self.assertEqual(self.conf.database.backend, 'test_123')
def test_set_defaults(self):
conf = cfg.ConfigOpts()
options.set_defaults(conf,
connection='sqlite:///:memory:')
self.assertTrue(len(conf.database.items()) > 1)
self.assertEqual('sqlite:///:memory:', conf.database.connection)
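The legacy-name handling these tests exercise is plain oslo.config machinery: each new [database] option declares its old spellings as DeprecatedOpt entries. A trimmed sketch in the style of oslo_db.options (the real database_opts list is much longer):

from oslo.config import cfg

connection_opt = cfg.StrOpt(
    'connection',
    secret=True,
    help='The SQLAlchemy connection string.',
    deprecated_opts=[
        cfg.DeprecatedOpt('sql_connection', group='DEFAULT'),
        cfg.DeprecatedOpt('sql_connection', group='DATABASE'),
        cfg.DeprecatedOpt('connection', group='sql'),
    ])

conf = cfg.ConfigOpts()
conf.register_opts([connection_opt], group='database')
# Any of the legacy spellings now resolves to conf.database.connection.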


@ -29,11 +29,11 @@ from sqlalchemy.engine import url
from sqlalchemy import Integer, String
from sqlalchemy.ext.declarative import declarative_base
from oslo.db import exception
from oslo.db import options as db_options
from oslo.db.sqlalchemy import models
from oslo.db.sqlalchemy import session
from oslo.db.sqlalchemy import test_base
from oslo_db import exception
from oslo_db import options as db_options
from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy import session
from oslo_db.sqlalchemy import test_base
BASE = declarative_base()
@ -300,8 +300,8 @@ class EngineFacadeTestCase(oslo_test.BaseTestCase):
self.assertFalse(ses.autocommit)
self.assertTrue(ses.expire_on_commit)
@mock.patch('oslo.db.sqlalchemy.session.get_maker')
@mock.patch('oslo.db.sqlalchemy.session.create_engine')
@mock.patch('oslo_db.sqlalchemy.session.get_maker')
@mock.patch('oslo_db.sqlalchemy.session.create_engine')
def test_creation_from_config(self, create_engine, get_maker):
conf = cfg.ConfigOpts()
conf.register_opts(db_options.database_opts, group='database')
@ -633,17 +633,23 @@ class CreateEngineTest(oslo_test.BaseTestCase):
)
class PatchStacktraceTest(test_base.DbTestCase):
    def test_trace(self):
        engine = self.engine
        session._add_trace_comments(engine)
        conn = engine.connect()
        with mock.patch.object(engine.dialect, "do_execute") as mock_exec:
            conn.execute("select * from table")
            call = mock_exec.mock_calls[0]
            # we're the caller, see that we're in there
            self.assertTrue("tests/sqlalchemy/test_sqlalchemy.py" in call[1][1])
# NOTE(dhellmann): This test no longer works as written. The code in
# oslo_db.sqlalchemy.session filters out lines from modules under
# oslo_db, and now this test is under oslo_db, so the test filename
# does not appear in the context for the error message. LP #1405376
# class PatchStacktraceTest(test_base.DbTestCase):
#     def test_trace(self):
#         engine = self.engine
#         session._add_trace_comments(engine)
#         conn = engine.connect()
#         with mock.patch.object(engine.dialect, "do_execute") as mock_exec:
#             conn.execute("select * from table")
#             call = mock_exec.mock_calls[0]
#             # we're the caller, see that we're in there
#             self.assertIn("oslo_db/tests/sqlalchemy/test_sqlalchemy.py",
#                           call[1][1])


@ -32,13 +32,13 @@ from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import select
from sqlalchemy.types import UserDefinedType, NullType
from oslo.db import exception
from oslo.db.sqlalchemy import models
from oslo.db.sqlalchemy import provision
from oslo.db.sqlalchemy import session
from oslo.db.sqlalchemy import test_base as db_test_base
from oslo.db.sqlalchemy import utils
from tests import utils as test_utils
from oslo_db import exception
from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy import provision
from oslo_db.sqlalchemy import session
from oslo_db.sqlalchemy import test_base as db_test_base
from oslo_db.sqlalchemy import utils
from oslo_db.tests import utils as test_utils
SA_VERSION = tuple(map(int, sqlalchemy.__version__.split('.')))
@ -738,8 +738,8 @@ class TestUtils(db_test_base.DbTestCase):
self.assertRaises(ValueError, utils.drop_index, self.engine,
'test_table', 'new_index')
@mock.patch('oslo.db.sqlalchemy.utils.drop_index')
@mock.patch('oslo.db.sqlalchemy.utils.add_index')
@mock.patch('oslo_db.sqlalchemy.utils.drop_index')
@mock.patch('oslo_db.sqlalchemy.utils.add_index')
def test_change_index_columns(self, add_index, drop_index):
utils.change_index_columns(self.engine, 'test_table', 'a_index',
('a',))

oslo_db/tests/test_api.py

@ -0,0 +1,177 @@
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for DB API."""
import mock
from oslo.config import cfg
from oslo.utils import importutils
from oslo_db import api
from oslo_db import exception
from oslo_db.tests import utils as test_utils
sqla = importutils.try_import('sqlalchemy')
if not sqla:
raise ImportError("Unable to import module 'sqlalchemy'.")
def get_backend():
return DBAPI()
class DBAPI(object):
def _api_raise(self, *args, **kwargs):
"""Simulate raising a database-has-gone-away error
This method creates a fake OperationalError with an ID matching
a valid MySQL "database has gone away" situation. It also decrements
the error_counter so that we can artificially keep track of
how many times this function is called by the wrapper. When
error_counter reaches zero, this function returns True, simulating
the database becoming available again and the query succeeding.
"""
if self.error_counter > 0:
self.error_counter -= 1
orig = sqla.exc.DBAPIError(False, False, False)
orig.args = [2006, 'Test raise operational error']
e = exception.DBConnectionError(orig)
raise e
else:
return True
def api_raise_default(self, *args, **kwargs):
return self._api_raise(*args, **kwargs)
@api.safe_for_db_retry
def api_raise_enable_retry(self, *args, **kwargs):
return self._api_raise(*args, **kwargs)
def api_class_call1(_self, *args, **kwargs):
return args, kwargs
class DBAPITestCase(test_utils.BaseTestCase):
def test_dbapi_full_path_module_method(self):
dbapi = api.DBAPI('oslo_db.tests.test_api')
result = dbapi.api_class_call1(1, 2, kwarg1='meow')
expected = ((1, 2), {'kwarg1': 'meow'})
self.assertEqual(expected, result)
def test_dbapi_unknown_invalid_backend(self):
self.assertRaises(ImportError, api.DBAPI, 'tests.unit.db.not_existent')
def test_dbapi_lazy_loading(self):
dbapi = api.DBAPI('oslo_db.tests.test_api', lazy=True)
self.assertIsNone(dbapi._backend)
dbapi.api_class_call1(1, 'abc')
self.assertIsNotNone(dbapi._backend)
def test_dbapi_from_config(self):
conf = cfg.ConfigOpts()
dbapi = api.DBAPI.from_config(conf,
backend_mapping={'sqlalchemy': __name__})
self.assertIsNotNone(dbapi._backend)
class DBReconnectTestCase(DBAPITestCase):
def setUp(self):
super(DBReconnectTestCase, self).setUp()
self.test_db_api = DBAPI()
patcher = mock.patch(__name__ + '.get_backend',
return_value=self.test_db_api)
patcher.start()
self.addCleanup(patcher.stop)
def test_raise_connection_error(self):
self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__})
self.test_db_api.error_counter = 5
self.assertRaises(exception.DBConnectionError, self.dbapi._api_raise)
def test_raise_connection_error_decorated(self):
self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__})
self.test_db_api.error_counter = 5
self.assertRaises(exception.DBConnectionError,
self.dbapi.api_raise_enable_retry)
self.assertEqual(4, self.test_db_api.error_counter, 'Unexpected retry')
def test_raise_connection_error_enabled(self):
self.dbapi = api.DBAPI('sqlalchemy',
{'sqlalchemy': __name__},
use_db_reconnect=True)
self.test_db_api.error_counter = 5
self.assertRaises(exception.DBConnectionError,
self.dbapi.api_raise_default)
self.assertEqual(4, self.test_db_api.error_counter, 'Unexpected retry')
def test_retry_one(self):
self.dbapi = api.DBAPI('sqlalchemy',
{'sqlalchemy': __name__},
use_db_reconnect=True,
retry_interval=1)
try:
func = self.dbapi.api_raise_enable_retry
self.test_db_api.error_counter = 1
self.assertTrue(func(), 'Single retry did not succeed.')
except Exception:
self.fail('Single retry raised an un-wrapped error.')
self.assertEqual(
0, self.test_db_api.error_counter,
'Counter not decremented, retry logic probably failed.')
def test_retry_two(self):
self.dbapi = api.DBAPI('sqlalchemy',
{'sqlalchemy': __name__},
use_db_reconnect=True,
retry_interval=1,
inc_retry_interval=False)
try:
func = self.dbapi.api_raise_enable_retry
self.test_db_api.error_counter = 2
self.assertTrue(func(), 'Multiple retry did not succeed.')
except Exception:
self.fail('Multiple retry raised an un-wrapped error.')
self.assertEqual(
0, self.test_db_api.error_counter,
'Counter not decremented, retry logic probably failed.')
def test_retry_until_failure(self):
self.dbapi = api.DBAPI('sqlalchemy',
{'sqlalchemy': __name__},
use_db_reconnect=True,
retry_interval=1,
inc_retry_interval=False,
max_retries=3)
func = self.dbapi.api_raise_enable_retry
self.test_db_api.error_counter = 5
        # Note: assertRaises passes extra positional arguments through
        # to func, so the failure message cannot be supplied that way.
        self.assertRaises(exception.DBError, func)
self.assertNotEqual(
0, self.test_db_api.error_counter,
'Retry did not stop after sql_max_retries iterations.')
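Taken together, these reconnect tests describe the retry rule: a call is retried only when the backend method is decorated with @api.safe_for_db_retry and the DBAPI was built with use_db_reconnect=True. A condensed sketch of such a wrapper, reconstructed from that behaviour rather than copied from oslo_db.api:

import time

from oslo_db import exception


def wrap_db_retry(func, retry_interval=1, inc_retry_interval=True,
                  max_retry_interval=10, max_retries=20):
    """Retry func on DBConnectionError, in the spirit of oslo_db.api."""
    def wrapper(*args, **kwargs):
        interval = retry_interval
        remaining = max_retries
        while True:
            try:
                return func(*args, **kwargs)
            except exception.DBConnectionError as e:
                if remaining == 0:
                    # A permanent failure surfaces as DBError, which is
                    # what test_retry_until_failure checks for.
                    raise exception.DBError(e)
                time.sleep(interval)
                if inc_retry_interval:
                    interval = min(interval * 2, max_retry_interval)
                remaining -= 1
    return wrapper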


@ -0,0 +1,108 @@
# Copyright 2014 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from oslo_db import concurrency
from oslo_db.tests import utils as test_utils
FAKE_BACKEND_MAPPING = {'sqlalchemy': 'fake.db.sqlalchemy.api'}
class TpoolDbapiWrapperTestCase(test_utils.BaseTestCase):
def setUp(self):
super(TpoolDbapiWrapperTestCase, self).setUp()
self.db_api = concurrency.TpoolDbapiWrapper(
conf=self.conf, backend_mapping=FAKE_BACKEND_MAPPING)
        # NOTE(akurilin): We are not going to add `eventlet` to the
        # `oslo_db` requirements (`requirements.txt` and
        # `test-requirements.txt`) for the following reasons:
        #  - support for eventlet's thread pooling is entirely optional;
        #  - we don't need to test `tpool.Proxy` functionality itself,
        #    because it's a tool from a third-party library;
        #  - `eventlet` would prevent us from running unit tests on
        #    Python 3.x, because it doesn't support Python 3 yet.
        #
        # As we don't test `tpool.Proxy`, we can safely mock it in tests.
self.proxy = mock.MagicMock()
self.eventlet = mock.MagicMock()
self.eventlet.tpool.Proxy.return_value = self.proxy
sys.modules['eventlet'] = self.eventlet
self.addCleanup(sys.modules.pop, 'eventlet', None)
@mock.patch('oslo_db.api.DBAPI')
def test_db_api_common(self, mock_db_api):
# test context:
# CONF.database.use_tpool == False
# eventlet is installed
# expected result:
# TpoolDbapiWrapper should wrap DBAPI
fake_db_api = mock.MagicMock()
mock_db_api.from_config.return_value = fake_db_api
# get access to some db-api method
self.db_api.fake_call_1
mock_db_api.from_config.assert_called_once_with(
conf=self.conf, backend_mapping=FAKE_BACKEND_MAPPING)
self.assertEqual(self.db_api._db_api, fake_db_api)
self.assertFalse(self.eventlet.tpool.Proxy.called)
        # access another db-api method to be sure the api didn't change
self.db_api.fake_call_2
self.assertEqual(self.db_api._db_api, fake_db_api)
self.assertFalse(self.eventlet.tpool.Proxy.called)
self.assertEqual(1, mock_db_api.from_config.call_count)
@mock.patch('oslo_db.api.DBAPI')
def test_db_api_config_change(self, mock_db_api):
# test context:
# CONF.database.use_tpool == True
# eventlet is installed
# expected result:
# TpoolDbapiWrapper should wrap tpool proxy
fake_db_api = mock.MagicMock()
mock_db_api.from_config.return_value = fake_db_api
self.conf.set_override('use_tpool', True, group='database')
# get access to some db-api method
self.db_api.fake_call
# CONF.database.use_tpool is True, so we get tpool proxy in this case
mock_db_api.from_config.assert_called_once_with(
conf=self.conf, backend_mapping=FAKE_BACKEND_MAPPING)
self.eventlet.tpool.Proxy.assert_called_once_with(fake_db_api)
self.assertEqual(self.db_api._db_api, self.proxy)
@mock.patch('oslo_db.api.DBAPI')
def test_db_api_without_installed_eventlet(self, mock_db_api):
# test context:
# CONF.database.use_tpool == True
# eventlet is not installed
# expected result:
# raise ImportError
self.conf.set_override('use_tpool', True, group='database')
sys.modules['eventlet'] = None
self.assertRaises(ImportError, getattr, self.db_api, 'fake')
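The three tests above fix the wrapper's contract: the real DBAPI is built lazily on first attribute access, and is wrapped in eventlet.tpool.Proxy only when CONF.database.use_tpool is set; a missing eventlet then surfaces as ImportError. A condensed sketch of that logic (locking and option registration, which the real oslo_db.concurrency.TpoolDbapiWrapper handles, are omitted):

from oslo_db import api


class LazyTpoolWrapper(object):
    """Sketch of the lazy tpool-wrapping behaviour asserted above."""

    def __init__(self, conf, backend_mapping):
        self._conf = conf
        self._backend_mapping = backend_mapping
        self._db_api = None

    def __getattr__(self, key):
        if self._db_api is None:
            db_api = api.DBAPI.from_config(
                conf=self._conf, backend_mapping=self._backend_mapping)
            if self._conf.database.use_tpool:
                # Imported only when needed, so eventlet stays optional;
                # an ImportError propagates if it is missing.
                from eventlet import tpool
                db_api = tpool.Proxy(db_api)
            self._db_api = db_api
        return getattr(self._db_api, key)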

oslo_db/tests/utils.py

@ -0,0 +1,40 @@
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from oslo.config import cfg
from oslotest import base as test_base
from oslotest import moxstubout
import six
if six.PY3:
@contextlib.contextmanager
def nested(*contexts):
with contextlib.ExitStack() as stack:
yield [stack.enter_context(c) for c in contexts]
else:
nested = contextlib.nested
class BaseTestCase(test_base.BaseTestCase):
def setUp(self, conf=cfg.CONF):
super(BaseTestCase, self).setUp()
moxfixture = self.useFixture(moxstubout.MoxStubout())
self.mox = moxfixture.mox
self.stubs = moxfixture.stubs
self.conf = conf
self.addCleanup(self.conf.reset)
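The nested() shim defined at the top of this file gives Python 3 a stand-in for contextlib.nested, which 3.x removed. A hypothetical usage example, entering two mock.patch contexts at once:

import mock

from oslo_db.tests import utils as test_utils

with test_utils.nested(
        mock.patch('oslo_db.api.DBAPI'),
        mock.patch('oslo_db.sqlalchemy.session.create_engine'),
) as (mock_dbapi, mock_create_engine):
    pass  # both patches are active inside this block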


@ -21,17 +21,18 @@ classifier =
packages =
oslo
oslo.db
oslo_db
namespace_packages =
oslo
[entry_points]
oslo.config.opts =
oslo.db = oslo.db.options:list_opts
oslo.db.concurrency = oslo.db.concurrency:list_opts
oslo.db = oslo_db.options:list_opts
oslo.db.concurrency = oslo_db.concurrency:list_opts
oslo.db.migration =
alembic = oslo.db.sqlalchemy.migration_cli.ext_alembic:AlembicExtension
migrate = oslo.db.sqlalchemy.migration_cli.ext_migrate:MigrateExtension
alembic = oslo_db.sqlalchemy.migration_cli.ext_alembic:AlembicExtension
migrate = oslo_db.sqlalchemy.migration_cli.ext_migrate:MigrateExtension
[build_sphinx]
source-dir = doc/source
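The entry point names under oslo.config.opts and oslo.db.migration stay the same; only their targets move to oslo_db modules, so existing consumers keep resolving them. A quick way to check what a name now points at, sketched with pkg_resources (stevedore wraps the same mechanism):

import pkg_resources

for ep in pkg_resources.iter_entry_points('oslo.config.opts'):
    if ep.name.startswith('oslo.db'):
        list_opts = ep.load()  # now resolves to oslo_db.options:list_opts
        print(ep.name, list_opts())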


@ -36,6 +36,11 @@ tox_envbin=$project_dir/.tox/$venv/bin
our_name=$(python setup.py --name)
# Build the egg-info, including the source file list,
# so we install all of the files, even if the package
# list or name has changed.
python setup.py egg_info
# Replace the pip-installed package with the version in our source
# tree. Look to see if we are already installed before trying to
# uninstall ourselves, to avoid failures from packages that do not use us


@ -59,4 +59,4 @@ exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build
[hacking]
import_exceptions =
oslo.db._i18n
oslo_db._i18n