Remove duplicate code from nova.db.sqlalchemy.utils

There is a lot of code in nova.db.sqlalchemy.utils that is duplicated
in oslo-incubator's openstack.common.db.sqlalchemy.utils module, so
this patch removes the duplicates from Nova and points the callers at
the oslo versions.

Notable differences:

1. modify_indexes was nova-only but is no longer used after the
   migration compaction work, so it is simply removed here. It also
   didn't really work for mysql in cases where the index name was too
   long, since the name then had to be table-specific, which made it
   unusable in some mysql cases. See commit 029ebab for history.
2. create_shadow_table was not in oslo-incubator and is also not
   currently used in nova after the migration compaction; however, it
   is left in place in case future migrations in Nova need it. This
   patch does change create_shadow_table to use
   _get_not_supported_column from oslo's DB utils instead of the
   local copy.
3. DeleteFromSelect is still used within nova.db.api per commit
   b36826e. We could move it to oslo-incubator, but this patch leaves
   it for now since it is only used in nova.
4. InsertFromSelect was introduced with commit 2e403b2, but there is
   now a copy in oslo-incubator, so we can remove our version and its
   usage in nova (see the sketch after this list). However, oslo
   doesn't have the unit test that nova does, so that unit test has
   been moved to oslo with change I457acf33.
5. Oslo has is_backend_avail and get_connect_string methods, so
   test_migrations.py is updated to use those as part of this change.

Closes-Bug: #1280055

Change-Id: Iefa5b4311f1fe1a5da31cf527521c393f2face7c
Matt Riedemann 2014-03-07 06:29:38 -08:00
parent 82b07d0392
commit 1a8dd2487d
7 changed files with 82 additions and 1075 deletions

@@ -5609,7 +5609,8 @@ def archive_deleted_rows_for_table(context, tablename, max_rows):
table.c.deleted != default_deleted_value).\
order_by(column).limit(max_rows)
insert_statement = db_utils.InsertFromSelect(shadow_table, query_insert)
insert_statement = sqlalchemyutils.InsertFromSelect(
shadow_table, query_insert)
delete_statement = db_utils.DeleteFromSelect(table, query_delete, column)
try:
# Group the insert and delete in a transaction.
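
A condensed sketch of the grouping that the comment above refers to;
the construction of the two statements is elided and `engine` is
assumed, so this is illustrative only:

    def _archive(engine, insert_statement, delete_statement):
        conn = engine.connect()
        # Run the copy-to-shadow insert and the delete together so a
        # failure in either rolls both back and no rows are stranded
        # between the main table and its shadow table.
        with conn.begin():
            conn.execute(insert_statement)
            conn.execute(delete_statement)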

@@ -15,7 +15,7 @@
from sqlalchemy import Column, String, Text
from nova.db.sqlalchemy import api
from nova.db.sqlalchemy import utils
from nova.openstack.common.db.sqlalchemy import utils
def upgrade(migrate_engine):

@@ -13,62 +13,24 @@
# License for the specific language governing permissions and limitations
# under the License.
import re
from migrate.changeset import UniqueConstraint, ForeignKeyConstraint
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import schema
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from nova.db.sqlalchemy import api as db
from nova import exception
from nova.openstack.common.db.sqlalchemy import utils as oslodbutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
"""
metadata = MetaData()
metadata.bind = engine
return Table(name, metadata, autoload=True)
class InsertFromSelect(UpdateBase):
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
return "INSERT INTO %s %s" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select))
class DeleteFromSelect(UpdateBase):
def __init__(self, table, select, column):
self.table = table
@@ -87,173 +49,6 @@ def visit_delete_from_select(element, compiler, **kw):
compiler.process(element.select))
def _get_not_supported_column(col_name_col_instance, column_name):
try:
column = col_name_col_instance[column_name]
except Exception:
msg = _("Please specify column %s in col_name_col_instance "
"param. It is required because column has unsupported "
"type by sqlite).")
raise exception.NovaException(msg % column_name)
if not isinstance(column, Column):
msg = _("col_name_col_instance param has wrong type of "
"column instance for column %s It should be instance "
"of sqlalchemy.Column.")
raise exception.NovaException(msg % column_name)
return column
def _get_unique_constraints_in_sqlite(migrate_engine, table_name):
regexp = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
sql_data = migrate_engine.execute(
"""
SELECT sql
FROM
sqlite_master
WHERE
type = 'table' AND
name = :table_name;
""",
table_name=table_name
).fetchone()[0]
uniques = set([
schema.UniqueConstraint(
*[getattr(table.c, c.strip(' "'))
for c in cols.split(",")], name=name
)
for name, cols in re.findall(regexp, sql_data)
])
return uniques
def _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
columns = []
for column in table.columns:
if isinstance(column.type, NullType):
new_column = _get_not_supported_column(col_name_col_instance,
column.name)
columns.append(new_column)
else:
columns.append(column.copy())
uniques = _get_unique_constraints_in_sqlite(migrate_engine, table_name)
table.constraints.update(uniques)
constraints = [constraint for constraint in table.constraints
if not constraint.name == uc_name and
not isinstance(constraint, schema.ForeignKeyConstraint)]
new_table = Table(table_name + "__tmp__", meta, *(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"],
*column_names,
unique=index["unique"]))
f_keys = []
for fk in insp.get_foreign_keys(table_name):
refcolumns = [fk['referred_table'] + '.' + col
for col in fk['referred_columns']]
f_keys.append(ForeignKeyConstraint(fk['constrained_columns'],
refcolumns, table=new_table, name=fk['name']))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
for fkey in f_keys:
fkey.create()
new_table.rename(table_name)
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
**col_name_col_instance):
"""This method drops UC from table and works for mysql, postgresql and
sqlite. In mysql and postgresql we are able to use "alter table"
construction. In sqlite is only one way to drop UC:
1) Create new table with same columns, indexes and constraints
(except one that we want to drop).
2) Copy data from old table to new.
3) Drop old table.
4) Rename new table to the name of old table.
:param migrate_engine: sqlalchemy engine
:param table_name: name of table that contains uniq constraint.
:param uc_name: name of uniq constraint that will be dropped.
:param columns: columns that are in uniq constraint.
:param col_name_col_instance: contains pair column_name=column_instance.
column_instance is instance of Column. These params
are required only for columns that have unsupported
types by sqlite. For example BigInteger.
"""
if migrate_engine.name == "sqlite":
_drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
**col_name_col_instance)
else:
meta = MetaData()
meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True)
uc = UniqueConstraint(*columns, table=t, name=uc_name)
uc.drop()
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
use_soft_delete, *uc_column_names):
"""This method is used to drop all old rows that have the same values for
columns in uc_columns.
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
columns_for_group_by = [table.c[name] for name in uc_column_names]
columns_for_select = [func.max(table.c.id)]
columns_for_select.extend(list(columns_for_group_by))
duplicated_rows_select = select(columns_for_select,
group_by=columns_for_group_by,
having=func.count(table.c.id) > 1)
for row in migrate_engine.execute(duplicated_rows_select):
# NOTE(boris-42): Do not remove row that has the biggest ID.
delete_condition = table.c.id != row[0]
for name in uc_column_names:
delete_condition &= table.c[name] == row[name]
rows_to_delete_select = select([table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_("Deleted duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\
where(delete_condition).\
values({
'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()
})
else:
delete_statement = table.delete().where(delete_condition)
migrate_engine.execute(delete_statement)
def check_shadow_table(migrate_engine, table_name):
"""This method checks that table with ``table_name`` and
corresponding shadow table have same columns.
@@ -319,8 +114,8 @@ def create_shadow_table(migrate_engine, table_name=None, table=None,
columns = []
for column in table.columns:
if isinstance(column.type, NullType):
new_column = _get_not_supported_column(col_name_col_instance,
column.name)
new_column = oslodbutils._get_not_supported_column(
col_name_col_instance, column.name)
columns.append(new_column)
else:
columns.append(column.copy())
@@ -338,272 +133,3 @@ def create_shadow_table(migrate_engine, table_name=None, table=None,
except Exception:
LOG.info(repr(shadow_table))
LOG.exception(_('Exception while creating table.'))
def _get_default_deleted_value(table):
if isinstance(table.c.id.type, Integer):
return 0
if isinstance(table.c.id.type, String):
return ""
raise exception.NovaException(_("Unsupported id columns type"))
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
table = get_table(migrate_engine, table_name)
insp = reflection.Inspector.from_engine(migrate_engine)
real_indexes = insp.get_indexes(table_name)
existing_index_names = dict([(index['name'], index['column_names'])
for index in real_indexes])
# NOTE(boris-42): Restore indexes on `deleted` column
for index in indexes:
if 'deleted' not in index['column_names']:
continue
name = index['name']
if name in existing_index_names:
column_names = [table.c[c] for c in existing_index_names[name]]
old_index = Index(name, *column_names, unique=index["unique"])
old_index.drop(migrate_engine)
column_names = [table.c[c] for c in index['column_names']]
new_index = Index(index["name"], *column_names, unique=index["unique"])
new_index.create(migrate_engine)
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_boolean_sqlite(migrate_engine,
table_name,
**col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
old_deleted = Column('old_deleted', Boolean, default=False)
old_deleted.create(table, populate_default=False)
table.update().\
where(table.c.deleted == table.c.id).\
values(old_deleted=True).\
execute()
table.c.deleted.drop()
table.c.old_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
table = get_table(migrate_engine, table_name)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', Boolean, default=0)
columns.append(column_copy)
constraints = [constraint.copy() for constraint in table.constraints]
meta = MetaData(bind=migrate_engine)
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
c_select = []
for c in table.c:
if c.name != "deleted":
c_select.append(c)
else:
c_select.append(table.c.deleted == table.c.id)
ins = InsertFromSelect(new_table, select(c_select))
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == new_table.c.id).\
values(deleted=True).\
execute()
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_id_type_sqlite(migrate_engine,
table_name,
**col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
new_deleted = Column('new_deleted', table.c.id.type,
default=_get_default_deleted_value(table))
new_deleted.create(table, populate_default=True)
table.update().\
where(table.c.deleted == True).\
values(new_deleted=table.c.id).\
execute()
table.c.deleted.drop()
table.c.new_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
**col_name_col_instance):
# NOTE(boris-42): sqlaclhemy-migrate can't drop column with check
# constraints in sqlite DB and our `deleted` column has
# 2 check constraints. So there is only one way to remove
# these constraints:
# 1) Create new table with the same columns, constraints
# and indexes. (except deleted column).
# 2) Copy all data from old to new table.
# 3) Drop old table.
# 4) Rename new table to old table name.
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', table.c.id.type,
default=default_deleted_value)
columns.append(column_copy)
def is_deleted_column_constraint(constraint):
# NOTE(boris-42): There is no other way to check is CheckConstraint
# associated with deleted column.
if not isinstance(constraint, CheckConstraint):
return False
sqltext = str(constraint.sqltext)
# NOTE(I159): when the type of column `deleted` is changed from boolean
# to int, the corresponding CHECK constraint is dropped too. But
# starting from SQLAlchemy version 0.8.3, those CHECK constraints
# aren't dropped anymore. So despite the fact that column deleted is
# of type int now, we still restrict its values to be either 0 or 1.
constraint_markers = (
"deleted in (0, 1)",
"deleted IN (:deleted_1, :deleted_2)",
"deleted IN (:param_1, :param_2)"
)
return any(sqltext.endswith(marker) for marker in constraint_markers)
constraints = []
for constraint in table.constraints:
if not is_deleted_column_constraint(constraint):
constraints.append(constraint.copy())
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == True).\
values(deleted=new_table.c.id).\
execute()
# NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
new_table.update().\
where(new_table.c.deleted == False).\
values(deleted=default_deleted_value).\
execute()
def _index_exists(migrate_engine, table_name, index_name):
inspector = reflection.Inspector.from_engine(migrate_engine)
indexes = inspector.get_indexes(table_name)
index_names = [index['name'] for index in indexes]
return index_name in index_names
def _add_index(migrate_engine, table, index_name, idx_columns):
index = Index(
index_name, *[getattr(table.c, col) for col in idx_columns]
)
index.create()
def _drop_index(migrate_engine, table, index_name, idx_columns):
if _index_exists(migrate_engine, table.name, index_name):
index = Index(
index_name, *[getattr(table.c, col) for col in idx_columns]
)
index.drop()
def _change_index_columns(migrate_engine, table, index_name,
new_columns, old_columns):
_drop_index(migrate_engine, table, index_name, old_columns)
_add_index(migrate_engine, table, index_name, new_columns)
def modify_indexes(migrate_engine, data, upgrade=True):
if migrate_engine.name == 'sqlite':
return
meta = MetaData()
meta.bind = migrate_engine
for table_name, indexes in data.iteritems():
table = Table(table_name, meta, autoload=True)
for index_name, old_columns, new_columns in indexes:
if not upgrade:
new_columns, old_columns = old_columns, new_columns
if migrate_engine.name == 'postgresql':
if upgrade:
_add_index(migrate_engine, table, index_name, new_columns)
else:
_drop_index(migrate_engine, table, index_name, old_columns)
elif migrate_engine.name == 'mysql':
_change_index_columns(migrate_engine, table, index_name,
new_columns, old_columns)
else:
raise ValueError('Unsupported DB %s' % migrate_engine.name)
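
The sqlite branch of drop_unique_constraint above has to rebuild the
whole table because sqlite has no ALTER TABLE ... DROP CONSTRAINT. A
stripped-down sketch of that four-step dance, reusing oslo's
InsertFromSelect; index and foreign-key carry-over is omitted here,
although the removed helper handles both:

    from sqlalchemy import MetaData, Table, schema

    from nova.openstack.common.db.sqlalchemy.utils import InsertFromSelect

    def drop_uc_sqlite(migrate_engine, table_name, uc_name):
        meta = MetaData(bind=migrate_engine)
        table = Table(table_name, meta, autoload=True)
        # 1) Create a new table with the same columns and constraints,
        #    except the unique constraint being dropped (and FKs, which
        #    the real helper recreates afterwards).
        columns = [column.copy() for column in table.columns]
        constraints = [c.copy() for c in table.constraints
                       if c.name != uc_name and
                       not isinstance(c, schema.ForeignKeyConstraint)]
        new_table = Table(table_name + '__tmp__', meta,
                          *(columns + constraints))
        new_table.create()
        # 2) Copy the data over in one server-side statement.
        migrate_engine.execute(InsertFromSelect(new_table, table.select()))
        # 3) Drop the old table and 4) rename the new one into place
        #    (Table.rename comes from sqlalchemy-migrate).
        table.drop()
        new_table.rename(table_name)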

@@ -45,6 +45,7 @@ from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.db.sqlalchemy import utils as sqlalchemyutils
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
@@ -6314,22 +6315,24 @@ class ArchiveTestCase(test.TestCase):
self.context = context.get_admin_context()
self.engine = get_engine()
self.conn = self.engine.connect()
self.instance_id_mappings = db_utils.get_table(self.engine,
"instance_id_mappings")
self.shadow_instance_id_mappings = db_utils.get_table(self.engine,
"shadow_instance_id_mappings")
self.dns_domains = db_utils.get_table(self.engine, "dns_domains")
self.shadow_dns_domains = db_utils.get_table(self.engine,
"shadow_dns_domains")
self.consoles = db_utils.get_table(self.engine, "consoles")
self.console_pools = db_utils.get_table(self.engine, "console_pools")
self.shadow_consoles = db_utils.get_table(self.engine,
"shadow_consoles")
self.shadow_console_pools = db_utils.get_table(self.engine,
"shadow_console_pools")
self.instances = db_utils.get_table(self.engine, "instances")
self.shadow_instances = db_utils.get_table(self.engine,
"shadow_instances")
self.instance_id_mappings = sqlalchemyutils.get_table(
self.engine, "instance_id_mappings")
self.shadow_instance_id_mappings = sqlalchemyutils.get_table(
self.engine, "shadow_instance_id_mappings")
self.dns_domains = sqlalchemyutils.get_table(
self.engine, "dns_domains")
self.shadow_dns_domains = sqlalchemyutils.get_table(
self.engine, "shadow_dns_domains")
self.consoles = sqlalchemyutils.get_table(self.engine, "consoles")
self.console_pools = sqlalchemyutils.get_table(
self.engine, "console_pools")
self.shadow_consoles = sqlalchemyutils.get_table(
self.engine, "shadow_consoles")
self.shadow_console_pools = sqlalchemyutils.get_table(
self.engine, "shadow_console_pools")
self.instances = sqlalchemyutils.get_table(self.engine, "instances")
self.shadow_instances = sqlalchemyutils.get_table(
self.engine, "shadow_instances")
self.uuidstrs = []
for unused in range(6):
self.uuidstrs.append(stdlib_uuid.uuid4().hex)
@@ -6343,17 +6346,17 @@ class ArchiveTestCase(test.TestCase):
super(ArchiveTestCase, self).tearDown()
for tablename in self.id_tablenames_to_cleanup:
for name in [tablename, "shadow_" + tablename]:
table = db_utils.get_table(self.engine, name)
table = sqlalchemyutils.get_table(self.engine, name)
del_statement = table.delete(table.c.id.in_(self.ids))
self.conn.execute(del_statement)
for tablename in self.uuid_tablenames_to_cleanup:
for name in [tablename, "shadow_" + tablename]:
table = db_utils.get_table(self.engine, name)
table = sqlalchemyutils.get_table(self.engine, name)
del_statement = table.delete(table.c.uuid.in_(self.uuidstrs))
self.conn.execute(del_statement)
for tablename in self.domain_tablenames_to_cleanup:
for name in [tablename, "shadow_" + tablename]:
table = db_utils.get_table(self.engine, name)
table = sqlalchemyutils.get_table(self.engine, name)
del_statement = table.delete(table.c.domain.in_(self.uuidstrs))
self.conn.execute(del_statement)
@@ -6432,11 +6435,12 @@ class ArchiveTestCase(test.TestCase):
def _test_archive_deleted_rows_for_one_uuid_table(self, tablename):
""":returns: 0 on success, 1 if no uuid column, 2 if insert failed."""
main_table = db_utils.get_table(self.engine, tablename)
main_table = sqlalchemyutils.get_table(self.engine, tablename)
if not hasattr(main_table.c, "uuid"):
# Not a uuid table, so skip it.
return 1
shadow_table = db_utils.get_table(self.engine, "shadow_" + tablename)
shadow_table = sqlalchemyutils.get_table(
self.engine, "shadow_" + tablename)
# Add 6 rows to table
for uuidstr in self.uuidstrs:
ins_stmt = main_table.insert().values(uuid=uuidstr)

@@ -14,22 +14,18 @@
# under the License.
import uuid
import warnings
from migrate.changeset import UniqueConstraint
import sqlalchemy
from sqlalchemy.dialects import mysql
from sqlalchemy import Boolean, Index, Integer, DateTime, String
from sqlalchemy import MetaData, Table, Column, ForeignKey
from sqlalchemy.engine import reflection
from sqlalchemy import Integer, String
from sqlalchemy import MetaData, Table, Column
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.exc import SAWarning
from sqlalchemy.sql import select
from sqlalchemy.types import UserDefinedType, NullType
from sqlalchemy.types import UserDefinedType
from nova.db.sqlalchemy import api as db
from nova.db.sqlalchemy import utils
from nova import exception
from nova.openstack.common.db.sqlalchemy import utils as oslodbutils
from nova.tests.db import test_migrations
@@ -82,256 +78,6 @@ class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
test_table.drop()
def test_insert_from_select(self):
insert_table_name = "__test_insert_to_table__"
select_table_name = "__test_select_from_table__"
uuidstrs = []
for unused in range(10):
uuidstrs.append(uuid.uuid4().hex)
for key, engine in self.engines.items():
meta = MetaData()
meta.bind = engine
conn = engine.connect()
insert_table = Table(insert_table_name, meta,
Column('id', Integer, primary_key=True,
nullable=False, autoincrement=True),
Column('uuid', String(36), nullable=False))
select_table = Table(select_table_name, meta,
Column('id', Integer, primary_key=True,
nullable=False, autoincrement=True),
Column('uuid', String(36), nullable=False))
insert_table.create()
select_table.create()
# Add 10 rows to select_table
for uuidstr in uuidstrs:
ins_stmt = select_table.insert().values(uuid=uuidstr)
conn.execute(ins_stmt)
# Select 4 rows in one chunk from select_table
column = select_table.c.id
query_insert = select([select_table],
select_table.c.id < 5).order_by(column)
insert_statement = utils.InsertFromSelect(insert_table,
query_insert)
result_insert = conn.execute(insert_statement)
# Verify we insert 4 rows
self.assertEqual(result_insert.rowcount, 4)
query_all = select([insert_table]).\
where(insert_table.c.uuid.in_(uuidstrs))
rows = conn.execute(query_all).fetchall()
# Verify we really have 4 rows in insert_table
self.assertEqual(len(rows), 4)
insert_table.drop()
select_table.drop()
def test_utils_drop_unique_constraint(self):
table_name = "test_utils_drop_unique_constraint"
uc_name = 'uniq_foo'
values = [
{'id': 1, 'a': 3, 'foo': 10},
{'id': 2, 'a': 2, 'foo': 20},
{'id': 3, 'a': 1, 'foo': 30}
]
for key, engine in self.engines.items():
meta = MetaData()
meta.bind = engine
test_table = Table(table_name, meta,
Column('id', Integer, primary_key=True,
nullable=False),
Column('a', Integer),
Column('foo', Integer),
UniqueConstraint('a', name='uniq_a'),
UniqueConstraint('foo', name=uc_name))
test_table.create()
engine.execute(test_table.insert(), values)
# NOTE(boris-42): This method is generic UC dropper.
utils.drop_unique_constraint(engine, table_name, uc_name, 'foo')
s = test_table.select().order_by(test_table.c.id)
rows = engine.execute(s).fetchall()
for i in xrange(0, len(values)):
v = values[i]
self.assertEqual((v['id'], v['a'], v['foo']), rows[i])
# NOTE(boris-42): Update data about Table from DB.
meta = MetaData()
meta.bind = engine
test_table = Table(table_name, meta, autoload=True)
constraints = filter(lambda c: c.name == uc_name,
test_table.constraints)
self.assertEqual(len(constraints), 0)
self.assertEqual(len(test_table.constraints), 1)
test_table.drop()
def test_util_drop_unique_constraint_with_not_supported_sqlite_type(self):
if 'sqlite' not in self.engines:
self.skipTest('sqlite is not configured')
engine = self.engines['sqlite']
meta = MetaData(bind=engine)
table_name = ("test_util_drop_unique_constraint_with_not_supported"
"_sqlite_type")
uc_name = 'uniq_foo'
values = [
{'id': 1, 'a': 3, 'foo': 10},
{'id': 2, 'a': 2, 'foo': 20},
{'id': 3, 'a': 1, 'foo': 30}
]
test_table = Table(table_name, meta,
Column('id', Integer, primary_key=True,
nullable=False),
Column('a', Integer),
Column('foo', CustomType, default=0),
UniqueConstraint('a', name='uniq_a'),
UniqueConstraint('foo', name=uc_name))
test_table.create()
engine.execute(test_table.insert(), values)
warnings.simplefilter("ignore", SAWarning)
# reflection of custom types has been fixed upstream
if SA_VERSION < (0, 9, 0):
# NOTE(boris-42): Missing info about column `foo` that has
# unsupported type CustomType.
self.assertRaises(exception.NovaException,
utils.drop_unique_constraint,
engine, table_name, uc_name, 'foo')
# NOTE(boris-42): Wrong type of foo instance. it should be
# instance of sqlalchemy.Column.
self.assertRaises(exception.NovaException,
utils.drop_unique_constraint,
engine, table_name, uc_name, 'foo',
foo=Integer())
foo = Column('foo', CustomType, default=0)
utils.drop_unique_constraint(engine, table_name, uc_name, 'foo',
foo=foo)
s = test_table.select().order_by(test_table.c.id)
rows = engine.execute(s).fetchall()
for i in xrange(0, len(values)):
v = values[i]
self.assertEqual((v['id'], v['a'], v['foo']), rows[i])
# NOTE(boris-42): Update data about Table from DB.
meta = MetaData(bind=engine)
test_table = Table(table_name, meta, autoload=True)
constraints = filter(lambda c: c.name == uc_name,
test_table.constraints)
self.assertEqual(len(constraints), 0)
self.assertEqual(len(test_table.constraints), 1)
test_table.drop()
def _populate_db_for_drop_duplicate_entries(self, engine, meta,
table_name):
values = [
{'id': 11, 'a': 3, 'b': 10, 'c': 'abcdef'},
{'id': 12, 'a': 5, 'b': 10, 'c': 'abcdef'},
{'id': 13, 'a': 6, 'b': 10, 'c': 'abcdef'},
{'id': 14, 'a': 7, 'b': 10, 'c': 'abcdef'},
{'id': 21, 'a': 1, 'b': 20, 'c': 'aa'},
{'id': 31, 'a': 1, 'b': 20, 'c': 'bb'},
{'id': 41, 'a': 1, 'b': 30, 'c': 'aef'},
{'id': 42, 'a': 2, 'b': 30, 'c': 'aef'},
{'id': 43, 'a': 3, 'b': 30, 'c': 'aef'}
]
test_table = Table(table_name, meta,
Column('id', Integer, primary_key=True,
nullable=False),
Column('a', Integer),
Column('b', Integer),
Column('c', String(255)),
Column('deleted', Integer, default=0),
Column('deleted_at', DateTime),
Column('updated_at', DateTime))
test_table.create()
engine.execute(test_table.insert(), values)
return test_table, values
def test_drop_old_duplicate_entries_from_table(self):
table_name = "test_drop_old_duplicate_entries_from_table"
for key, engine in self.engines.items():
meta = MetaData()
meta.bind = engine
test_table, values = self.\
_populate_db_for_drop_duplicate_entries(engine, meta,
table_name)
utils.drop_old_duplicate_entries_from_table(engine, table_name,
False, 'b', 'c')
uniq_values = set()
expected_ids = []
for value in sorted(values, key=lambda x: x['id'], reverse=True):
uniq_value = (('b', value['b']), ('c', value['c']))
if uniq_value in uniq_values:
continue
uniq_values.add(uniq_value)
expected_ids.append(value['id'])
real_ids = [row[0] for row in
engine.execute(select([test_table.c.id])).fetchall()]
self.assertEqual(len(real_ids), len(expected_ids))
for id_ in expected_ids:
self.assertIn(id_, real_ids)
test_table.drop()
def test_drop_old_duplicate_entries_from_table_soft_delete(self):
table_name = "test_drop_old_duplicate_entries_from_table_soft_delete"
for key, engine in self.engines.items():
meta = MetaData()
meta.bind = engine
table, values = self.\
_populate_db_for_drop_duplicate_entries(engine, meta,
table_name)
utils.drop_old_duplicate_entries_from_table(engine, table_name,
True, 'b', 'c')
uniq_values = set()
expected_values = []
soft_deleted_values = []
for value in sorted(values, key=lambda x: x['id'], reverse=True):
uniq_value = (('b', value['b']), ('c', value['c']))
if uniq_value in uniq_values:
soft_deleted_values.append(value)
continue
uniq_values.add(uniq_value)
expected_values.append(value)
base_select = table.select()
rows_select = base_select.\
where(table.c.deleted != table.c.id)
row_ids = [row['id'] for row in
engine.execute(rows_select).fetchall()]
self.assertEqual(len(row_ids), len(expected_values))
for value in expected_values:
self.assertIn(value['id'], row_ids)
deleted_rows_select = base_select.\
where(table.c.deleted == table.c.id)
deleted_rows_ids = [row['id'] for row in
engine.execute(deleted_rows_select).fetchall()]
self.assertEqual(len(deleted_rows_ids),
len(values) - len(row_ids))
for value in soft_deleted_values:
self.assertIn(value['id'], deleted_rows_ids)
table.drop()
def test_check_shadow_table(self):
table_name = 'test_check_shadow_table'
for key, engine in self.engines.items():
@@ -458,7 +204,7 @@ class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
# reflection of custom types has been fixed upstream
if SA_VERSION < (0, 9, 0):
self.assertRaises(exception.NovaException,
self.assertRaises(oslodbutils.ColumnError,
utils.create_shadow_table,
engine, table_name=table_name)
@@ -508,178 +254,3 @@ class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
engine, table_name=table_name)
table.drop()
shadow_table.drop()
def test_change_deleted_column_type_doesnt_drop_index(self):
table_name = 'test_change_deleted_column_type_doesnt_drop_index'
for key, engine in self.engines.items():
meta = MetaData(bind=engine)
indexes = {
'idx_a_deleted': ['a', 'deleted'],
'idx_b_deleted': ['b', 'deleted'],
'idx_a': ['a']
}
index_instances = [Index(name, *columns)
for name, columns in indexes.iteritems()]
table = Table(table_name, meta,
Column('id', Integer, primary_key=True),
Column('a', String(255)),
Column('b', String(255)),
Column('deleted', Boolean),
*index_instances)
table.create()
utils.change_deleted_column_type_to_id_type(engine, table_name)
utils.change_deleted_column_type_to_boolean(engine, table_name)
insp = reflection.Inspector.from_engine(engine)
real_indexes = insp.get_indexes(table_name)
self.assertEqual(len(real_indexes), 3)
for index in real_indexes:
name = index['name']
self.assertIn(name, indexes)
self.assertEqual(set(index['column_names']),
set(indexes[name]))
table.drop()
def test_change_deleted_column_type_to_id_type_integer(self):
table_name = 'test_change_deleted_column_type_to_id_type_integer'
for key, engine in self.engines.items():
meta = MetaData()
meta.bind = engine
table = Table(table_name, meta,
Column('id', Integer, primary_key=True),
Column('deleted', Boolean))
table.create()
utils.change_deleted_column_type_to_id_type(engine, table_name)
table = utils.get_table(engine, table_name)
self.assertIsInstance(table.c.deleted.type, Integer)
table.drop()
def test_change_deleted_column_type_to_id_type_string(self):
table_name = 'test_change_deleted_column_type_to_id_type_string'
for key, engine in self.engines.items():
meta = MetaData()
meta.bind = engine
table = Table(table_name, meta,
Column('id', String(255), primary_key=True),
Column('deleted', Boolean))
table.create()
utils.change_deleted_column_type_to_id_type(engine, table_name)
table = utils.get_table(engine, table_name)
self.assertIsInstance(table.c.deleted.type, String)
table.drop()
def test_change_deleted_column_type_to_id_type_custom(self):
if 'sqlite' in self.engines:
table_name = 'test_change_deleted_column_type_to_id_type_custom'
engine = self.engines['sqlite']
meta = MetaData()
meta.bind = engine
table = Table(table_name, meta,
Column('id', Integer, primary_key=True),
Column('foo', CustomType),
Column('deleted', Boolean))
table.create()
# reflection of custom types has been fixed upstream
if SA_VERSION < (0, 9, 0):
self.assertRaises(exception.NovaException,
utils.change_deleted_column_type_to_id_type,
engine, table_name)
fooColumn = Column('foo', CustomType())
utils.change_deleted_column_type_to_id_type(engine, table_name,
foo=fooColumn)
table = utils.get_table(engine, table_name)
# NOTE(boris-42): There is no way to check has foo type CustomType.
# but sqlalchemy will set it to NullType. This has
# been fixed upstream in recent SA versions
if SA_VERSION < (0, 9, 0):
self.assertIsInstance(table.c.foo.type, NullType)
self.assertIsInstance(table.c.deleted.type, Integer)
table.drop()
def test_change_deleted_column_type_to_boolean(self):
table_name = 'test_change_deleted_column_type_to_boolean'
for key, engine in self.engines.items():
meta = MetaData()
meta.bind = engine
table = Table(table_name, meta,
Column('id', Integer, primary_key=True),
Column('deleted', Integer))
table.create()
utils.change_deleted_column_type_to_boolean(engine, table_name)
table = utils.get_table(engine, table_name)
expected_type = Boolean if key != "mysql" else mysql.TINYINT
self.assertIsInstance(table.c.deleted.type, expected_type)
table.drop()
def test_change_deleted_column_type_to_boolean_type_custom(self):
if 'sqlite' in self.engines:
table_name = \
'test_change_deleted_column_type_to_boolean_type_custom'
engine = self.engines['sqlite']
meta = MetaData()
meta.bind = engine
table = Table(table_name, meta,
Column('id', Integer, primary_key=True),
Column('foo', CustomType),
Column('deleted', Integer))
table.create()
# reflection of custom types has been fixed upstream
if SA_VERSION < (0, 9, 0):
self.assertRaises(exception.NovaException,
utils.change_deleted_column_type_to_boolean,
engine, table_name)
fooColumn = Column('foo', CustomType())
utils.change_deleted_column_type_to_boolean(engine, table_name,
foo=fooColumn)
table = utils.get_table(engine, table_name)
# NOTE(boris-42): There is no way to check has foo type CustomType.
# but sqlalchemy will set it to NullType. This has
# been fixed upstream in recent SA versions.
if SA_VERSION < (0, 9, 0):
self.assertIsInstance(table.c.foo.type, NullType)
self.assertIsInstance(table.c.deleted.type, Boolean)
table.drop()
def test_drop_unique_constraint_in_sqlite_fk_recreate(self):
if 'sqlite' in self.engines:
engine = self.engines['sqlite']
meta = MetaData()
meta.bind = engine
parent_table_name = ('test_drop_unique_constraint_in_sqlite_fk_'
'recreate_parent_table')
parent_table = Table(parent_table_name, meta,
Column('id', Integer, primary_key=True),
Column('foo', Integer))
parent_table.create()
table_name = 'test_drop_unique_constraint_in_sqlite_fk_recreate'
table = Table(table_name, meta,
Column('id', Integer, primary_key=True),
Column('baz', Integer),
Column('bar', Integer,
ForeignKey(parent_table_name + ".id")),
UniqueConstraint('baz', name='constr1'))
table.create()
utils.drop_unique_constraint(engine, table_name, 'constr1', 'baz')
insp = reflection.Inspector.from_engine(engine)
f_keys = insp.get_foreign_keys(table_name)
self.assertEqual(len(f_keys), 1)
f_key = f_keys[0]
self.assertEqual(f_key['referred_table'], parent_table_name)
self.assertEqual(f_key['referred_columns'], ['id'])
self.assertEqual(f_key['constrained_columns'], ['bar'])
table.drop()
parent_table.drop()

@@ -53,6 +53,7 @@ import sqlalchemy.exc
import nova.db.sqlalchemy.migrate_repo
from nova.db.sqlalchemy import utils as db_utils
from nova.openstack.common.db.sqlalchemy import utils as oslodbutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
@@ -64,46 +65,19 @@ import nova.virt.baremetal.db.sqlalchemy.migrate_repo
LOG = logging.getLogger(__name__)
def _get_connect_string(backend, user, passwd, database):
"""Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
if backend == "postgres":
backend = "postgresql+psycopg2"
elif backend == "mysql":
backend = "mysql+mysqldb"
else:
raise Exception("Unrecognized backend: '%s'" % backend)
return ("%s://%s:%s@localhost/%s" % (backend, user, passwd, database))
def _is_backend_avail(backend, user, passwd, database):
try:
connect_uri = _get_connect_string(backend, user, passwd, database)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
def _have_mysql(user, passwd, database):
present = os.environ.get('NOVA_TEST_MYSQL_PRESENT')
if present is None:
return _is_backend_avail('mysql', user, passwd, database)
return oslodbutils.is_backend_avail('mysql+mysqldb', database,
user, passwd)
return present.lower() in ('', 'true')
def _have_postgresql(user, passwd, database):
present = os.environ.get('NOVA_TEST_POSTGRESQL_PRESENT')
if present is None:
return _is_backend_avail('postgres', user, passwd, database)
return oslodbutils.is_backend_avail('postgresql+psycopg2', database,
user, passwd)
return present.lower() in ('', 'true')
@@ -157,8 +131,8 @@ class CommonTestsMixIn(object):
"""Test that we can trigger a mysql connection failure and we fail
gracefully to ensure we don't break people without mysql
"""
if _is_backend_avail('mysql', "openstack_cifail", self.PASSWD,
self.DATABASE):
if oslodbutils.is_backend_avail('mysql+mysqldb', self.DATABASE,
"openstack_cifail", self.PASSWD):
self.fail("Shouldn't have connected")
def test_postgresql_opportunistically(self):
@@ -168,8 +142,8 @@ class CommonTestsMixIn(object):
"""Test that we can trigger a postgres connection failure and we fail
gracefully to ensure we don't break people without postgres
"""
if _is_backend_avail('postgres', "openstack_cifail", self.PASSWD,
self.DATABASE):
if oslodbutils.is_backend_avail('postgresql+psycopg2', self.DATABASE,
"openstack_cifail", self.PASSWD):
self.fail("Shouldn't have connected")
@@ -403,8 +377,8 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
self.skipTest("mysql not available")
# add this to the global lists to make reset work with it, it's removed
# automatically in tearDown so no need to clean it up here.
connect_string = _get_connect_string("mysql", self.USER, self.PASSWD,
self.DATABASE)
connect_string = oslodbutils.get_connect_string(
"mysql+mysqldb", self.DATABASE, self.USER, self.PASSWD)
(user, password, database, host) = \
get_mysql_connection_info(urlparse.urlparse(connect_string))
engine = sqlalchemy.create_engine(connect_string)
@@ -442,8 +416,8 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
self.skipTest("postgresql not available")
# add this to the global lists to make reset work with it, it's removed
# automatically in tearDown so no need to clean it up here.
connect_string = _get_connect_string("postgres", self.USER,
self.PASSWD, self.DATABASE)
connect_string = oslodbutils.get_connect_string(
"postgresql+psycopg2", self.DATABASE, self.USER, self.PASSWD)
engine = sqlalchemy.create_engine(connect_string)
(user, password, database, host) = \
get_pgsql_connection_info(urlparse.urlparse(connect_string))
@@ -579,26 +553,26 @@ class TestNovaMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
self.migration_api = temp.versioning_api
def assertColumnExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
t = oslodbutils.get_table(engine, table)
self.assertIn(column, t.c)
def assertColumnNotExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
t = oslodbutils.get_table(engine, table)
self.assertNotIn(column, t.c)
def assertTableNotExists(self, engine, table):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
db_utils.get_table, engine, table)
oslodbutils.get_table, engine, table)
def assertIndexExists(self, engine, table, index):
t = db_utils.get_table(engine, table)
t = oslodbutils.get_table(engine, table)
index_names = [idx.name for idx in t.indexes]
self.assertIn(index, index_names)
def assertIndexMembers(self, engine, table, index, members):
self.assertIndexExists(engine, table, index)
t = db_utils.get_table(engine, table)
t = oslodbutils.get_table(engine, table)
index_columns = None
for idx in t.indexes:
if idx.name == index:
@@ -608,7 +582,7 @@ class TestNovaMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
self.assertEqual(sorted(members), sorted(index_columns))
def _check_227(self, engine, data):
table = db_utils.get_table(engine, 'project_user_quotas')
table = oslodbutils.get_table(engine, 'project_user_quotas')
# Insert fake_quotas with the longest resource name.
fake_quotas = {'id': 5,
@@ -625,7 +599,7 @@
def _check_228(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'metrics')
compute_nodes = db_utils.get_table(engine, 'compute_nodes')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.metrics.type,
sqlalchemy.types.Text)
@@ -635,7 +609,7 @@
def _check_229(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'extra_resources')
compute_nodes = db_utils.get_table(engine, 'compute_nodes')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.extra_resources.type,
sqlalchemy.types.Text)
@@ -648,7 +622,8 @@
self.assertColumnExists(engine, table_name, 'host')
self.assertColumnExists(engine, table_name, 'details')
action_events = db_utils.get_table(engine, 'instance_actions_events')
action_events = oslodbutils.get_table(engine,
'instance_actions_events')
self.assertIsInstance(action_events.c.host.type,
sqlalchemy.types.String)
self.assertIsInstance(action_events.c.details.type,
@@ -663,7 +638,7 @@
def _check_231(self, engine, data):
self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid')
instances = db_utils.get_table(engine, 'instances')
instances = oslodbutils.get_table(engine, 'instances')
self.assertIsInstance(instances.c.ephemeral_key_uuid.type,
sqlalchemy.types.String)
self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
@@ -682,25 +657,27 @@
def _check_233(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'stats')
compute_nodes = db_utils.get_table(engine, 'compute_nodes')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.stats.type,
sqlalchemy.types.Text)
self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table,
engine, 'compute_node_stats')
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
oslodbutils.get_table, engine, 'compute_node_stats')
def _post_downgrade_233(self, engine):
self.assertColumnNotExists(engine, 'compute_nodes', 'stats')
# confirm compute_node_stats exists
db_utils.get_table(engine, 'compute_node_stats')
oslodbutils.get_table(engine, 'compute_node_stats')
def _check_244(self, engine, data):
volume_usage_cache = db_utils.get_table(engine, 'volume_usage_cache')
volume_usage_cache = oslodbutils.get_table(
engine, 'volume_usage_cache')
self.assertEqual(64, volume_usage_cache.c.user_id.type.length)
def _post_downgrade_244(self, engine):
volume_usage_cache = db_utils.get_table(engine, 'volume_usage_cache')
volume_usage_cache = oslodbutils.get_table(
engine, 'volume_usage_cache')
self.assertEqual(36, volume_usage_cache.c.user_id.type.length)
@@ -740,27 +717,27 @@ class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
def _pre_upgrade_002(self, engine):
data = [{'id': 1, 'key': 'fake-key', 'image_path': '/dev/null',
'pxe_config_path': '/dev/null/', 'root_mb': 0, 'swap_mb': 0}]
table = db_utils.get_table(engine, 'bm_deployments')
table = oslodbutils.get_table(engine, 'bm_deployments')
engine.execute(table.insert(), data)
return data
def _check_002(self, engine, data):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
db_utils.get_table, engine, 'bm_deployments')
oslodbutils.get_table, engine, 'bm_deployments')
def _post_downgrade_004(self, engine):
bm_nodes = db_utils.get_table(engine, 'bm_nodes')
bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
self.assertNotIn(u'instance_name', [c.name for c in bm_nodes.columns])
def _check_005(self, engine, data):
bm_nodes = db_utils.get_table(engine, 'bm_nodes')
bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
columns = [c.name for c in bm_nodes.columns]
self.assertNotIn(u'prov_vlan_id', columns)
self.assertNotIn(u'registration_status', columns)
def _pre_upgrade_006(self, engine):
nodes = db_utils.get_table(engine, 'bm_nodes')
ifs = db_utils.get_table(engine, 'bm_interfaces')
nodes = oslodbutils.get_table(engine, 'bm_nodes')
ifs = oslodbutils.get_table(engine, 'bm_interfaces')
# node 1 has two different addresses in bm_nodes and bm_interfaces
engine.execute(nodes.insert(),
[{'id': 1,
@@ -779,7 +756,7 @@ class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
'address': 'cc:cc:cc:cc:cc:cc'}])
def _check_006(self, engine, data):
ifs = db_utils.get_table(engine, 'bm_interfaces')
ifs = oslodbutils.get_table(engine, 'bm_interfaces')
rows = ifs.select().\
where(ifs.c.bm_node_id == 1).\
execute().\
@@ -793,7 +770,7 @@ class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
self.assertEqual(rows[0]['address'], 'cc:cc:cc:cc:cc:cc')
def _post_downgrade_006(self, engine):
ifs = db_utils.get_table(engine, 'bm_interfaces')
ifs = oslodbutils.get_table(engine, 'bm_interfaces')
rows = ifs.select().where(ifs.c.bm_node_id == 1).execute().fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0]['address'], 'bb:bb:bb:bb:bb:bb')
@@ -802,26 +779,26 @@ class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
self.assertEqual(len(rows), 0)
def _check_007(self, engine, data):
bm_nodes = db_utils.get_table(engine, 'bm_nodes')
bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
columns = [c.name for c in bm_nodes.columns]
self.assertNotIn(u'prov_mac_address', columns)
def _check_008(self, engine, data):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
db_utils.get_table, engine, 'bm_pxe_ips')
oslodbutils.get_table, engine, 'bm_pxe_ips')
def _post_downgrade_008(self, engine):
db_utils.get_table(engine, 'bm_pxe_ips')
oslodbutils.get_table(engine, 'bm_pxe_ips')
def _pre_upgrade_010(self, engine):
bm_nodes = db_utils.get_table(engine, 'bm_nodes')
bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
data = [{'id': 10, 'prov_mac_address': 'cc:cc:cc:cc:cc:cc'}]
engine.execute(bm_nodes.insert(), data)
return data
def _check_010(self, engine, data):
bm_nodes = db_utils.get_table(engine, 'bm_nodes')
bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
self.assertIn('preserve_ephemeral', bm_nodes.columns)
default = engine.execute(
@@ -833,7 +810,7 @@ class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
bm_nodes.delete().where(bm_nodes.c.id == data[0]['id']).execute()
def _post_downgrade_010(self, engine):
bm_nodes = db_utils.get_table(engine, 'bm_nodes')
bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
self.assertNotIn('preserve_ephemeral', bm_nodes.columns)
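
For reference on the conversion above: the removed local helpers took
(backend, user, passwd, database) and translated the "mysql" and
"postgres" shorthands themselves, while the oslo helpers take a full
SQLAlchemy backend prefix and (backend, database, user, passwd). A
small usage sketch; the openstack_citest credentials are the
conventional test values and an assumption here:

    from nova.openstack.common.db.sqlalchemy import utils as oslodbutils

    # Gate an opportunistic MySQL test on a reachable local database.
    if oslodbutils.is_backend_avail('mysql+mysqldb', 'openstack_citest',
                                    'openstack_citest', 'openstack_citest'):
        connect_string = oslodbutils.get_connect_string(
            'mysql+mysqldb', 'openstack_citest',
            'openstack_citest', 'openstack_citest')
        # e.g. mysql+mysqldb://openstack_citest:openstack_citest@
        #      localhost/openstack_citest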

@@ -1,72 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.db.sqlalchemy import utils
from nova import test
class SqlAlchemyUtilsTestCase(test.NoDBTestCase):
"""Test case for sqlaclchemy utils methods."""
def test_modify_indexes_checks_index_before_dropping_in_postgresql(self):
data = {"table_name": (('index2', ('old_column'),
('new_column')),)}
migrate_engine = mock.Mock()
migrate_engine.name = 'postgresql'
with mock.patch('nova.db.sqlalchemy.utils.reflection.Inspector'
'.from_engine') as inspector:
inspector.return_value.get_indexes.return_value = [
{'name': "index1"}]
with mock.patch('nova.db.sqlalchemy.utils.Index') as index:
index.return_value = mock.Mock()
utils.modify_indexes(migrate_engine, data, False)
self.assertFalse(index.called)
self.assertFalse(index.return_value.drop.called)
def test_modify_indexes_checks_index_before_dropping_in_mysql(self):
data = {"table_name": (('index2', ('old_column'),
('new_column')),)}
migrate_engine = mock.Mock()
migrate_engine.name = 'mysql'
with mock.patch('nova.db.sqlalchemy.utils.reflection.Inspector'
'.from_engine') as inspector:
inspector.return_value.get_indexes.return_value = [
{'name': "index1"}]
with mock.patch('nova.db.sqlalchemy.utils.Index') as index:
with mock.patch('nova.db.sqlalchemy.utils.Table') as Table:
index.return_value = mock.Mock()
utils.modify_indexes(migrate_engine, data, False)
self.assertFalse(index.return_value.drop.called)
def test_modify_indexes(self):
data = {"table_name": (('index2', ('old_column'),
('new_column')),)}
migrate_engine = mock.Mock()
migrate_engine.name = 'mysql'
with mock.patch('nova.db.sqlalchemy.utils.reflection.Inspector'
'.from_engine') as inspector:
inspector.return_value.get_indexes.return_value = [
{'name': "index2"}]
with mock.patch('nova.db.sqlalchemy.utils.Index') as index:
with mock.patch('nova.db.sqlalchemy.utils.Table') as Table:
index.return_value = mock.Mock()
utils.modify_indexes(migrate_engine, data, True)
self.assertTrue(index.return_value.drop.called)
self.assertTrue(index.return_value.create.called)