Remove SQL Downgrades

SQL Schema Downgrades are no longer supported. This commit removes all
downgrades and attempts to clearly indicate that there was an issue
migrating the schema if a downgrade is requested.

Closes-Bug: 1434103
Change-Id: Ic023c90b354dd796dc78a03500f9680210b38673
This commit is contained in:
Morgan Fainberg 2015-03-25 15:45:44 -07:00
parent 363117d9a6
commit 0d08cac395
43 changed files with 58 additions and 664 deletions

View File

@ -63,7 +63,8 @@ class DbSync(BaseApp):
help=('Migrate the database up to a specified '
'version. If not provided, db_sync will '
'migrate the database to the latest known '
'version.'))
'version. Schema downgrades are not '
'supported.'))
parser.add_argument('--extension', default=None,
help=('Migrate the database for the specified '
'extension. If not provided, db_sync will '

View File

@ -19,7 +19,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -19,7 +19,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -19,7 +19,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -19,7 +19,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -19,7 +19,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -39,11 +39,3 @@ def upgrade(migrate_engine):
if any(i for i in user_group_membership.indexes if
i.columns.keys() == ['group_id'] and i.name != 'group_id'):
sa.Index('group_id', user_group_membership.c.group_id).create()
def downgrade(migrate_engine):
# NOTE(i159): index exists only in MySQL schemas, and got an inconsistent
# name only when MySQL 5.5 renamed it after re-creation
# (during migrations). So we just fixed inconsistency, there is no
# necessity to revert it.
pass

View File

@ -39,11 +39,3 @@ def upgrade(migrate_engine):
mysql_engine='InnoDB',
mysql_charset='utf8')
mapping_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
assignment = sql.Table(MAPPING_TABLE, meta, autoload=True)
assignment.drop(migrate_engine, checkfirst=True)

View File

@ -24,11 +24,3 @@ def upgrade(migrate_engine):
region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
url_column = sql.Column('url', sql.String(255), nullable=True)
region_table.create_column(url_column)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
region_table.drop_column('url')

View File

@ -36,25 +36,9 @@ b. For each endpoint
ii. Assign the id to the region_id column
c. Remove the column region
To Downgrade:
Endpoint Table
a. Add back in the region column
b. For each endpoint
i. Copy the region_id column to the region column
c. Remove the column region_id
Region Table
Decrease the size of the id column in the region table, making sure that
we don't get clashing primary keys.
"""
import migrate
import six
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
@ -90,39 +74,6 @@ def _migrate_to_region_id(migrate_engine, region_table, endpoint_table):
name='fk_endpoint_region_id').create()
def _migrate_to_region(migrate_engine, region_table, endpoint_table):
endpoints = list(endpoint_table.select().execute())
for endpoint in endpoints:
new_values = {'region': endpoint.region_id}
f = endpoint_table.c.id == endpoint.id
update = endpoint_table.update().where(f).values(new_values)
migrate_engine.execute(update)
if 'sqlite' != migrate_engine.name:
migrate.ForeignKeyConstraint(
columns=[endpoint_table.c.region_id],
refcolumns=[region_table.c.id],
name='fk_endpoint_region_id').drop()
endpoint_table.c.region_id.drop()
def _prepare_regions_for_id_truncation(migrate_engine, region_table):
"""Ensure there are no IDs that are bigger than 64 chars.
The size of the id and parent_id fields where increased from 64 to 255
during the upgrade. On downgrade we have to make sure that the ids can
fit in the new column size. For rows with ids greater than this, we have
no choice but to dump them.
"""
for region in list(region_table.select().execute()):
if (len(six.text_type(region.id)) > 64 or
len(six.text_type(region.parent_region_id)) > 64):
delete = region_table.delete(region_table.c.id == region.id)
migrate_engine.execute(delete)
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
@ -138,19 +89,3 @@ def upgrade(migrate_engine):
_migrate_to_region_id(migrate_engine, region_table, endpoint_table)
endpoint_table.c.region.drop()
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
region_table = sql.Table('region', meta, autoload=True)
endpoint_table = sql.Table('endpoint', meta, autoload=True)
region_column = sql.Column('region', sql.String(length=255))
region_column.create(endpoint_table)
_migrate_to_region(migrate_engine, region_table, endpoint_table)
_prepare_regions_for_id_truncation(migrate_engine, region_table)
region_table.c.id.alter(type=sql.String(length=64))
region_table.c.parent_region_id.alter(type=sql.String(length=64))

View File

@ -24,12 +24,3 @@ def upgrade(migrate_engine):
assignment = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
idx = sql.Index('ix_actor_id', assignment.c.actor_id)
idx.create(migrate_engine)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
assignment = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
idx = sql.Index('ix_actor_id', assignment.c.actor_id)
idx.drop(migrate_engine)

View File

@ -23,13 +23,3 @@ def upgrade(migrate_engine):
sql.Index('ix_token_user_id', token.c.user_id).create()
sql.Index('ix_token_trust_id', token.c.trust_id).create()
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
token = sql.Table('token', meta, autoload=True)
sql.Index('ix_token_user_id', token.c.user_id).drop()
sql.Index('ix_token_trust_id', token.c.trust_id).drop()

View File

@ -16,7 +16,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -16,7 +16,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -16,7 +16,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -16,7 +16,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -16,7 +16,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass

View File

@ -38,17 +38,3 @@ def upgrade(migrate_engine):
if migrate_engine.name == 'sqlite':
return
migration_helpers.add_constraints(list_constraints(project_table))
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
# SQLite does not support constraints, and querying the constraints
# raises an exception
if migrate_engine.name != 'sqlite':
migration_helpers.remove_constraints(list_constraints(project_table))
project_table.drop_column(_PARENT_ID_COLUMN_NAME)

View File

@ -33,9 +33,3 @@ def upgrade(migrate_engine):
if migrate_engine.name == 'sqlite':
return
migration_helpers.remove_constraints(list_constraints(migrate_engine))
def downgrade(migrate_engine):
if migrate_engine.name == 'sqlite':
return
migration_helpers.add_constraints(list_constraints(migrate_engine))

View File

@ -21,12 +21,3 @@ def upgrade(migrate_engine):
region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
region_table.drop_column('url')
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
url_column = sql.Column('url', sql.String(255), nullable=True)
region_table.create_column(url_column)

View File

@ -37,9 +37,3 @@ def upgrade(migrate_engine):
if migrate_engine.name == 'sqlite':
return
migration_helpers.remove_constraints(list_constraints(migrate_engine))
def downgrade(migrate_engine):
if migrate_engine.name == 'sqlite':
return
migration_helpers.add_constraints(list_constraints(migrate_engine))

View File

@ -43,13 +43,3 @@ def upgrade(migrate_engine):
mysql_engine='InnoDB',
mysql_charset='utf8')
sensitive_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
table = sql.Table(WHITELIST_TABLE, meta, autoload=True)
table.drop(migrate_engine, checkfirst=True)
table = sql.Table(SENSITIVE_TABLE, meta, autoload=True)
table.drop(migrate_engine, checkfirst=True)

View File

@ -34,10 +34,3 @@ def upgrade(migrate_engine):
f = service_table.c.id == service.id
update = service_table.update().where(f).values(new_values)
migrate_engine.execute(update)
def downgrade(migration_engine):
# The upgrade fixes the data inconsistency for the service name,
# it defaults the value to empty string. There is no necessity
# to revert it.
pass

View File

@ -143,6 +143,7 @@ def _sync_common_repo(version):
abs_path = find_migrate_repo()
init_version = migrate_repo.DB_INIT_VERSION
engine = sql.get_engine()
_assert_not_schema_downgrade(version=version)
migration.db_sync(engine, abs_path, version=version,
init_version=init_version)
@ -176,6 +177,18 @@ def _fix_federation_tables(engine):
engine.execute("SET foreign_key_checks = 1")
def _assert_not_schema_downgrade(extension=None, version=None):
if version is not None:
try:
current_ver = int(six.text_type(get_db_version(extension)))
if int(version) < current_ver:
raise migration.exception.DbMigrationError()
except exceptions.DatabaseNotControlledError:
# NOTE(morganfainberg): The database is not controlled, this action
# cannot be a downgrade.
pass
def _sync_extension_repo(extension, version):
init_version = 0
engine = sql.get_engine()
@ -198,6 +211,9 @@ def _sync_extension_repo(extension, version):
except exception.MigrationNotProvided as e:
print(e)
sys.exit(1)
_assert_not_schema_downgrade(extension=extension, version=version)
try:
migration.db_sync(engine, abs_path, version=version,
init_version=init_version)

View File

@ -36,12 +36,3 @@ def upgrade(migrate_engine):
nullable=False))
endpoint_filtering_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
# Operations to reverse the above upgrade go here.
for table_name in ['project_endpoint']:
table = sql.Table(table_name, meta, autoload=True)
table.drop()

View File

@ -39,13 +39,3 @@ def upgrade(migrate_engine):
sql.PrimaryKeyConstraint('endpoint_group_id',
'project_id'))
project_endpoint_group_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
# Operations to reverse the above upgrade go here.
for table_name in ['project_endpoint_group',
'endpoint_group']:
table = sql.Table(table_name, meta, autoload=True)
table.drop()

View File

@ -38,11 +38,3 @@ def upgrade(migrate_engine):
mysql_charset='utf8')
endpoint_policy_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
# Operations to reverse the above upgrade go here.
table = sql.Table('policy_association', meta, autoload=True)
table.drop()

View File

@ -30,14 +30,3 @@ def upgrade(migrate_engine):
sql.Column('type', sql.String(255)),
sql.Column('extra', sql.Text()))
service_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = sql.MetaData()
meta.bind = migrate_engine
tables = ['example']
for t in tables:
table = sql.Table(t, meta, autoload=True)
table.drop(migrate_engine, checkfirst=True)

View File

@ -40,12 +40,3 @@ def upgrade(migrate_engine):
mysql_charset='utf8')
federation_protocol_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
tables = ['federation_protocol', 'identity_provider']
for table_name in tables:
table = sql.Table(table_name, meta, autoload=True)
table.drop()

View File

@ -25,13 +25,3 @@ def upgrade(migrate_engine):
mysql_engine='InnoDB',
mysql_charset='utf8')
mapping_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
# Drop previously created tables
tables = ['mapping']
for table_name in tables:
table = sql.Table(table_name, meta, autoload=True)
table.drop()

View File

@ -27,9 +27,3 @@ def upgrade(migrate_engine):
values(mapping_id=''))
migrate_engine.execute(stmt)
federation_protocol.c.mapping_id.alter(nullable=False)
def downgrade(migrate_engine):
meta = sa.MetaData(bind=migrate_engine)
federation_protocol = sa.Table('federation_protocol', meta, autoload=True)
federation_protocol.c.mapping_id.alter(nullable=True)

View File

@ -21,10 +21,3 @@ def upgrade(migrate_engine):
idp_table = utils.get_table(migrate_engine, 'identity_provider')
remote_id = sql.Column('remote_id', sql.String(256), nullable=True)
idp_table.create_column(remote_id)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
idp_table = utils.get_table(migrate_engine, 'identity_provider')
idp_table.drop_column('remote_id')

View File

@ -29,10 +29,3 @@ def upgrade(migrate_engine):
mysql_charset='utf8')
sp_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
table = sql.Table('service_provider', meta, autoload=True)
table.drop()

View File

@ -38,11 +38,3 @@ def upgrade(migrate_engine):
sp_table.c.auth_url.alter(nullable=False)
sp_table.c.sp_url.alter(nullable=False)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
sp_table = sql.Table(_SP_TABLE_NAME, meta, autoload=True)
sp_table.c.auth_url.alter(nullable=True)
sp_table.c.sp_url.alter(nullable=True)

View File

@ -55,13 +55,3 @@ def upgrade(migrate_engine):
sql.Column('consumer_id', sql.String(64), nullable=False),
sql.Column('expires_at', sql.String(64), nullable=True))
access_token_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
# Operations to reverse the above upgrade go here.
tables = ['consumer', 'request_token', 'access_token']
for table_name in tables:
table = sql.Table(table_name, meta, autoload=True)
table.drop()

View File

@ -35,20 +35,3 @@ def upgrade(migrate_engine):
'ref_column': consumer_table.c.id}]
if meta.bind != 'sqlite':
migration_helpers.add_constraints(constraints)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
consumer_table = sql.Table('consumer', meta, autoload=True)
request_token_table = sql.Table('request_token', meta, autoload=True)
access_token_table = sql.Table('access_token', meta, autoload=True)
constraints = [{'table': request_token_table,
'fk_column': 'consumer_id',
'ref_column': consumer_table.c.id},
{'table': access_token_table,
'fk_column': 'consumer_id',
'ref_column': consumer_table.c.id}]
if migrate_engine.name != 'sqlite':
migration_helpers.remove_constraints(constraints)

View File

@ -20,10 +20,3 @@ def upgrade(migrate_engine):
meta.bind = migrate_engine
user_table = sql.Table('consumer', meta, autoload=True)
user_table.c.description.alter(nullable=True)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
user_table = sql.Table('consumer', meta, autoload=True)
user_table.c.description.alter(nullable=False)

View File

@ -23,13 +23,3 @@ def upgrade(migrate_engine):
request_token_table.c.requested_roles.alter(name="role_ids")
access_token_table = sql.Table('access_token', meta, autoload=True)
access_token_table.c.requested_roles.alter(name="role_ids")
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
request_token_table = sql.Table('request_token', meta, autoload=True)
request_token_table.c.role_ids.alter(nullable=False)
request_token_table.c.role_ids.alter(name="requested_roles")
access_token_table = sql.Table('access_token', meta, autoload=True)
access_token_table.c.role_ids.alter(name="requested_roles")

View File

@ -32,11 +32,3 @@ def upgrade(migrate_engine):
# with the new name. This can be considered as renaming under the
# MySQL rules.
sa.Index('consumer_id', table.c.consumer_id).create()
def downgrade(migrate_engine):
# NOTE(i159): index exists only in MySQL schemas, and got an inconsistent
# name only when MySQL 5.5 renamed it after re-creation
# (during migrations). So we just fixed inconsistency, there is no
# necessity to revert it.
pass

View File

@ -34,14 +34,3 @@ def upgrade(migrate_engine):
sql.Column('expires_at', sql.DateTime()),
sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False))
service_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = sql.MetaData()
meta.bind = migrate_engine
tables = ['revocation_event']
for t in tables:
table = sql.Table(t, meta, autoload=True)
table.drop(migrate_engine, checkfirst=True)

View File

@ -26,12 +26,3 @@ def upgrade(migrate_engine):
nullable=True)
event_table.create_column(audit_id_column)
event_table.create_column(audit_chain_column)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
event_table = sql.Table(_TABLE_NAME, meta, autoload=True)
event_table.drop_column('audit_id')
event_table.drop_column('audit_chain_id')

View File

@ -53,12 +53,6 @@ class SqlUpgradeExampleExtension(test_sql_upgrade.SqlMigrateBase):
self.upgrade(1, repository=self.repo_path)
self.assertTableColumns('example', ['id', 'type', 'extra'])
def test_downgrade(self):
self.upgrade(1, repository=self.repo_path)
self.assertTableColumns('example', ['id', 'type', 'extra'])
self.downgrade(0, repository=self.repo_path)
self.assertTableDoesNotExist('example')
class SqlUpgradeOAuth1Extension(test_sql_upgrade.SqlMigrateBase):
def repo_package(self):
@ -68,10 +62,6 @@ class SqlUpgradeOAuth1Extension(test_sql_upgrade.SqlMigrateBase):
super(SqlUpgradeOAuth1Extension, self).upgrade(
version, repository=self.repo_path)
def downgrade(self, version):
super(SqlUpgradeOAuth1Extension, self).downgrade(
version, repository=self.repo_path)
def _assert_v1_3_tables(self):
self.assertTableColumns('consumer',
['id',
@ -136,18 +126,6 @@ class SqlUpgradeOAuth1Extension(test_sql_upgrade.SqlMigrateBase):
self.upgrade(5)
self._assert_v4_later_tables()
def test_downgrade(self):
self.upgrade(5)
self._assert_v4_later_tables()
self.downgrade(3)
self._assert_v1_3_tables()
self.downgrade(1)
self._assert_v1_3_tables()
self.downgrade(0)
self.assertTableDoesNotExist('consumer')
self.assertTableDoesNotExist('request_token')
self.assertTableDoesNotExist('access_token')
class EndpointFilterExtension(test_sql_upgrade.SqlMigrateBase):
def repo_package(self):
@ -157,10 +135,6 @@ class EndpointFilterExtension(test_sql_upgrade.SqlMigrateBase):
super(EndpointFilterExtension, self).upgrade(
version, repository=self.repo_path)
def downgrade(self, version):
super(EndpointFilterExtension, self).downgrade(
version, repository=self.repo_path)
def _assert_v1_tables(self):
self.assertTableColumns('project_endpoint',
['endpoint_id', 'project_id'])
@ -184,14 +158,6 @@ class EndpointFilterExtension(test_sql_upgrade.SqlMigrateBase):
self.upgrade(2)
self._assert_v2_tables()
def test_downgrade(self):
self.upgrade(2)
self._assert_v2_tables()
self.downgrade(1)
self._assert_v1_tables()
self.downgrade(0)
self.assertTableDoesNotExist('project_endpoint')
class EndpointPolicyExtension(test_sql_upgrade.SqlMigrateBase):
def repo_package(self):
@ -204,14 +170,6 @@ class EndpointPolicyExtension(test_sql_upgrade.SqlMigrateBase):
['id', 'policy_id', 'endpoint_id',
'service_id', 'region_id'])
def test_downgrade(self):
self.upgrade(1, repository=self.repo_path)
self.assertTableColumns('policy_association',
['id', 'policy_id', 'endpoint_id',
'service_id', 'region_id'])
self.downgrade(0, repository=self.repo_path)
self.assertTableDoesNotExist('policy_association')
class FederationExtension(test_sql_upgrade.SqlMigrateBase):
"""Test class for ensuring the Federation SQL."""
@ -264,27 +222,7 @@ class FederationExtension(test_sql_upgrade.SqlMigrateBase):
'federation_protocol')
self.assertFalse(federation_protocol.c.mapping_id.nullable)
def test_downgrade(self):
self.upgrade(3, repository=self.repo_path)
self.assertTableColumns(self.identity_provider,
['id', 'enabled', 'description'])
self.assertTableColumns(self.federation_protocol,
['id', 'idp_id', 'mapping_id'])
self.assertTableColumns(self.mapping,
['id', 'rules'])
self.downgrade(2, repository=self.repo_path)
federation_protocol = utils.get_table(
self.engine,
'federation_protocol')
self.assertTrue(federation_protocol.c.mapping_id.nullable)
self.downgrade(0, repository=self.repo_path)
self.assertTableDoesNotExist(self.identity_provider)
self.assertTableDoesNotExist(self.federation_protocol)
self.assertTableDoesNotExist(self.mapping)
def test_fixup_service_provider_attributes(self):
def test_service_provider_attributes_cannot_be_null(self):
self.upgrade(6, repository=self.repo_path)
self.assertTableColumns(self.service_provider,
['id', 'description', 'enabled', 'auth_url',
@ -325,12 +263,28 @@ class FederationExtension(test_sql_upgrade.SqlMigrateBase):
sp3)
session.close()
self.downgrade(5, repository=self.repo_path)
def test_fixup_service_provider_attributes(self):
session = self.Session()
sp1 = {'id': uuid.uuid4().hex,
'auth_url': None,
'sp_url': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True}
sp2 = {'id': uuid.uuid4().hex,
'auth_url': uuid.uuid4().hex,
'sp_url': None,
'description': uuid.uuid4().hex,
'enabled': True}
sp3 = {'id': uuid.uuid4().hex,
'auth_url': None,
'sp_url': None,
'description': uuid.uuid4().hex,
'enabled': True}
self.upgrade(5, repository=self.repo_path)
self.assertTableColumns(self.service_provider,
['id', 'description', 'enabled', 'auth_url',
'sp_url'])
session = self.Session()
self.metadata.clear()
# Before the migration, the table should accept null values
self.insert_dict(session, self.service_provider, sp1)
@ -356,13 +310,14 @@ class FederationExtension(test_sql_upgrade.SqlMigrateBase):
self.assertEqual('', sp.auth_url)
self.assertEqual('', sp.sp_url)
_REVOKE_COLUMN_NAMES = ['id', 'domain_id', 'project_id', 'user_id', 'role_id',
'trust_id', 'consumer_id', 'access_token_id',
'issued_before', 'expires_at', 'revoked_at']
class RevokeExtension(test_sql_upgrade.SqlMigrateBase):
_REVOKE_COLUMN_NAMES = ['id', 'domain_id', 'project_id', 'user_id',
'role_id', 'trust_id', 'consumer_id',
'access_token_id', 'issued_before', 'expires_at',
'revoked_at']
def repo_package(self):
return revoke
@ -370,11 +325,4 @@ class RevokeExtension(test_sql_upgrade.SqlMigrateBase):
self.assertTableDoesNotExist('revocation_event')
self.upgrade(1, repository=self.repo_path)
self.assertTableColumns('revocation_event',
_REVOKE_COLUMN_NAMES)
def test_downgrade(self):
self.upgrade(1, repository=self.repo_path)
self.assertTableColumns('revocation_event',
_REVOKE_COLUMN_NAMES)
self.downgrade(0, repository=self.repo_path)
self.assertTableDoesNotExist('revocation_event')
self._REVOKE_COLUMN_NAMES)

View File

@ -230,9 +230,6 @@ class SqlMigrateBase(tests.SQLDriverOverrides, tests.TestCase):
def upgrade(self, *args, **kwargs):
self._migrate(*args, **kwargs)
def downgrade(self, *args, **kwargs):
self._migrate(*args, downgrade=True, **kwargs)
def _migrate(self, version, repository=None, downgrade=False,
current_schema=None):
repository = repository or self.repo_path
@ -278,42 +275,6 @@ class SqlUpgradeTests(SqlMigrateBase):
version,
'DB is not at version %s' % migrate_repo.DB_INIT_VERSION)
def test_two_steps_forward_one_step_back(self):
"""You should be able to cleanly undo and re-apply all upgrades.
Upgrades are run in the following order::
Starting with the initial version defined at
keystone.common.migrate_repo.DB_INIT_VERSION
INIT +1 -> INIT +2 -> INIT +1 -> INIT +2 -> INIT +3 -> INIT +2 ...
^---------------------^ ^---------------------^
Downgrade to the DB_INIT_VERSION does not occur based on the
requirement that the base version be DB_INIT_VERSION + 1 before
migration can occur. Downgrade below DB_INIT_VERSION + 1 is no longer
supported.
DB_INIT_VERSION is the number preceding the release schema version from
two releases prior. Example, Juno releases with the DB_INIT_VERSION
being 35 where Havana (Havana was two releases before Juno) release
schema version is 36.
The migrate utility requires the db must be initialized under version
control with the revision directly before the first version to be
applied.
"""
for x in range(migrate_repo.DB_INIT_VERSION + 1,
self.max_version + 1):
self.upgrade(x)
downgrade_ver = x - 1
# Don't actually downgrade to the init version. This will raise
# a not-implemented error.
if downgrade_ver != migrate_repo.DB_INIT_VERSION:
self.downgrade(x - 1)
self.upgrade(x)
def test_upgrade_add_initial_tables(self):
self.upgrade(migrate_repo.DB_INIT_VERSION + 1)
self.check_initial_table_structure()
@ -338,32 +299,6 @@ class SqlUpgradeTests(SqlMigrateBase):
for k in default_domain.keys():
self.assertEqual(default_domain[k], getattr(refs[0], k))
def test_downgrade_to_db_init_version(self):
self.upgrade(self.max_version)
if self.engine.name == 'mysql':
self._mysql_check_all_tables_innodb()
self.downgrade(migrate_repo.DB_INIT_VERSION + 1)
self.check_initial_table_structure()
meta = sqlalchemy.MetaData()
meta.bind = self.engine
meta.reflect(self.engine)
initial_table_set = set(INITIAL_TABLE_STRUCTURE.keys())
table_set = set(meta.tables.keys())
# explicitly remove the migrate_version table, this is not controlled
# by the migration scripts and should be exempt from this check.
table_set.remove('migrate_version')
self.assertSetEqual(initial_table_set, table_set)
# Downgrade to before Icehouse's release schema version (044) is not
# supported. A NotImplementedError should be raised when attempting to
# downgrade.
self.assertRaises(NotImplementedError, self.downgrade,
migrate_repo.DB_INIT_VERSION)
def insert_dict(self, session, table_name, d, table=None):
"""Naively inserts key-value pairs into a table, given a dictionary."""
if table is None:
@ -380,8 +315,6 @@ class SqlUpgradeTests(SqlMigrateBase):
self.assertTableDoesNotExist('id_mapping')
self.upgrade(51)
self.assertTableExists('id_mapping')
self.downgrade(50)
self.assertTableDoesNotExist('id_mapping')
def test_region_url_upgrade(self):
self.upgrade(52)
@ -389,42 +322,6 @@ class SqlUpgradeTests(SqlMigrateBase):
['id', 'description', 'parent_region_id',
'extra', 'url'])
def test_region_url_downgrade(self):
self.upgrade(52)
self.downgrade(51)
self.assertTableColumns('region',
['id', 'description', 'parent_region_id',
'extra'])
def test_region_url_cleanup(self):
# make sure that the url field is dropped in the downgrade
self.upgrade(52)
session = self.Session()
beta = {
'id': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'parent_region_id': uuid.uuid4().hex,
'url': uuid.uuid4().hex
}
acme = {
'id': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'parent_region_id': uuid.uuid4().hex,
'url': None
}
self.insert_dict(session, 'region', beta)
self.insert_dict(session, 'region', acme)
region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
self.assertEqual(2, session.query(region_table).count())
session.close()
self.downgrade(51)
session = self.Session()
self.metadata.clear()
region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
self.assertEqual(2, session.query(region_table).count())
region = session.query(region_table)[0]
self.assertRaises(AttributeError, getattr, region, 'url')
def test_endpoint_region_upgrade_columns(self):
self.upgrade(53)
self.assertTableColumns('endpoint',
@ -439,21 +336,6 @@ class SqlUpgradeTests(SqlMigrateBase):
autoload=True)
self.assertEqual(255, endpoint_table.c.region_id.type.length)
def test_endpoint_region_downgrade_columns(self):
self.upgrade(53)
self.downgrade(52)
self.assertTableColumns('endpoint',
['id', 'legacy_endpoint_id', 'interface',
'service_id', 'url', 'extra', 'enabled',
'region'])
region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
self.assertEqual(64, region_table.c.id.type.length)
self.assertEqual(64, region_table.c.parent_region_id.type.length)
endpoint_table = sqlalchemy.Table('endpoint',
self.metadata,
autoload=True)
self.assertEqual(255, endpoint_table.c.region.type.length)
def test_endpoint_region_migration(self):
self.upgrade(52)
session = self.Session()
@ -519,28 +401,6 @@ class SqlUpgradeTests(SqlMigrateBase):
self.assertEqual(1, session.query(endpoint_table).
filter_by(region_id=_small_region_name).count())
# downgrade to 52
session.close()
self.downgrade(52)
session = self.Session()
self.metadata.clear()
region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
self.assertEqual(1, session.query(region_table).count())
self.assertEqual(1, session.query(region_table).
filter_by(id=_small_region_name).count())
endpoint_table = sqlalchemy.Table('endpoint',
self.metadata,
autoload=True)
self.assertEqual(5, session.query(endpoint_table).count())
self.assertEqual(2, session.query(endpoint_table).
filter_by(region=_long_region_name).count())
self.assertEqual(1, session.query(endpoint_table).
filter_by(region=_clashing_region_name).count())
self.assertEqual(1, session.query(endpoint_table).
filter_by(region=_small_region_name).count())
def test_add_actor_id_index(self):
self.upgrade(53)
self.upgrade(54)
@ -556,69 +416,12 @@ class SqlUpgradeTests(SqlMigrateBase):
self.assertIn(('ix_token_user_id', ['user_id']), index_data)
self.assertIn(('ix_token_trust_id', ['trust_id']), index_data)
def test_token_user_id_and_trust_id_index_downgrade(self):
self.upgrade(55)
self.downgrade(54)
table = sqlalchemy.Table('token', self.metadata, autoload=True)
index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes]
self.assertNotIn(('ix_token_user_id', ['user_id']), index_data)
self.assertNotIn(('ix_token_trust_id', ['trust_id']), index_data)
def test_remove_actor_id_index(self):
self.upgrade(54)
self.downgrade(53)
table = sqlalchemy.Table('assignment', self.metadata, autoload=True)
index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes]
self.assertNotIn(('ix_actor_id', ['actor_id']), index_data)
def test_project_parent_id_upgrade(self):
self.upgrade(61)
self.assertTableColumns('project',
['id', 'name', 'extra', 'description',
'enabled', 'domain_id', 'parent_id'])
def test_project_parent_id_downgrade(self):
self.upgrade(61)
self.downgrade(60)
self.assertTableColumns('project',
['id', 'name', 'extra', 'description',
'enabled', 'domain_id'])
def test_project_parent_id_cleanup(self):
# make sure that the parent_id field is dropped in the downgrade
self.upgrade(61)
session = self.Session()
domain = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True}
acme = {
'id': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'domain_id': domain['id'],
'name': uuid.uuid4().hex,
'parent_id': None
}
beta = {
'id': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'domain_id': domain['id'],
'name': uuid.uuid4().hex,
'parent_id': acme['id']
}
self.insert_dict(session, 'domain', domain)
self.insert_dict(session, 'project', acme)
self.insert_dict(session, 'project', beta)
proj_table = sqlalchemy.Table('project', self.metadata, autoload=True)
self.assertEqual(2, session.query(proj_table).count())
session.close()
self.downgrade(60)
session = self.Session()
self.metadata.clear()
proj_table = sqlalchemy.Table('project', self.metadata, autoload=True)
self.assertEqual(2, session.query(proj_table).count())
project = session.query(proj_table)[0]
self.assertRaises(AttributeError, getattr, project, 'parent_id')
def test_drop_assignment_role_fk(self):
self.upgrade(61)
self.assertTrue(self.does_fk_exist('assignment', 'role_id'))
@ -626,8 +429,6 @@ class SqlUpgradeTests(SqlMigrateBase):
if self.engine.name != 'sqlite':
# sqlite does not support FK deletions (or enforcement)
self.assertFalse(self.does_fk_exist('assignment', 'role_id'))
self.downgrade(61)
self.assertTrue(self.does_fk_exist('assignment', 'role_id'))
def does_fk_exist(self, table, fk_column):
inspector = reflection.Inspector.from_engine(self.engine)
@ -642,14 +443,7 @@ class SqlUpgradeTests(SqlMigrateBase):
['id', 'description', 'parent_region_id',
'extra'])
def test_drop_region_url_downgrade(self):
self.upgrade(63)
self.downgrade(62)
self.assertTableColumns('region',
['id', 'description', 'parent_region_id',
'extra', 'url'])
def test_drop_domain_fk(self):
def test_domain_fk(self):
self.upgrade(63)
self.assertTrue(self.does_fk_exist('group', 'domain_id'))
self.assertTrue(self.does_fk_exist('user', 'domain_id'))
@ -658,9 +452,6 @@ class SqlUpgradeTests(SqlMigrateBase):
# sqlite does not support FK deletions (or enforcement)
self.assertFalse(self.does_fk_exist('group', 'domain_id'))
self.assertFalse(self.does_fk_exist('user', 'domain_id'))
self.downgrade(63)
self.assertTrue(self.does_fk_exist('group', 'domain_id'))
self.assertTrue(self.does_fk_exist('user', 'domain_id'))
def test_add_domain_config(self):
whitelisted_table = 'whitelisted_config'
@ -673,9 +464,6 @@ class SqlUpgradeTests(SqlMigrateBase):
['domain_id', 'group', 'option', 'value'])
self.assertTableColumns(sensitive_table,
['domain_id', 'group', 'option', 'value'])
self.downgrade(64)
self.assertTableDoesNotExist(whitelisted_table)
self.assertTableDoesNotExist(sensitive_table)
def test_fixup_service_name_value_upgrade(self):
"""Update service name data from `extra` to empty string."""
@ -881,6 +669,13 @@ class VersionTests(SqlMigrateBase):
version = migration_helpers.get_db_version()
self.assertEqual(self.max_version, version)
def test_assert_not_schema_downgrade(self):
self.upgrade(self.max_version)
self.assertRaises(
db_exception.DbMigrationError,
migration_helpers._sync_common_repo,
self.max_version - 1)
def test_extension_not_controlled(self):
"""When get the version before controlling, raises DbMigrationError."""
self.assertRaises(db_exception.DbMigrationError,
@ -906,21 +701,12 @@ class VersionTests(SqlMigrateBase):
self.assertTrue(
version > 0,
"Version for %s didn't change after migrated?" % name)
def test_extension_downgraded(self):
"""When get the version after downgrading an extension, it is 0."""
for name, extension in six.iteritems(EXTENSIONS):
abs_path = migration_helpers.find_migrate_repo(extension)
migration.db_version_control(sql.get_engine(), abs_path)
migration.db_sync(sql.get_engine(), abs_path)
version = migration_helpers.get_db_version(extension=name)
self.assertTrue(
version > 0,
"Version for %s didn't change after migrated?" % name)
migration.db_sync(sql.get_engine(), abs_path, version=0)
version = migration_helpers.get_db_version(extension=name)
self.assertEqual(0, version,
'Migrate version for %s is not 0' % name)
# Verify downgrades cannot occur
self.assertRaises(
db_exception.DbMigrationError,
migration_helpers._sync_extension_repo,
extension=name,
version=0)
def test_unexpected_extension(self):
"""The version for an extension that doesn't exist raises ImportError.