Drop support for database downgrades

Remove downgrade from migrations and tests

Related cross-project spec:
http://specs.openstack.org/openstack/openstack-specs/specs/no-downward-sql-migration.html

Change-Id: Ib286cfdc6df19506e33194337ce8acb58991a139
Closes-bug: #1434103
This commit is contained in:
Sergey Reshetnyak 2015-03-24 15:03:01 +03:00
parent f6defa6898
commit ccc884aabd
26 changed files with 13 additions and 199 deletions

View File

@ -16,7 +16,7 @@ under the License.
The migrations in `alembic_migrations/versions` contain the changes needed to migrate
between Sahara database revisions. A migration occurs by executing a script that
details the changes needed to upgrade or downgrade the database. The migration scripts
details the changes needed to upgrade the database. The migration scripts
are ordered so that multiple scripts can run sequentially. The scripts are executed by
Sahara's migration wrapper which uses the Alembic library to manage the migration. Sahara
supports migration from Icehouse or later.
@ -46,11 +46,6 @@ Upgrade the database incrementally:
$ sahara-db-manage --config-file /path/to/sahara.conf upgrade --delta <# of revs>
```
Downgrade the database by a certain number of revisions:
```
$ sahara-db-manage --config-file /path/to/sahara.conf downgrade --delta <# of revs>
```
Create new revision:
```
$ sahara-db-manage --config-file /path/to/sahara.conf revision -m "description of revision" --autogenerate

View File

@ -32,7 +32,3 @@ ${imports if imports else ""}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}

View File

@ -332,19 +332,3 @@ def upgrade():
sa.UniqueConstraint('instance_id', 'node_group_id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
def downgrade():
op.drop_table('instances')
op.drop_table('node_groups')
op.drop_table('job_executions')
op.drop_table('mains_association')
op.drop_table('templates_relations')
op.drop_table('clusters')
op.drop_table('libs_association')
op.drop_table('data_sources')
op.drop_table('job_binaries')
op.drop_table('job_binary_internal')
op.drop_table('cluster_templates')
op.drop_table('node_group_templates')
op.drop_table('jobs')

View File

@ -28,7 +28,3 @@ down_revision = '001'
def upgrade():
pass
def downgrade():
pass

View File

@ -28,7 +28,3 @@ down_revision = '002'
def upgrade():
pass
def downgrade():
pass

View File

@ -28,7 +28,3 @@ down_revision = '003'
def upgrade():
pass
def downgrade():
pass

View File

@ -28,7 +28,3 @@ down_revision = '004'
def upgrade():
pass
def downgrade():
pass

View File

@ -28,7 +28,3 @@ down_revision = '005'
def upgrade():
pass
def downgrade():
pass

View File

@ -26,7 +26,6 @@ revision = '007'
down_revision = '006'
from alembic import op
import sqlalchemy as sa
from sahara.db.sqlalchemy import types as st
@ -35,9 +34,3 @@ def upgrade():
op.alter_column('clusters', 'status_description',
type_=st.LongText(), existing_nullable=True,
existing_server_default=None)
def downgrade():
op.alter_column('clusters', 'status_description',
type_=sa.String(length=200), existing_nullable=True,
existing_server_default=None)

View File

@ -38,9 +38,3 @@ def upgrade():
sa.Column('security_groups', st.JsonEncoded()))
op.add_column('templates_relations',
sa.Column('security_groups', st.JsonEncoded()))
def downgrade():
op.drop_column('node_group_templates', 'security_groups')
op.drop_column('node_groups', 'security_groups')
op.drop_column('templates_relations', 'security_groups')

View File

@ -34,7 +34,3 @@ from sahara.db.sqlalchemy import types as st
def upgrade():
op.add_column('clusters',
sa.Column('rollback_info', st.JsonEncoded()))
def downgrade():
op.drop_column('clusters', 'rollback_info')

View File

@ -40,10 +40,3 @@ def upgrade():
sa.Column('auto_security_group', sa.Boolean()))
op.add_column('node_groups',
sa.Column('open_ports', st.JsonEncoded()))
def downgrade():
op.drop_column('node_group_templates', 'auto_security_group')
op.drop_column('node_groups', 'auto_security_group')
op.drop_column('templates_relations', 'auto_security_group')
op.drop_column('node_groups', 'open_ports')

View File

@ -34,7 +34,3 @@ from sahara.db.sqlalchemy import types as st
def upgrade():
op.add_column('clusters',
sa.Column('sahara_info', st.JsonEncoded()))
def downgrade():
op.drop_column('clusters', 'sahara_info')

View File

@ -37,9 +37,3 @@ def upgrade():
sa.Column('availability_zone', sa.String(length=255)))
op.add_column('templates_relations',
sa.Column('availability_zone', sa.String(length=255)))
def downgrade():
op.drop_column('node_group_templates', 'availability_zone')
op.drop_column('node_groups', 'availability_zone')
op.drop_column('templates_relations', 'availability_zone')

View File

@ -38,9 +38,3 @@ def upgrade():
sa.String(length=255)))
op.add_column('templates_relations', sa.Column('volumes_availability_zone',
sa.String(length=255)))
def downgrade():
op.drop_column('node_group_templates', 'volumes_availability_zone')
op.drop_column('node_groups', 'volumes_availability_zone')
op.drop_column('templates_relations', 'volumes_availability_zone')

View File

@ -39,9 +39,3 @@ def upgrade():
op.add_column('templates_relations',
sa.Column('volume_type', sa.String(length=255),
nullable=True))
def downgrade():
op.drop_column('templates_relations', 'volume_type')
op.drop_column('node_groups', 'volume_type')
op.drop_column('node_group_templates', 'volume_type')

View File

@ -91,8 +91,3 @@ def upgrade():
sa.UniqueConstraint('id', 'step_id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
def downgrade():
op.drop_table('cluster_events')
op.drop_table('cluster_provision_steps')

View File

@ -36,9 +36,3 @@ def upgrade():
sa.Column('is_proxy_gateway', sa.Boolean()))
op.add_column('templates_relations',
sa.Column('is_proxy_gateway', sa.Boolean()))
def downgrade():
op.drop_column('templates_relations', 'is_proxy_gateway')
op.drop_column('node_groups', 'is_proxy_gateway')
op.drop_column('node_group_templates', 'is_proxy_gateway')

View File

@ -26,13 +26,7 @@ revision = '017'
down_revision = '016'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_column('job_executions', 'progress')
def downgrade():
op.add_column('job_executions',
sa.Column('progress', sa.FLOAT(), nullable=True))

View File

@ -36,9 +36,3 @@ def upgrade():
sa.Column('volume_local_to_instance', sa.Boolean()))
op.add_column('templates_relations',
sa.Column('volume_local_to_instance', sa.Boolean()))
def downgrade():
op.drop_column('node_group_templates', 'volume_local_to_instance')
op.drop_column('node_groups', 'volume_local_to_instance')
op.drop_column('templates_relations', 'volume_local_to_instance')

View File

@ -34,8 +34,3 @@ def upgrade():
sa.Column('is_default', sa.Boolean(), nullable=True))
op.add_column('node_group_templates',
sa.Column('is_default', sa.Boolean(), nullable=True))
def downgrade():
op.drop_column('node_group_templates', 'is_default')
op.drop_column('cluster_templates', 'is_default')

View File

@ -26,19 +26,9 @@ revision = '020'
down_revision = '019'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_column('cluster_provision_steps', 'completed_at')
op.drop_column('cluster_provision_steps', 'completed')
op.drop_column('cluster_provision_steps', 'started_at')
def downgrade():
op.add_column('cluster_provision_steps',
sa.Column('completed', sa.Integer(), nullable=True))
op.add_column('cluster_provision_steps',
sa.Column('started_at', sa.DateTime(), nullable=True))
op.add_column('cluster_provision_steps',
sa.Column('completed_at', sa.DateTime(), nullable=True))

View File

@ -70,7 +70,7 @@ def add_command_parsers(subparsers):
parser = subparsers.add_parser('check_migration')
parser.set_defaults(func=do_check_migration)
for name in ['upgrade', 'downgrade']:
for name in ['upgrade']:
parser = subparsers.add_parser(name)
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')

View File

@ -65,19 +65,7 @@ class TestCli(testtools.TestCase):
dict(argv=['prog', 'upgrade', '--delta', '3'],
func_name='upgrade',
exp_args=('+3',),
exp_kwargs={'sql': False})),
('downgrade-sql',
dict(argv=['prog', 'downgrade', '--sql', 'folsom'],
func_name='downgrade',
exp_args=('folsom',),
exp_kwargs={'sql': True})),
('downgrade-delta',
dict(argv=['prog', 'downgrade', '--delta', '2'],
func_name='downgrade',
exp_args=('-2',),
exp_kwargs={'sql': False})),
exp_kwargs={'sql': False}))
]
def setUp(self):

View File

@ -39,9 +39,6 @@ from sahara.tests.unit.db.migration import test_migrations_base as base
class SaharaMigrationsCheckers(object):
snake_walk = True
downgrade = True
def assertColumnExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
self.assertIn(column, t.c)
@ -76,7 +73,7 @@ class SaharaMigrationsCheckers(object):
self.assertEqual(sorted(members), sorted(index_columns))
def test_walk_versions(self):
self.walk_versions(self.engine, self.snake_walk, self.downgrade)
self.walk_versions(self.engine)
def _pre_upgrade_001(self, engine):
# Anything returned from this method will be

View File

@ -78,54 +78,33 @@ class BaseWalkMigrationTestCase(object):
sa.cleanup()
return res
def _up_and_down_versions(self):
"""Stores a tuple of versions.
def _get_versions(self):
"""Stores a list of versions.
Since alembic version has a random algorithm of generation
(SA-migrate has an ordered autoincrement naming) we should store
a tuple of versions (version for upgrade and version for downgrade)
for successful testing of migrations in up>down>up mode.
a list of versions (version for upgrade)
for successful testing of migrations in up mode.
"""
env = alembic_script.ScriptDirectory.from_config(self.ALEMBIC_CONFIG)
versions = []
for rev in env.walk_revisions():
versions.append((rev.revision, rev.down_revision or '-1'))
versions.append(rev.revision)
versions.reverse()
return versions
def walk_versions(self, engine=None, snake_walk=False, downgrade=True):
def walk_versions(self, engine=None):
# Determine latest version script from the repo, then
# upgrade from 1 through to the latest, with no data
# in the databases. This just checks that the schema itself
# upgrades successfully.
self._configure(engine)
up_and_down_versions = self._up_and_down_versions()
for ver_up, ver_down in up_and_down_versions:
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, ver_up, with_data=True)
if snake_walk:
downgraded = self._migrate_down(engine,
ver_down,
with_data=True,
next_version=ver_up)
if downgraded:
self._migrate_up(engine, ver_up)
if downgrade:
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
up_and_down_versions.reverse()
for ver_up, ver_down in up_and_down_versions:
# downgrade -> upgrade -> downgrade
downgraded = self._migrate_down(engine,
ver_down, next_version=ver_up)
if snake_walk and downgraded:
self._migrate_up(engine, ver_up)
self._migrate_down(engine, ver_down, next_version=ver_up)
versions = self._get_versions()
for ver in versions:
self._migrate_up(engine, ver, with_data=True)
def _get_version_from_db(self, engine):
"""Returns latest version from db for each type of migrate repo."""
@ -146,27 +125,6 @@ class BaseWalkMigrationTestCase(object):
self._alembic_command(cmd, engine, self.ALEMBIC_CONFIG, version)
def _migrate_down(self, engine, version, with_data=False,
next_version=None):
try:
self._migrate(engine, version, 'downgrade')
except NotImplementedError:
# NOTE(sirp): some migrations, namely release-level
# migrations, don't support a downgrade.
return False
self.assertEqual(version, self._get_version_from_db(engine))
# NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
# version). So if we have any downgrade checks, they need to be run for
# the previous (higher numbered) migration.
if with_data:
post_downgrade = getattr(
self, "_post_downgrade_%s" % next_version, None)
if post_downgrade:
post_downgrade(engine)
return True
def _migrate_up(self, engine, version, with_data=False):
"""migrate up to a new version of the db.