db: update migrations to use SQLA 2.0 patterns

All tests in test_migration.py pass with either SQLAlchemy 1.4
or SQLAlchemy 2.0 installed.

Change-Id: Ie8831c04b7b4515deea27b1ceb472d07cda91ca0
Mike Bayer 2024-03-28 18:02:00 -04:00
parent cae5ac7be1
commit 0ce2857d0f
20 changed files with 1115 additions and 1069 deletions
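Before the per-file hunks, a minimal self-contained sketch of the 2.0 idioms this commit applies throughout: no MetaData.bind, explicit connections, Row._mapping for dict-style row access, autoload_with for reflection, and Session as a context manager. The users table below is invented for illustration and is not part of the Manila schema.

import sqlalchemy as sa
from sqlalchemy import orm

engine = sa.create_engine("sqlite://")
meta = sa.MetaData()  # 2.0: MetaData no longer has a .bind attribute
users = sa.Table("users", meta, sa.Column("id", sa.Integer, primary_key=True))
meta.create_all(engine)  # the bind is passed explicitly

with engine.begin() as conn:
    conn.execute(users.insert().values(id=1))
    row = conn.execute(users.select()).first()
    # 2.0: rows are tuple-like; dict-style access goes through ._mapping
    assert row._mapping["id"] == row.id == 1
    # 2.0: reflection takes autoload_with= (autoload=True was removed)
    reflected = sa.Table("users", sa.MetaData(), autoload_with=conn)

# 2.0: Session is a context manager; Session.close_all() is gone
with orm.Session(bind=engine) as session:
    session.execute(sa.text("SELECT 1"))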

View File

@@ -26,15 +26,12 @@ from alembic import op
from manila.db.migrations import utils
from oslo_log import log
from oslo_utils import timeutils
from sqlalchemy import MetaData
LOG = log.getLogger(__name__)
def upgrade():
meta = MetaData()
meta.bind = op.get_bind()
connection = op.get_bind().connect()
connection = op.get_bind()
quota_classes_table = utils.load_table('quota_classes', connection)
try:
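op.get_bind() already returns the Connection the migration runs on, and SQLAlchemy 2.0 removed the Connection.connect() "branching" call, so the extra hop is dropped. A sketch of the resulting shape, reusing the migration's own imports:

from alembic import op
from manila.db.migrations import utils

def upgrade():
    connection = op.get_bind()  # a sqlalchemy.engine.Connection, used as-is
    quota_classes_table = utils.load_table('quota_classes', connection)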

View File

@@ -35,7 +35,6 @@ LOG = log.getLogger(__name__)
def upgrade():
migrate_engine = op.get_bind().engine
meta = MetaData()
meta.bind = migrate_engine
services = Table(
'services', meta,
@@ -76,16 +75,10 @@ def upgrade():
Column('deleted', Integer, default=0),
Column('id', Integer(), primary_key=True),
Column('class_name',
String(length=255,
convert_unicode=True,
unicode_error=None,
_warn_on_bytestring=False),
String(length=255),
index=True),
Column('resource',
String(length=255,
convert_unicode=True,
unicode_error=None,
_warn_on_bytestring=False)),
String(length=255)),
Column('hard_limit', Integer(), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
@@ -100,14 +93,10 @@ def upgrade():
Column('id', Integer(), primary_key=True),
Column('user_id', String(length=255)),
Column('project_id',
String(length=255, convert_unicode=True,
unicode_error=None,
_warn_on_bytestring=False),
String(length=255),
index=True),
Column('resource',
String(length=255, convert_unicode=True,
unicode_error=None,
_warn_on_bytestring=False)),
String(length=255)),
Column('in_use', Integer(), nullable=False),
Column('reserved', Integer(), nullable=False),
Column('until_refresh', Integer(), nullable=True),
@@ -124,21 +113,14 @@ def upgrade():
Column('id', Integer(), primary_key=True),
Column('user_id', String(length=255)),
Column('uuid',
String(length=36,
convert_unicode=True,
unicode_error=None,
_warn_on_bytestring=False),
String(length=36),
nullable=False),
Column('usage_id', Integer(), nullable=False),
Column('project_id',
String(length=255, convert_unicode=True,
unicode_error=None,
_warn_on_bytestring=False),
String(length=255),
index=True),
Column('resource',
String(length=255, convert_unicode=True,
unicode_error=None,
_warn_on_bytestring=False)),
String(length=255)),
Column('delta', Integer(), nullable=False),
Column('expire', DateTime(timezone=False)),
ForeignKeyConstraint(['usage_id'], ['quota_usages.id']),
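The deleted keyword arguments are not cosmetic: convert_unicode, unicode_error, and _warn_on_bytestring were Python 2-era unicode controls that SQLAlchemy 2.0 removed outright, since Python 3 strings are always unicode. A plain String now carries the same meaning:

import sqlalchemy as sa

# 2.0 form: length is the only remaining knob used here
class_name = sa.Column('class_name', sa.String(length=255), index=True)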
@@ -387,10 +369,11 @@ def upgrade():
share_snapshots, share_server_backend_details,
share_metadata, volume_types, volume_type_extra_specs]
for table in tables:
if not table.exists():
with migrate_engine.begin() as conn:
for table in tables:
try:
table.create()
table.create(conn, checkfirst=True)
except Exception:
LOG.info(repr(table))
LOG.exception('Exception while creating table.')
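Table.exists() is gone, as is Table.create() against a bound MetaData; the 2.0 pattern hands the connection to create() and lets checkfirst=True do the existence probe. A runnable sketch against a throwaway SQLite engine:

import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
meta = sa.MetaData()
services = sa.Table("services", meta,
                    sa.Column("id", sa.Integer, primary_key=True))

with engine.begin() as conn:
    services.create(conn, checkfirst=True)  # creates the table
    services.create(conn, checkfirst=True)  # no-op: it already exists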

View File

@@ -32,7 +32,6 @@ LOG = log.getLogger(__name__)
def upgrade():
meta = MetaData()
meta.bind = op.get_bind()
# New table
messages = Table(
@@ -55,7 +54,7 @@ def upgrade():
mysql_charset='utf8'
)
messages.create()
messages.create(op.get_bind())
def downgrade():

View File

@@ -70,24 +70,25 @@ def upgrade():
for instance in connection.execute(instances_query):
access_mappings_query = instance_access_table.select().where(
instance_access_table.c.share_instance_id == instance['id']
instance_access_table.c.share_instance_id ==
instance._mapping['id']
).where(instance_access_table.c.deleted == 'False')
status = constants.STATUS_ACTIVE
for access_rule in connection.execute(access_mappings_query):
if (access_rule['state'] == constants.STATUS_DELETING or
access_rule['state'] not in priorities):
if (access_rule._mapping['state'] == constants.STATUS_DELETING or
access_rule._mapping['state'] not in priorities):
continue
if priorities[access_rule['state']] > priorities[status]:
status = access_rule['state']
if priorities[access_rule._mapping['state']] > priorities[status]:
status = access_rule._mapping['state']
# pylint: disable=no-value-for-parameter
op.execute(
share_instances_table.update().where(
share_instances_table.c.id == instance['id']
share_instances_table.c.id == instance._mapping['id']
).values({'access_rules_status': upgrade_data_mapping[status]})
)
@@ -115,7 +116,7 @@ def downgrade():
# NOTE(u_glide): We cannot determine if a rule is applied or not in
# Manila, so administrator should manually handle such access rules.
if instance['access_rules_status'] == 'active':
if instance._mapping['access_rules_status'] == 'active':
state = 'active'
else:
state = 'error'
@@ -123,7 +124,8 @@ def downgrade():
# pylint: disable=no-value-for-parameter
op.execute(
instance_access_table.update().where(
instance_access_table.c.share_instance_id == instance['id']
instance_access_table.c.share_instance_id ==
instance._mapping['id']
).where(instance_access_table.c.deleted == 'False').values(
{'state': state}
)
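The instance['id'] to instance._mapping['id'] rewrites follow from the 2.0 result API: rows are named tuples, so string-key indexing raises TypeError, while Row._mapping exposes the old dict-style view (attribute access also works). A quick standalone check:

import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
with engine.begin() as conn:
    conn.execute(sa.text("CREATE TABLE t (state TEXT)"))
    conn.execute(sa.text("INSERT INTO t VALUES ('active')"))
    row = conn.execute(sa.text("SELECT state FROM t")).first()
    assert row.state == row._mapping['state'] == 'active'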

View File

@@ -47,83 +47,86 @@ def upgrade():
Add 'create_share_from_snapshot_support' extra spec to all share types and
attribute 'create_share_from_snapshot_support' to Share model.
"""
session = sa.orm.Session(bind=op.get_bind().connect())
with sa.orm.Session(bind=op.get_bind()) as session:
extra_specs_table = table(
'share_type_extra_specs',
sa.Column('created_at', sa.DateTime),
sa.Column('deleted', sa.Integer),
sa.Column('share_type_id', sa.String(length=36)),
sa.Column('spec_key', sa.String(length=255)),
sa.Column('spec_value', sa.String(length=255)))
extra_specs_table = table(
'share_type_extra_specs',
sa.Column('created_at', sa.DateTime),
sa.Column('deleted', sa.Integer),
sa.Column('share_type_id', sa.String(length=36)),
sa.Column('spec_key', sa.String(length=255)),
sa.Column('spec_value', sa.String(length=255)))
share_type_table = table(
'share_types',
sa.Column('deleted', sa.Integer),
sa.Column('id', sa.Integer))
share_type_table = table(
'share_types',
sa.Column('deleted', sa.Integer),
sa.Column('id', sa.Integer))
# Get list of share type IDs that don't already have the new required
# create_share_from_snapshot_support extra spec defined.
existing_extra_specs = session.query(
extra_specs_table).filter(
extra_specs_table.c.spec_key ==
constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT).filter(
extra_specs_table.c.deleted == 0).all()
excluded_st_ids = [es.share_type_id for es in existing_extra_specs]
# Get share types for the IDs we got in the previous query
share_types = session.query(share_type_table).filter(
share_type_table.c.deleted.in_(('0', 'False', ))).filter(
share_type_table.c.id.notin_(excluded_st_ids)).all()
extra_specs = []
now = timeutils.utcnow()
for share_type in share_types:
# Get the value of snapshot_support for each extant share type
snapshot_support_extra_spec = session.query(
# Get list of share type IDs that don't already have the new required
# create_share_from_snapshot_support extra spec defined.
existing_extra_specs = session.query(
extra_specs_table).filter(
extra_specs_table.c.spec_key ==
constants.ExtraSpecs.SNAPSHOT_SUPPORT).filter(
extra_specs_table.c.share_type_id == share_type.id).first()
constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT).filter(
extra_specs_table.c.deleted == 0).all()
excluded_st_ids = [es.share_type_id for es in existing_extra_specs]
spec_value = (snapshot_support_extra_spec.spec_value if
snapshot_support_extra_spec else 'False')
# Get share types for the IDs we got in the previous query
share_types = session.query(share_type_table).filter(
share_type_table.c.deleted.in_(('0', 'False', ))).filter(
share_type_table.c.id.notin_(excluded_st_ids)).all()
# Copy the snapshot_support value to create_share_from_snapshot_support
extra_specs.append({
'spec_key':
constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT,
'spec_value': spec_value,
'deleted': 0,
'created_at': now,
'share_type_id': share_type.id,
})
if extra_specs:
op.bulk_insert(extra_specs_table, extra_specs)
extra_specs = []
now = timeutils.utcnow()
for share_type in share_types:
# Add create_share_from_snapshot_support attribute to shares table
op.add_column('shares',
sa.Column('create_share_from_snapshot_support',
sa.Boolean, default=True))
# Get the value of snapshot_support for each extant share type
snapshot_support_extra_spec = session.query(
extra_specs_table).filter(
extra_specs_table.c.spec_key ==
constants.ExtraSpecs.SNAPSHOT_SUPPORT).filter(
extra_specs_table.c.share_type_id == share_type.id).first()
# Copy snapshot_support to create_share_from_snapshot_support on each share
shares_table = sa.Table(
'shares',
sa.MetaData(),
sa.Column('id', sa.String(length=36)),
sa.Column('deleted', sa.String(length=36)),
sa.Column('snapshot_support', sa.Boolean),
sa.Column('create_share_from_snapshot_support', sa.Boolean),
)
# pylint: disable=no-value-for-parameter
update = shares_table.update().where(
shares_table.c.deleted == 'False').values(
create_share_from_snapshot_support=shares_table.c.snapshot_support)
session.execute(update)
session.commit()
spec_value = (snapshot_support_extra_spec.spec_value if
snapshot_support_extra_spec else 'False')
session.close_all()
# Copy the snapshot_support value to
# create_share_from_snapshot_support
extra_specs.append({
'spec_key':
constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT,
'spec_value': spec_value,
'deleted': 0,
'created_at': now,
'share_type_id': share_type.id,
})
if extra_specs:
op.bulk_insert(extra_specs_table, extra_specs)
# Add create_share_from_snapshot_support attribute to shares table
op.add_column(
'shares',
sa.Column(
'create_share_from_snapshot_support', sa.Boolean, default=True
)
)
# Copy snapshot_support to create_share_from_snapshot_support on each
# share
shares_table = sa.Table(
'shares',
sa.MetaData(),
sa.Column('id', sa.String(length=36)),
sa.Column('deleted', sa.String(length=36)),
sa.Column('snapshot_support', sa.Boolean),
sa.Column('create_share_from_snapshot_support', sa.Boolean),
)
# pylint: disable=no-value-for-parameter
update = shares_table.update().where(
shares_table.c.deleted == 'False').values(
create_share_from_snapshot_support=shares_table.c.snapshot_support)
session.execute(update)
session.commit()
def downgrade():
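The re-indentation above wraps the whole body in the Session context-manager form supported since 1.4: the session closes itself on exit, which is what lets the trailing close_all() call go away (the classmethod itself no longer exists in 2.0). In isolation:

import sqlalchemy as sa
from sqlalchemy import orm

engine = sa.create_engine("sqlite://")
with orm.Session(bind=engine) as session:
    session.execute(sa.text("SELECT 1"))
    session.commit()
# the session is closed here; no Session.close_all() required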
@@ -132,12 +135,12 @@ def downgrade():
Remove 'create_share_from_snapshot_support' extra spec from all share types
and attribute 'create_share_from_snapshot_support' from Share model.
"""
connection = op.get_bind().connect()
connection = op.get_bind()
deleted_at = timeutils.utcnow()
extra_specs = sa.Table(
'share_type_extra_specs',
sa.MetaData(),
autoload=True,
autoload_with=connection)
# pylint: disable=no-value-for-parameter

View File

@@ -48,11 +48,11 @@ def upgrade():
for instance in connection.execute(share_instances_table.select()):
share = connection.execute(shares_table.select().where(
instance['share_id'] == shares_table.c.id)).first()
instance._mapping['share_id'] == shares_table.c.id)).first()
# pylint: disable=no-value-for-parameter
op.execute(share_instances_table.update().where(
share_instances_table.c.id == instance['id']).values(
{'share_type_id': share['share_type_id']}))
share_instances_table.c.id == instance._mapping['id']).values(
{'share_type_id': share._mapping['share_type_id']}))
op.drop_column('shares', 'share_type_id')
@@ -75,11 +75,11 @@ def downgrade():
for share in connection.execute(shares_table.select()):
instance = connection.execute(share_instances_table.select().where(
share['id'] == share_instances_table.c.share_id)).first()
share._mapping['id'] == share_instances_table.c.share_id)).first()
# pylint: disable=no-value-for-parameter
op.execute(shares_table.update().where(
shares_table.c.id == instance['share_id']).values(
{'share_type_id': instance['share_type_id']}))
shares_table.c.id == instance._mapping['share_id']).values(
{'share_type_id': instance._mapping['share_type_id']}))
op.drop_constraint('si_st_id_fk', 'share_instances', type_='foreignkey')
op.drop_column('share_instances', 'share_type_id')

View File

@@ -125,14 +125,15 @@ def remove_share_instances_table(connection):
shares_table.c.id == share.id
).values(
{
'host': instance['host'],
'status': instance['status'],
'scheduled_at': instance['scheduled_at'],
'launched_at': instance['launched_at'],
'terminated_at': instance['terminated_at'],
'share_network_id': instance['share_network_id'],
'share_server_id': instance['share_server_id'],
'availability_zone': instance['availability_zone'],
'host': instance._mapping['host'],
'status': instance._mapping['status'],
'scheduled_at': instance._mapping['scheduled_at'],
'launched_at': instance._mapping['launched_at'],
'terminated_at': instance._mapping['terminated_at'],
'share_network_id': instance._mapping['share_network_id'],
'share_server_id': instance._mapping['share_server_id'],
'availability_zone':
instance._mapping['availability_zone'],
}
)
)
@@ -210,8 +211,8 @@ def remove_snapshot_instances_table(connection):
snapshots_table.c.id == snapshot.id
).values(
{
'status': snapshot_instance['status'],
'progress': snapshot_instance['progress'],
'status': snapshot_instance._mapping['status'],
'progress': snapshot_instance._mapping['progress'],
}
)
)

View File

@@ -66,11 +66,12 @@ def upgrade():
)
for instance in connection.execute(instances_query):
access_rule_status = instance['access_rules_status']
access_rule_status = instance._mapping['access_rules_status']
# pylint: disable=no-value-for-parameter
op.execute(
instance_access_map_table.update().where(
instance_access_map_table.c.share_instance_id == instance['id']
instance_access_map_table.c.share_instance_id ==
instance._mapping['id']
).values({
'state': access_rules_status_to_state_mapping[
access_rule_status],
@@ -78,7 +79,7 @@ def upgrade():
)
op.execute(
share_instances_table.update().where(
share_instances_table.c.id == instance['id']
share_instances_table.c.id == instance._mapping['id']
).values({
'access_rules_status': access_rules_status_upgrade_mapping[
access_rule_status],

View File

@@ -39,35 +39,35 @@ def upgrade():
Add 'snapshot_support' extra spec to all share types and
attr 'snapshot_support' to Share model.
"""
session = sa.orm.Session(bind=op.get_bind().connect())
es_table = table(
'share_type_extra_specs',
sa.Column('created_at', sa.DateTime),
sa.Column('deleted', sa.Integer),
sa.Column('share_type_id', sa.String(length=36)),
sa.Column('spec_key', sa.String(length=255)),
sa.Column('spec_value', sa.String(length=255)))
with sa.orm.Session(bind=op.get_bind()) as session:
st_table = table(
'share_types',
sa.Column('deleted', sa.Integer),
sa.Column('id', sa.Integer))
es_table = table(
'share_type_extra_specs',
sa.Column('created_at', sa.DateTime),
sa.Column('deleted', sa.Integer),
sa.Column('share_type_id', sa.String(length=36)),
sa.Column('spec_key', sa.String(length=255)),
sa.Column('spec_value', sa.String(length=255)))
# NOTE(vponomaryov): field 'deleted' is integer here.
existing_extra_specs = (session.query(es_table).
filter(es_table.c.spec_key ==
constants.ExtraSpecs.SNAPSHOT_SUPPORT).
filter(es_table.c.deleted == 0).
all())
exclude_st_ids = [es.share_type_id for es in existing_extra_specs]
st_table = table(
'share_types',
sa.Column('deleted', sa.Integer),
sa.Column('id', sa.Integer))
# NOTE(vponomaryov): field 'deleted' is string here.
share_types = (session.query(st_table).
filter(st_table.c.deleted.in_(('0', 'False', ))).
filter(st_table.c.id.notin_(exclude_st_ids)).
all())
session.close_all()
# NOTE(vponomaryov): field 'deleted' is integer here.
existing_extra_specs = (session.query(es_table).
filter(es_table.c.spec_key ==
constants.ExtraSpecs.SNAPSHOT_SUPPORT).
filter(es_table.c.deleted == 0).
all())
exclude_st_ids = [es.share_type_id for es in existing_extra_specs]
# NOTE(vponomaryov): field 'deleted' is string here.
share_types = (session.query(st_table).
filter(st_table.c.deleted.in_(('0', 'False', ))).
filter(st_table.c.id.notin_(exclude_st_ids)).
all())
extra_specs = []
now = timeutils.utcnow()
@@ -88,16 +88,16 @@ def upgrade():
op.add_column('shares',
sa.Column('snapshot_support', sa.Boolean, default=True))
connection = op.get_bind().connect()
connection = op.get_bind()
shares = sa.Table(
'shares',
sa.MetaData(),
autoload=True,
autoload_with=connection)
# pylint: disable=no-value-for-parameter
update = shares.update().where(shares.c.deleted == 'False').values(
snapshot_support=True)
update = shares.update().where(
shares.c.deleted == 'False'
).values(snapshot_support=True)
connection.execute(update)
@@ -107,11 +107,10 @@ def downgrade():
Remove 'snapshot_support' extra spec from all share types and
attr 'snapshot_support' from Share model.
"""
connection = op.get_bind().connect()
connection = op.get_bind()
extra_specs = sa.Table(
'share_type_extra_specs',
sa.MetaData(),
autoload=True,
autoload_with=connection)
# pylint: disable=no-value-for-parameter

View File

@@ -55,21 +55,22 @@ def upgrade():
sa.Column('updated_at', sa.DateTime))
export_locations = []
session = sa.orm.Session(bind=op.get_bind().connect())
for share in session.query(shares_table).all():
deleted = share.deleted if isinstance(share.deleted, int) else 0
export_locations.append({
'created_at': share.created_at,
'updated_at': share.updated_at,
'deleted_at': share.deleted_at,
'deleted': deleted,
'share_id': share.id,
'path': share.export_location,
})
op.bulk_insert(export_locations_table, export_locations)
op.drop_column('shares', 'export_location')
session.close_all()
with sa.orm.Session(bind=op.get_bind()) as session:
for share in session.query(shares_table).all():
deleted = share.deleted if isinstance(share.deleted, int) else 0
export_locations.append({
'created_at': share.created_at,
'updated_at': share.updated_at,
'deleted_at': share.deleted_at,
'deleted': deleted,
'share_id': share.id,
'path': share.export_location,
})
op.bulk_insert(export_locations_table, export_locations)
op.drop_column('shares', 'export_location')
def downgrade():
@@ -90,23 +91,25 @@ def downgrade():
sa.Column('deleted', sa.Integer))
connection = op.get_bind()
session = sa.orm.Session(bind=connection.connect())
export_locations = session.query(
func.min(export_locations_table.c.updated_at),
export_locations_table.c.share_id,
export_locations_table.c.path).filter(
export_locations_table.c.deleted == 0).group_by(
export_locations_table.c.share_id,
export_locations_table.c.path).all()
shares = sa.Table('shares', sa.MetaData(),
autoload=True, autoload_with=connection)
with sa.orm.Session(bind=connection) as session:
export_locations = session.query(
func.min(export_locations_table.c.updated_at),
export_locations_table.c.share_id,
export_locations_table.c.path).filter(
export_locations_table.c.deleted == 0).group_by(
export_locations_table.c.share_id,
export_locations_table.c.path).all()
for location in export_locations:
# pylint: disable=no-value-for-parameter
update = (shares.update().where(shares.c.id == location.share_id).
values(export_location=location.path))
connection.execute(update)
shares = sa.Table(
'shares', sa.MetaData(),
autoload_with=connection)
op.drop_table('share_export_locations')
session.close_all()
for location in export_locations:
# pylint: disable=no-value-for-parameter
update = (
shares.update().where(shares.c.id == location.share_id).
values(export_location=location.path))
connection.execute(update)
op.drop_table('share_export_locations')

View File

@@ -96,14 +96,15 @@ def downgrade():
for access_rule in share_access_rules:
access_mapping = connection.execute(
instance_access_table.select().where(
instance_access_table.c.access_id == access_rule['id'])
instance_access_table.c.access_id ==
access_rule._mapping['id'])
).first()
# pylint: disable=no-value-for-parameter
op.execute(
access_table.update().where(
access_table.c.id == access_rule['id']
).values({'state': access_mapping['state']})
access_table.c.id == access_rule._mapping['id']
).values({'state': access_mapping._mapping['state']})
)
op.drop_table('share_instance_access_map')

View File

@@ -31,47 +31,50 @@ from sqlalchemy.sql import table
def upgrade():
session = sa.orm.Session(bind=op.get_bind().connect())
with sa.orm.Session(bind=op.get_bind()) as session:
es_table = table(
'share_type_extra_specs',
sa.Column('created_at', sa.DateTime),
sa.Column('deleted', sa.Integer),
sa.Column('share_type_id', sa.String(length=36)),
sa.Column('spec_key', sa.String(length=255)),
sa.Column('spec_value', sa.String(length=255)))
es_table = table(
'share_type_extra_specs',
sa.Column('created_at', sa.DateTime),
sa.Column('deleted', sa.Integer),
sa.Column('share_type_id', sa.String(length=36)),
sa.Column('spec_key', sa.String(length=255)),
sa.Column('spec_value', sa.String(length=255)))
st_table = table(
'share_types',
sa.Column('deleted', sa.Integer),
sa.Column('id', sa.Integer))
st_table = table(
'share_types',
sa.Column('deleted', sa.Integer),
sa.Column('id', sa.Integer))
# NOTE(vponomaryov): field 'deleted' is integer here.
existing_required_extra_specs = (session.query(es_table).
filter(es_table.c.spec_key ==
'driver_handles_share_servers').
filter(es_table.c.deleted == 0).
all())
exclude_st_ids = [es.share_type_id for es in existing_required_extra_specs]
# NOTE(vponomaryov): field 'deleted' is integer here.
existing_required_extra_specs = (
session.query(es_table).filter(
es_table.c.spec_key ==
'driver_handles_share_servers'
).filter(es_table.c.deleted == 0).all()
)
exclude_st_ids = [
es.share_type_id for es in existing_required_extra_specs]
# NOTE(vponomaryov): field 'deleted' is string here.
share_types = (session.query(st_table).
filter(st_table.c.deleted.in_(('0', 'False', ))).
filter(st_table.c.id.notin_(exclude_st_ids)).
all())
# NOTE(vponomaryov): field 'deleted' is string here.
share_types = (
session.query(st_table).
filter(st_table.c.deleted.in_(('0', 'False', ))).
filter(st_table.c.id.notin_(exclude_st_ids)).
all()
)
extra_specs = []
for st in share_types:
extra_specs.append({
'spec_key': 'driver_handles_share_servers',
'spec_value': 'True',
'deleted': 0,
'created_at': timeutils.utcnow(),
'share_type_id': st.id,
})
extra_specs = []
for st in share_types:
extra_specs.append({
'spec_key': 'driver_handles_share_servers',
'spec_value': 'True',
'deleted': 0,
'created_at': timeutils.utcnow(),
'share_type_id': st.id,
})
op.bulk_insert(es_table, extra_specs)
session.close_all()
op.bulk_insert(es_table, extra_specs)
def downgrade():

View File

@@ -33,29 +33,27 @@ def upgrade():
Add attribute 'revert_to_snapshot_support' to Share model.
"""
session = sa.orm.Session(bind=op.get_bind().connect())
with sa.orm.Session(bind=op.get_bind()) as session:
# Add create_share_from_snapshot_support attribute to shares table
op.add_column(
'shares',
sa.Column('revert_to_snapshot_support', sa.Boolean, default=False))
# Add create_share_from_snapshot_support attribute to shares table
op.add_column(
'shares',
sa.Column('revert_to_snapshot_support', sa.Boolean, default=False))
# Set revert_to_snapshot_support on each share
shares_table = sa.Table(
'shares',
sa.MetaData(),
sa.Column('id', sa.String(length=36)),
sa.Column('deleted', sa.String(length=36)),
sa.Column('revert_to_snapshot_support', sa.Boolean),
)
# pylint: disable=no-value-for-parameter
update = shares_table.update().where(
shares_table.c.deleted == 'False').values(
revert_to_snapshot_support=False)
session.execute(update)
session.commit()
session.close_all()
# Set revert_to_snapshot_support on each share
shares_table = sa.Table(
'shares',
sa.MetaData(),
sa.Column('id', sa.String(length=36)),
sa.Column('deleted', sa.String(length=36)),
sa.Column('revert_to_snapshot_support', sa.Boolean),
)
# pylint: disable=no-value-for-parameter
update = shares_table.update().where(
shares_table.c.deleted == 'False').values(
revert_to_snapshot_support=False)
session.execute(update)
session.commit()
def downgrade():

View File

@@ -111,18 +111,19 @@ def upgrade():
connection)
for alloc in connection.execute(network_allocation_table.select()):
# admin allocations should not contain subnet id.
if alloc['label'] == 'admin':
if alloc._mapping['label'] == 'admin':
continue
server = connection.execute(
share_servers_table.select().where(
alloc['share_server_id'] == (
alloc._mapping['share_server_id'] == (
share_servers_table.c.id))).first()
# pylint: disable=no-value-for-parameter
op.execute(network_allocation_table.update().where(
alloc['id'] == network_allocation_table.c.id).values(
{'share_network_subnet_id': server['share_network_subnet_id']}))
alloc._mapping['id'] == network_allocation_table.c.id).values(
{'share_network_subnet_id':
server._mapping['share_network_subnet_id']}))
# add a new column to share_servers.
try:
@@ -173,27 +174,30 @@ def downgrade():
SHARE_SERVER_SUBNET_MAP_TABLE, connection)
share_servers_table = utils.load_table(SHARE_SERVERS_TABLE,
connection)
session = sa.orm.Session(bind=connection.connect())
for server in connection.execute(share_servers_table.select()):
subnets = session.query(
server_subnet_mappings_table).filter(
server['id'] == (
server_subnet_mappings_table.c.share_server_id)).all()
if server['deleted'] != 'False' and len(subnets) > 1:
LOG.warning('Share server %s is not deleted and it '
'has more than one subnet (%s subnets), '
'the downgrade may cause an inconsistent '
'environment.', server['id'], len(subnets))
with sa.orm.Session(bind=op.get_bind()) as session:
for server in connection.execute(share_servers_table.select()):
subnets = session.query(
server_subnet_mappings_table).filter(
server._mapping['id'] == (
server_subnet_mappings_table.c.share_server_id)
).all()
subnet_id = subnets[0].share_network_subnet_id if subnets else None
if server._mapping['deleted'] != 'False' and len(subnets) > 1:
LOG.warning('Share server %s is not deleted and it '
'has more than one subnet (%s subnets), '
'the downgrade may cause an inconsistent '
'environment.',
server._mapping['id'], len(subnets))
# pylint: disable=no-value-for-parameter
op.execute(share_servers_table.update().where(
server['id'] == share_servers_table.c.id).values(
{'share_network_subnet_id': subnet_id}))
subnet_id = (
subnets[0].share_network_subnet_id if subnets else None
)
session.close_all()
# pylint: disable=no-value-for-parameter
op.execute(share_servers_table.update().where(
server._mapping['id'] == share_servers_table.c.id).values(
{'share_network_subnet_id': subnet_id}))
except Exception:
LOG.error("'share_network_subnet_id' field in the %s table could not "

View File

@@ -24,6 +24,7 @@ down_revision = '5237b6625330'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import LABEL_STYLE_TABLENAME_PLUS_COL
from manila.db.migrations import utils
@@ -113,7 +114,7 @@ def downgrade():
ssi_table.join(
share_instances_table,
share_instances_table.c.id == ssi_table.c.share_instance_id
).select(use_labels=True).where(
).select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).where(
ssi_table.c.share_group_snapshot_id.isnot(None),
)):
ported_data.append({
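select(use_labels=True) no longer exists in 2.0; the replacement is an explicit label style applied to the constructed select, exactly as the hunk shows. A standalone sketch (tables a and b are invented for illustration):

import sqlalchemy as sa
from sqlalchemy.sql import LABEL_STYLE_TABLENAME_PLUS_COL

meta = sa.MetaData()
a = sa.Table('a', meta, sa.Column('id', sa.Integer))
b = sa.Table('b', meta, sa.Column('id', sa.Integer),
             sa.Column('a_id', sa.Integer))

stmt = (
    a.join(b, a.c.id == b.c.a_id)
    .select()
    .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
)
# the two "id" columns no longer collide:
print(stmt.selected_columns.keys())  # e.g. ['a_id', 'b_id', 'b_a_id']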

View File

@@ -40,7 +40,6 @@ LOG = log.getLogger(__name__)
def upgrade():
try:
meta = sa.MetaData()
meta.bind = op.get_bind()
# Add new 'is_admin_only' column in export locations table that will be
# used for hiding admin export locations from common users in API.
@@ -62,14 +61,14 @@ def upgrade():
sa.Column('uuid', sa.String(36)),
sa.Column('is_admin_only', sa.Boolean),
)
for record in el_table.select().execute():
for record in op.get_bind().execute(el_table.select()):
# pylint: disable=no-value-for-parameter
el_table.update().values(
op.get_bind().execute(el_table.update().values(
is_admin_only=False,
uuid=uuidutils.generate_uuid(),
).where(
el_table.c.id == record.id,
).execute()
))
# Make new 'uuid' column in export locations table not nullable.
op.alter_column(

View File

@@ -40,7 +40,7 @@ def upgrade():
share_types = sql.Table('share_types', meta, is_public.copy())
# pylint: disable=no-value-for-parameter
share_types.update().values(is_public=True).execute()
op.execute(share_types.update().values(is_public=True))
except Exception:
LOG.error("Column |%s| not created!", repr(is_public))
raise

View File

@@ -17,5 +17,4 @@ import sqlalchemy as sa
def load_table(name, connection):
return sa.Table(name, sa.MetaData(), autoload=True,
autoload_with=connection)
return sa.Table(name, sa.MetaData(), autoload_with=connection)
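The helper keeps only autoload_with, since autoload=True was removed in 2.0 and reflection now fires whenever autoload_with is supplied. Quickly verified against SQLite:

import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
with engine.begin() as conn:
    conn.execute(sa.text("CREATE TABLE quota_classes (id INTEGER PRIMARY KEY)"))
    t = sa.Table("quota_classes", sa.MetaData(), autoload_with=conn)
    assert "id" in t.c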

File diff suppressed because it is too large.

View File

@@ -109,7 +109,8 @@ class ManilaMigrationsCheckers(migrations_data_checks.DbMigrationsData):
post_downgrade = getattr(
self, "_post_downgrade_%s" % version.revision, None)
if post_downgrade:
post_downgrade(self.engine)
with self.engine.begin() as conn:
post_downgrade(conn)
return True
@@ -128,14 +129,17 @@ class ManilaMigrationsCheckers(migrations_data_checks.DbMigrationsData):
pre_upgrade = getattr(
self, "_pre_upgrade_%s" % version, None)
if pre_upgrade:
data = pre_upgrade(self.engine)
with self.engine.begin() as conn:
data = pre_upgrade(conn)
self.migration_api.upgrade(version)
self.assertEqual(version, self.migration_api.version())
if with_data:
check = getattr(self, "_check_%s" % version, None)
if check:
check(self.engine, data)
with self.engine.begin() as conn:
check(conn, data)
except Exception as e:
LOG.error("Failed to migrate to version %(version)s on engine "
"%(engine)s. Exception while running the migration: "
@@ -187,28 +191,30 @@ class TestManilaMigrationsMySQL(
return_value=self.engine):
self._walk_versions(snake_walk=False, downgrade=False)
# sanity check
sanity_check = """SELECT count(*)
FROM information_schema.tables
WHERE table_schema = :database;"""
total = self.engine.execute(
text(sanity_check),
database=self.engine.url.database)
with self.engine.begin() as conn:
# sanity check
sanity_check = """SELECT count(*)
FROM information_schema.tables
WHERE table_schema = :database;"""
total = conn.execute(
text(sanity_check),
{"database": self.engine.url.database})
self.assertGreater(total.scalar(), 0, "No tables found. Wrong schema?")
self.assertGreater(
total.scalar(), 0, "No tables found. Wrong schema?")
noninnodb_query = """
SELECT count(*)
FROM information_schema.TABLES
WHERE table_schema = :database
AND engine != 'InnoDB'
AND table_name != 'alembic_version';"""
noninnodb_query = """
SELECT count(*)
FROM information_schema.TABLES
WHERE table_schema = :database
AND engine != 'InnoDB'
AND table_name != 'alembic_version';"""
count = self.engine.execute(
text(noninnodb_query),
database=self.engine.url.database
).scalar()
self.assertEqual(0, count, "%d non InnoDB tables created" % count)
count = conn.execute(
text(noninnodb_query),
{"database": self.engine.url.database}
).scalar()
self.assertEqual(0, count, "%d non InnoDB tables created" % count)
class TestManilaMigrationsPostgreSQL(
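The test changes bundle three 2.0 requirements: Engine.execute() is gone, so statements run inside an explicit engine.begin() block; text() bind parameters are passed as a dictionary rather than keyword arguments; and results are read from that same connection. Reduced to its essentials:

import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
with engine.begin() as conn:
    total = conn.execute(sa.text("SELECT :x + 1"), {"x": 41}).scalar()
    assert total == 42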