Remove DB downgrade

As downgrades are not supported after Kilo, we should remove them now.
Rollbacks can be performed as described at the link below:
http://docs.openstack.org/ops-guide/ops-upgrades.html#rolling-back-a-failed-upgrade
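
For example, with a MySQL backend the rollback flow is roughly the following
(a sketch; the database name and credentials are illustrative, adjust them to
your deployment):

    # take a full backup of the glance database before upgrading
    mysqldump -u root -p glance > glance_backup.sql
    glance-manage db upgrade
    # if the upgrade fails, restore the backup instead of downgrading
    mysql -u root -p glance < glance_backup.sql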

DB downgrades were deprecated in the Glance Mitaka release by commit
e3366afdfb.

Change-Id: I937d15d93f16a3e44a50e6ff1a469098eab67c79
Implements: blueprint remove-db-downgrade
wangxiyuan 2015-09-16 11:50:56 +08:00
parent 797a12361f
commit 2f803d3fa9
54 changed files with 51 additions and 1947 deletions

View File

@ -53,8 +53,9 @@ This will take an existing database and upgrade it to the specified VERSION.
Downgrading an Existing Database
--------------------------------
-glance-manage db downgrade <VERSION>
-
-This will downgrade an existing database from the current version to the
-specified VERSION.
+Upgrades involve complex operations and can fail. Before attempting any
+upgrade, you should make a full database backup of your production data. As of
+Kilo, database downgrades are not supported, and the only method available to
+get back to a prior database version is to restore from backup[1].
+
+[1]: http://docs.openstack.org/ops-guide/ops-upgrades.html#perform-a-backup

View File

@ -50,10 +50,6 @@ COMMANDS
    This will take an existing database and upgrade it to the
    specified VERSION.

-**db_downgrade <VERSION>**
-    This will take an existing database and downgrade it to the
-    specified VERSION.
-
**db_version_control**
    Place the database under migration control.

View File

@ -84,17 +84,6 @@ class DbCommands(object):
                          db_migration.MIGRATE_REPO_PATH,
                          version)

-    @args('--version', metavar='<version>', help='Database version')
-    def downgrade(self, version=None):
-        """Downgrade the database's migration level"""
-        print("Warning: DB downgrade is deprecated and will be removed in N "
-              "release. Users should make a full database backup of the "
-              "production data before attempting any upgrade.",
-              file=sys.stderr)
-        migration.db_sync(db_api.get_engine(),
-                          db_migration.MIGRATE_REPO_PATH,
-                          version)
-
    @args('--version', metavar='<version>', help='Database version')
    def version_control(self, version=None):
        """Place a database under migration control"""
@ -107,7 +96,7 @@ class DbCommands(object):
          help='Current Database version')
    def sync(self, version=None, current_version=None):
        """
-        Place a database under migration control and upgrade/downgrade it,
+        Place a database under migration control and upgrade it,
        creating first if necessary.
        """
        if current_version not in (None, 'None'):
@ -193,13 +182,6 @@ class DbLegacyCommands(object):
    def upgrade(self, version=None):
        self.command_object.upgrade(CONF.command.version)

-    def downgrade(self, version=None):
-        print("Warning: DB downgrade is deprecated and will be removed in N "
-              "release. Users should make a full database backup of the "
-              "production data before attempting any upgrade.",
-              file=sys.stderr)
-        self.command_object.downgrade(CONF.command.version)
-
    def version_control(self, version=None):
        self.command_object.version_control(CONF.command.version)
@ -234,11 +216,6 @@ def add_legacy_command_parsers(command_object, subparsers):
    parser.add_argument('version', nargs='?')
    parser.set_defaults(action='db_upgrade')

-    parser = subparsers.add_parser('db_downgrade')
-    parser.set_defaults(action_fn=legacy_command_object.downgrade)
-    parser.add_argument('version')
-    parser.set_defaults(action='db_downgrade')
-
    parser = subparsers.add_parser('db_version_control')
    parser.set_defaults(action_fn=legacy_command_object.version_control)
    parser.add_argument('version', nargs='?')

View File

@ -16,7 +16,7 @@
from sqlalchemy.schema import (Column, MetaData, Table)

from glance.db.sqlalchemy.migrate_repo.schema import (
-    Boolean, DateTime, Integer, String, Text, create_tables, drop_tables) # noqa
+    Boolean, DateTime, Integer, String, Text, create_tables) # noqa


def define_images_table(meta):
@ -53,10 +53,3 @@ def upgrade(migrate_engine):
    meta.bind = migrate_engine
    tables = [define_images_table(meta)]
    create_tables(tables)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    tables = [define_images_table(meta)]
-    drop_tables(tables)

View File

@ -17,7 +17,7 @@ from sqlalchemy.schema import (
    Column, ForeignKey, Index, MetaData, Table, UniqueConstraint)

from glance.db.sqlalchemy.migrate_repo.schema import (
-    Boolean, DateTime, Integer, String, Text, create_tables, drop_tables,
+    Boolean, DateTime, Integer, String, Text, create_tables,
    from_migration_import) # noqa
@ -76,10 +76,3 @@ def upgrade(migrate_engine):
    meta.bind = migrate_engine
    tables = [define_image_properties_table(meta)]
    create_tables(tables)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    tables = [define_image_properties_table(meta)]
-    drop_tables(tables)

View File

@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.

-from migrate.changeset import *  # noqa
from sqlalchemy import *  # noqa

from glance.db.sqlalchemy.migrate_repo.schema import (
@ -53,17 +52,6 @@ def get_images_table(meta):
    return images

-
-def get_image_properties_table(meta):
-    """
-    No changes to the image properties table from 002...
-    """
-    (define_image_properties_table,) = from_migration_import(
-        '002_add_image_properties_table', ['define_image_properties_table'])
-
-    image_properties = define_image_properties_table(meta)
-    return image_properties
-

def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
@ -119,35 +107,3 @@ def upgrade(migrate_engine):
    container_format.create(images)

    images.columns['type'].drop()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    # Steps to take, in this order:
-    #     1) Add type column back to Image
-    #     2) Move the existing type properties from ImageProperty into
-    #        Image.type
-    #     3) Drop the disk_format and container_format columns in Image
-
-    conn = migrate_engine.connect()
-    images = get_images_table(meta)
-    image_properties = get_image_properties_table(meta)
-
-    type_col = Column('type', String(30))
-    type_col.create(images)
-
-    sel = select([image_properties]).where(image_properties.c.key == 'type')
-    type_property_records = conn.execute(sel).fetchall()
-    for record in type_property_records:
-        upd = images.update().where(
-            images.c.id == record.image_id).values(type=record.value)
-        conn.execute(upd)
-        dlt = image_properties.delete().where(
-            image_properties.c.image_id == record.image_id)
-        conn.execute(dlt)
-    conn.close()
-
-    images.columns['disk_format'].drop()
-    images.columns['container_format'].drop()

View File

@ -1,54 +0,0 @@
-- Make changes to the base images table
CREATE TEMPORARY TABLE images_backup (
id INTEGER NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id)
);
INSERT INTO images_backup
SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted
FROM images;
DROP TABLE images;
CREATE TABLE images (
id INTEGER NOT NULL,
name VARCHAR(255),
size INTEGER,
type VARCHAR(30),
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
CREATE INDEX ix_images_deleted ON images (deleted);
CREATE INDEX ix_images_is_public ON images (is_public);
INSERT INTO images (id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted)
SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted
FROM images_backup;
DROP TABLE images_backup;
-- Re-insert the type values from the temp table
UPDATE images
SET type = (SELECT value FROM image_properties WHERE image_id = images.id AND key = 'type')
WHERE EXISTS (SELECT * FROM image_properties WHERE image_id = images.id AND key = 'type');
-- Remove the type properties from the image_properties table
DELETE FROM image_properties
WHERE key = 'type';

View File

@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.

-from migrate.changeset import *  # noqa
from sqlalchemy import *  # noqa

from glance.db.sqlalchemy.migrate_repo.schema import (
@ -73,12 +72,3 @@ def upgrade(migrate_engine):
    checksum = Column('checksum', String(32))
    checksum.create(images)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    images = get_images_table(meta)
-    images.columns['checksum'].drop()

View File

@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.

-from migrate.changeset import *  # noqa
from sqlalchemy import *  # noqa

from glance.db.sqlalchemy.migrate_repo.schema import (
@ -54,17 +53,6 @@ def get_images_table(meta):
    return images

-
-def get_image_properties_table(meta):
-    """
-    No changes to the image properties table from 002...
-    """
-    (define_image_properties_table,) = from_migration_import(
-        '002_add_image_properties_table', ['define_image_properties_table'])
-
-    image_properties = define_image_properties_table(meta)
-    return image_properties
-

def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
@ -84,21 +72,3 @@ def upgrade(migrate_engine):
    images = get_images_table(meta)
    images.columns['size'].alter(type=BigInteger())
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    # No changes to SQLite stores are necessary, since
-    # there is no BIG INTEGER type in SQLite. Unfortunately,
-    # running the Python 005_size_big_integer.py migration script
-    # on a SQLite datastore results in an error in the sa-migrate
-    # code that does the workarounds for SQLite not having
-    # ALTER TABLE MODIFY COLUMN ability
-    dialect = migrate_engine.url.get_dialect().name
-
-    if not dialect.startswith('sqlite'):
-        images = get_images_table(meta)
-        images.columns['size'].alter(type=Integer())

View File

@ -13,11 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.

-from migrate.changeset import *  # noqa
from sqlalchemy import *  # noqa

-from glance.db.sqlalchemy.migrate_repo.schema import (
-    Boolean, DateTime, Integer, String, Text, from_migration_import) # noqa
+from glance.db.sqlalchemy.migrate_repo.schema import from_migration_import


def get_images_table(meta):
@ -31,44 +29,6 @@ def get_images_table(meta):
    return images

-
-def get_image_properties_table(meta):
-    """
-    Returns the Table object for the image_properties table that
-    corresponds to the image_properties table definition of this version.
-    """
-    (get_images_table,) = from_migration_import(
-        '004_add_checksum', ['get_images_table'])
-
-    images = get_images_table(meta)  # noqa
-
-    image_properties = Table('image_properties',
-                             meta,
-                             Column('id',
-                                    Integer(),
-                                    primary_key=True,
-                                    nullable=False),
-                             Column('image_id',
-                                    Integer(),
-                                    ForeignKey('images.id'),
-                                    nullable=False,
-                                    index=True),
-                             Column('name', String(255), nullable=False),
-                             Column('value', Text()),
-                             Column('created_at', DateTime(), nullable=False),
-                             Column('updated_at', DateTime()),
-                             Column('deleted_at', DateTime()),
-                             Column('deleted',
-                                    Boolean(),
-                                    nullable=False,
-                                    default=False,
-                                    index=True),
-                             UniqueConstraint('image_id', 'name'),
-                             mysql_engine='InnoDB',
-                             extend_existing=True)
-
-    return image_properties
-

def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
@ -97,30 +57,3 @@ def upgrade(migrate_engine):
    image_properties = get_image_properties_table(meta)
    image_properties.columns['key'].alter(name="name")
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    image_properties = get_image_properties_table(meta)
-
-    if migrate_engine.name == "ibm_db_sa":
-        # NOTE(dperaza) ibm db2 does not allow ALTER INDEX so we will drop
-        # the index, rename the column, then re-create the index
-        sql_commands = [
-            """ALTER TABLE image_properties DROP UNIQUE
-            ix_image_properties_image_id_name;""",
-            """ALTER TABLE image_properties RENAME COLUMN name to \"key\";""",
-            """ALTER TABLE image_properties ADD CONSTRAINT
-            ix_image_properties_image_id_key UNIQUE(image_id, \"key\");""",
-        ]
-        for command in sql_commands:
-            meta.bind.execute(command)
-    else:
-        index = Index('ix_image_properties_image_id_name',
-                      image_properties.c.image_id,
-                      image_properties.c.name)
-        index.rename('ix_image_properties_image_id_key')
-
-        image_properties.columns['name'].alter(name="key")

View File

@ -1,11 +0,0 @@
--
-- This file is necessary because MySQL does not support
-- renaming indexes.
--
DROP INDEX ix_image_properties_image_id_name ON image_properties;
-- Rename the `key` column to `name`
ALTER TABLE image_properties
CHANGE COLUMN name `key` VARCHAR(255) NOT NULL;
CREATE UNIQUE INDEX ix_image_properties_image_id_key ON image_properties (image_id, `key`);

View File

@ -1,43 +0,0 @@
--
-- This is necessary because SQLite does not support
-- RENAME INDEX or ALTER TABLE CHANGE COLUMN.
--
CREATE TEMPORARY TABLE image_properties_backup (
id INTEGER NOT NULL,
image_id INTEGER NOT NULL,
key VARCHAR(255) NOT NULL,
value TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id)
);
INSERT INTO image_properties_backup
SELECT id, image_id, name, value, created_at, updated_at, deleted_at, deleted
FROM image_properties;
DROP TABLE image_properties;
CREATE TABLE image_properties (
id INTEGER NOT NULL,
image_id INTEGER NOT NULL,
key VARCHAR(255) NOT NULL,
value TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
UNIQUE (image_id, key),
FOREIGN KEY(image_id) REFERENCES images (id)
);
CREATE INDEX ix_image_properties_key ON image_properties (key);
INSERT INTO image_properties (id, image_id, key, value, created_at, updated_at, deleted_at, deleted)
SELECT id, image_id, key, value, created_at, updated_at, deleted_at, deleted
FROM image_properties_backup;
DROP TABLE image_properties_backup;

View File

@ -13,12 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.

-from migrate.changeset import *  # noqa
from sqlalchemy import *  # noqa

from glance.db.sqlalchemy.migrate_repo.schema import (
    Boolean, DateTime, BigInteger, Integer, String,
-    Text, from_migration_import) # noqa
+    Text) # noqa


def get_images_table(meta):
@ -56,17 +55,6 @@ def get_images_table(meta):
    return images

-
-def get_image_properties_table(meta):
-    """
-    No changes to the image properties table from 006...
-    """
-    (get_image_properties_table,) = from_migration_import(
-        '006_key_to_name', ['get_image_properties_table'])
-
-    image_properties = get_image_properties_table(meta)
-    return image_properties
-

def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
@ -75,12 +63,3 @@ def upgrade(migrate_engine):
    owner = Column('owner', String(255))
    owner.create(images)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    images = get_images_table(meta)
-    images.columns['owner'].drop()

View File

@ -13,12 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.

-from migrate.changeset import *  # noqa
from sqlalchemy import *  # noqa

from glance.db.sqlalchemy.migrate_repo.schema import (
    Boolean, DateTime, Integer, String, create_tables,
-    drop_tables, from_migration_import) # noqa
+    from_migration_import) # noqa


def get_images_table(meta):
@ -32,17 +31,6 @@ def get_images_table(meta):
    return images

-
-def get_image_properties_table(meta):
-    """
-    No changes to the image properties table from 007...
-    """
-    (get_image_properties_table,) = from_migration_import(
-        '007_add_owner', ['get_image_properties_table'])
-
-    image_properties = get_image_properties_table(meta)
-    return image_properties
-

def get_image_members_table(meta):
    images = get_images_table(meta)  # noqa
@ -89,10 +77,3 @@ def upgrade(migrate_engine):
    meta.bind = migrate_engine
    tables = [get_image_members_table(meta)]
    create_tables(tables)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    tables = [get_image_members_table(meta)]
-    drop_tables(tables)

View File

@ -13,11 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.

-from migrate.changeset import *  # noqa
from sqlalchemy import *  # noqa

-from glance.db.sqlalchemy.migrate_repo.schema import (
-    Boolean, DateTime, Integer, String, Text, from_migration_import) # noqa
+from glance.db.sqlalchemy.migrate_repo.schema import (
+    Boolean, DateTime, Integer, String, Text) # noqa


def get_images_table(meta):
@ -57,17 +56,6 @@ def get_images_table(meta):
    return images

-
-def get_image_properties_table(meta):
-    """
-    No changes to the image properties table from 008...
-    """
-    (define_image_properties_table,) = from_migration_import(
-        '008_add_image_members_table', ['define_image_properties_table'])
-
-    image_properties = define_image_properties_table(meta)
-    return image_properties
-

def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
@ -79,13 +67,3 @@ def upgrade(migrate_engine):
    min_ram = Column('min_ram', Integer(), default=0)
    min_ram.create(images)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    images = get_images_table(meta)
-    images.columns['min_disk'].drop()
-    images.columns['min_ram'].drop()

View File

@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.

-from migrate.changeset import *  # noqa
from sqlalchemy import *  # noqa

from glance.db.sqlalchemy.migrate_repo.schema import from_migration_import
@ -30,28 +29,6 @@ def get_images_table(meta):
    return images

-
-def get_image_properties_table(meta):
-    """
-    No changes to the image properties table from 008...
-    """
-    (get_image_properties_table,) = from_migration_import(
-        '008_add_image_members_table', ['get_image_properties_table'])
-
-    image_properties = get_image_properties_table(meta)
-    return image_properties
-
-
-def get_image_members_table(meta):
-    """
-    No changes to the image members table from 008...
-    """
-    (get_image_members_table,) = from_migration_import(
-        '008_add_image_members_table', ['get_image_members_table'])
-
-    images = get_image_members_table(meta)
-    return images
-

def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
@ -64,17 +41,3 @@ def upgrade(migrate_engine):
        images_table.update(
            images_table.c.updated_at == None,
            {images_table.c.updated_at: images_table.c.created_at}))
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    images_table = get_images_table(meta)
-
-    # set updated_at to None if equal to created_at
-    conn = migrate_engine.connect()
-    conn.execute(
-        images_table.update(
-            images_table.c.updated_at == images_table.c.created_at,
-            {images_table.c.updated_at: None}))

View File

@ -24,10 +24,3 @@ def upgrade(migrate_engine):
    images = sqlalchemy.Table('images', meta, autoload=True)
    images.c.min_disk.alter(nullable=False)
    images.c.min_ram.alter(nullable=False)
-
-
-def downgrade(migrate_engine):
-    meta.bind = migrate_engine
-    images = sqlalchemy.Table('images', meta, autoload=True)
-    images.c.min_disk.alter(nullable=True)
-    images.c.min_ram.alter(nullable=True)

View File

@ -1,58 +0,0 @@
CREATE TEMPORARY TABLE images_backup (
id INTEGER NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER NOT NULL,
min_ram INTEGER NOT NULL,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
INSERT INTO images_backup
SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram
FROM images;
DROP TABLE images;
CREATE TABLE images (
id INTEGER NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER,
min_ram INTEGER,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
CREATE INDEX ix_images_deleted ON images (deleted);
CREATE INDEX ix_images_is_public ON images (is_public);
INSERT INTO images
SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram
FROM images_backup;
DROP TABLE images_backup;

View File

@ -53,29 +53,6 @@ def upgrade(migrate_engine):
        _upgrade_other(t_images, t_image_members, t_image_properties, dialect)

-
-def downgrade(migrate_engine):
-    """
-    Call the correct dialect-specific downgrade.
-    """
-    meta = sqlalchemy.MetaData()
-    meta.bind = migrate_engine
-    t_images = _get_table('images', meta)
-    t_image_members = _get_table('image_members', meta)
-    t_image_properties = _get_table('image_properties', meta)
-
-    dialect = migrate_engine.url.get_dialect().name
-    if dialect == "sqlite":
-        _update_all_uuids_to_ids(t_images, t_image_members, t_image_properties)
-        _downgrade_sqlite(meta, t_images, t_image_members, t_image_properties)
-    elif dialect == "ibm_db_sa":
-        _remove_db2_constraints(meta)
-        _update_all_uuids_to_ids(t_images, t_image_members, t_image_properties)
-        _downgrade_db2(meta, t_images, t_image_members, t_image_properties)
-    else:
-        _downgrade_other(t_images, t_image_members, t_image_properties,
-                         dialect)
-

def _upgrade_sqlite(meta, t_images, t_image_members, t_image_properties):
    """
    Upgrade 011 -> 012 with special SQLite-compatible logic.
@ -252,178 +229,6 @@ def _add_db2_constraints(meta):
        meta.bind.execute(command)

-
-def _remove_db2_constraints(meta):
-    # Remove the foreign keys constraints
-    sql_commands = [
-        """ALTER TABLE image_members DROP CONSTRAINT member_image_id;""",
-        """ALTER TABLE image_properties DROP CONSTRAINT property_image_id;"""
-    ]
-    for command in sql_commands:
-        meta.bind.execute(command)
-
-
-def _downgrade_db2(meta, t_images, t_image_members, t_image_properties):
-    """
-    Downgrade for DB2.
-    """
-    t_images.c.id.alter(sqlalchemy.Integer(), primary_key=True)
-
-    image_members_old = sqlalchemy.Table(
-        'image_members_old',
-        meta,
-        sqlalchemy.Column('id',
-                          sqlalchemy.Integer(),
-                          primary_key=True,
-                          nullable=False),
-        sqlalchemy.Column('image_id',
-                          sqlalchemy.Integer(),
-                          nullable=False,
-                          index=True),
-        sqlalchemy.Column('member',
-                          sqlalchemy.String(255),
-                          nullable=False),
-        sqlalchemy.Column('can_share',
-                          sqlalchemy.Boolean(),
-                          nullable=False,
-                          default=False),
-        sqlalchemy.Column('created_at',
-                          sqlalchemy.DateTime(),
-                          nullable=False),
-        sqlalchemy.Column('updated_at',
-                          sqlalchemy.DateTime()),
-        sqlalchemy.Column('deleted_at',
-                          sqlalchemy.DateTime()),
-        sqlalchemy.Column('deleted',
-                          sqlalchemy.Boolean(),
-                          nullable=False,
-                          default=False,
-                          index=True),
-        sqlalchemy.UniqueConstraint('image_id', 'member'),
-        extend_existing=True)
-
-    image_properties_old = sqlalchemy.Table(
-        'image_properties_old',
-        meta,
-        sqlalchemy.Column('id',
-                          sqlalchemy.Integer(),
-                          primary_key=True,
-                          nullable=False),
-        sqlalchemy.Column('image_id',
-                          sqlalchemy.Integer(),
-                          nullable=False,
-                          index=True),
-        sqlalchemy.Column('name',
-                          sqlalchemy.String(255),
-                          nullable=False),
-        sqlalchemy.Column('value',
-                          sqlalchemy.Text()),
-        sqlalchemy.Column('created_at',
-                          sqlalchemy.DateTime(),
-                          nullable=False),
-        sqlalchemy.Column('updated_at',
-                          sqlalchemy.DateTime()),
-        sqlalchemy.Column('deleted_at',
-                          sqlalchemy.DateTime()),
-        sqlalchemy.Column('deleted',
-                          sqlalchemy.Boolean(),
-                          nullable=False,
-                          default=False,
-                          index=True),
-        sqlalchemy.UniqueConstraint(
-            'image_id', 'name',
-            name='ix_image_properties_image_id_name'),
-        extend_existing=True)
-
-    image_members_old.create()
-    image_properties_old.create()
-
-    sql_commands = [
-        """INSERT INTO image_members_old
-            SELECT * FROM image_members;""",
-        """INSERT INTO image_properties_old
-            SELECT * FROM image_properties;""",
-    ]
-    for command in sql_commands:
-        meta.bind.execute(command)
-
-    t_image_members.drop()
-    t_image_properties.drop()
-
-    image_members_old.rename(name='image_members')
-    image_properties_old.rename(name='image_properties')
-
-
-def _downgrade_sqlite(meta, t_images, t_image_members, t_image_properties):
-    """
-    Downgrade 012 -> 011 with special SQLite-compatible logic.
-    """
-    sql_commands = [
-        """CREATE TABLE images_backup (
-            id INTEGER NOT NULL,
-            name VARCHAR(255),
-            size INTEGER,
-            status VARCHAR(30) NOT NULL,
-            is_public BOOLEAN NOT NULL,
-            location TEXT,
-            created_at DATETIME NOT NULL,
-            updated_at DATETIME,
-            deleted_at DATETIME,
-            deleted BOOLEAN NOT NULL,
-            disk_format VARCHAR(20),
-            container_format VARCHAR(20),
-            checksum VARCHAR(32),
-            owner VARCHAR(255),
-            min_disk INTEGER NOT NULL,
-            min_ram INTEGER NOT NULL,
-            PRIMARY KEY (id),
-            CHECK (is_public IN (0, 1)),
-            CHECK (deleted IN (0, 1))
-        );""",
-        """INSERT INTO images_backup
-            SELECT * FROM images;""",
-        """CREATE TABLE image_members_backup (
-            id INTEGER NOT NULL,
-            image_id INTEGER NOT NULL,
-            member VARCHAR(255) NOT NULL,
-            can_share BOOLEAN NOT NULL,
-            created_at DATETIME NOT NULL,
-            updated_at DATETIME,
-            deleted_at DATETIME,
-            deleted BOOLEAN NOT NULL,
-            PRIMARY KEY (id),
-            UNIQUE (image_id, member),
-            CHECK (can_share IN (0, 1)),
-            CHECK (deleted IN (0, 1)),
-            FOREIGN KEY(image_id) REFERENCES images (id)
-        );""",
-        """INSERT INTO image_members_backup
-            SELECT * FROM image_members;""",
-        """CREATE TABLE image_properties_backup (
-            id INTEGER NOT NULL,
-            image_id INTEGER NOT NULL,
-            name VARCHAR(255) NOT NULL,
-            value TEXT,
-            created_at DATETIME NOT NULL,
-            updated_at DATETIME,
-            deleted_at DATETIME,
-            deleted BOOLEAN NOT NULL,
-            PRIMARY KEY (id),
-            CHECK (deleted IN (0, 1)),
-            UNIQUE (image_id, name),
-            FOREIGN KEY(image_id) REFERENCES images (id)
-        );""",
-        """INSERT INTO image_properties_backup
-            SELECT * FROM image_properties;""",
-    ]
-    for command in sql_commands:
-        meta.bind.execute(command)
-
-    _sqlite_table_swap(meta, t_image_members, t_image_properties, t_images)
-

def _upgrade_other(t_images, t_image_members, t_image_properties, dialect):
    """
    Upgrade 011 -> 012 with logic for non-SQLite databases.
@ -445,41 +250,6 @@ def _upgrade_other(t_images, t_image_members, t_image_properties, dialect):
        fk.create()

-
-def _downgrade_other(t_images, t_image_members, t_image_properties, dialect):
-    """
-    Downgrade 012 -> 011 with logic for non-SQLite databases.
-    """
-    foreign_keys = _get_foreign_keys(t_images,
-                                     t_image_members,
-                                     t_image_properties, dialect)
-
-    for fk in foreign_keys:
-        fk.drop()
-
-    _update_all_uuids_to_ids(t_images, t_image_members, t_image_properties)
-
-    t_images.c.id.alter(primary_key=True)
-    # we have to use raw sql for postgresql as we have errors
-    # if we use alter type on sqlalchemy
-    if dialect == 'postgresql':
-        t_images.bind.execute('''ALTER TABLE images
-                                 ALTER COLUMN id TYPE INTEGER
-                                 USING (id::INTEGER)''')
-        t_images.bind.execute('''ALTER TABLE image_members
-                                 ALTER COLUMN image_id TYPE INTEGER
-                                 USING (image_id::INTEGER)''')
-        t_images.bind.execute('''ALTER TABLE image_properties
-                                 ALTER COLUMN image_id TYPE INTEGER
-                                 USING (image_id::INTEGER)''')
-    else:
-        t_images.c.id.alter(sqlalchemy.Integer())
-        t_image_members.c.image_id.alter(sqlalchemy.Integer())
-        t_image_properties.c.image_id.alter(sqlalchemy.Integer())
-
-    for fk in foreign_keys:
-        fk.create()
-

def _sqlite_table_swap(meta, t_image_members, t_image_properties, t_images):
    t_image_members.drop()
    t_image_properties.drop()

View File

@ -26,10 +26,3 @@ def upgrade(migrate_engine):
    images = Table('images', meta, autoload=True)
    images.create_column(protected)
-
-
-def downgrade(migrate_engine):
-    meta.bind = migrate_engine
-    images = Table('images', meta, autoload=True)
-    images.drop_column(protected)

View File

@ -1,62 +0,0 @@
--
-- This is necessary because sqlalchemy has various bugs preventing
-- downgrades from working correctly.
--
CREATE TEMPORARY TABLE images_backup (
id VARCHAR(36) NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER NOT NULL,
min_ram INTEGER NOT NULL,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
INSERT INTO images_backup
SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram
FROM images;
DROP TABLE images;
CREATE TABLE images (
id VARCHAR(36) NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER NOT NULL,
min_ram INTEGER NOT NULL,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
CREATE INDEX ix_images_is_public ON images (is_public);
CREATE INDEX ix_images_deleted ON images (deleted);
INSERT INTO images
SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram
FROM images_backup;
DROP TABLE images_backup;

View File

@ -64,10 +64,3 @@ def upgrade(migrate_engine):
    meta.bind = migrate_engine
    tables = [define_image_tags_table(meta)]
    glance_schema.create_tables(tables)
-
-
-def downgrade(migrate_engine):
-    meta = schema.MetaData()
-    meta.bind = migrate_engine
-    tables = [define_image_tags_table(meta)]
-    glance_schema.drop_tables(tables)

View File

@ -28,10 +28,6 @@ def upgrade(migrate_engine):
    migrate_location_credentials(migrate_engine, to_quoted=True)

-
-def downgrade(migrate_engine):
-    migrate_location_credentials(migrate_engine, to_quoted=False)
-

def migrate_location_credentials(migrate_engine, to_quoted):
    """
    Migrate location credentials for swift uri's between the quoted

View File

@ -26,10 +26,3 @@ def upgrade(migrate_engine):
    image_members = Table('image_members', meta, autoload=True)
    image_members.create_column(status)
-
-
-def downgrade(migrate_engine):
-    meta.bind = migrate_engine
-    image_members = Table('image_members', meta, autoload=True)
-    image_members.drop_column(status)

View File

@ -1,43 +0,0 @@
CREATE TEMPORARY TABLE image_members_backup (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
member VARCHAR(255) NOT NULL,
can_share BOOLEAN NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
UNIQUE (image_id, member),
CHECK (can_share IN (0, 1)),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(image_id) REFERENCES images (id)
);
INSERT INTO image_members_backup
SELECT id, image_id, member, can_share, created_at, updated_at, deleted_at, deleted
FROM image_members;
DROP TABLE image_members;
CREATE TABLE image_members (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
member VARCHAR(255) NOT NULL,
can_share BOOLEAN NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
UNIQUE (image_id, member),
CHECK (can_share IN (0, 1)),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(image_id) REFERENCES images (id)
);
INSERT INTO image_members
SELECT id, image_id, member, can_share, created_at, updated_at, deleted_at, deleted
FROM image_members_backup;
DROP TABLE image_members_backup;

View File

@ -26,8 +26,6 @@ migration performs the following steps for every entry in the images table:
Fixes bug #1081043
"""

-import types  # noqa
-
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
@ -48,10 +46,6 @@ def upgrade(migrate_engine):
    migrate_location_credentials(migrate_engine, to_quoted=True)

-
-def downgrade(migrate_engine):
-    migrate_location_credentials(migrate_engine, to_quoted=False)
-

def migrate_location_credentials(migrate_engine, to_quoted):
    """
    Migrate location credentials for encrypted swift uri's between the

View File

@ -55,10 +55,3 @@ def upgrade(migrate_engine):
    )

    schema.create_tables([image_locations_table])
-
-
-def downgrade(migrate_engine):
-    meta = sqlalchemy.schema.MetaData(migrate_engine)
-    image_locations_table = sqlalchemy.Table('image_locations', meta,
-                                             autoload=True)
-    schema.drop_tables([image_locations_table])

View File

@ -42,17 +42,3 @@ def upgrade(migrate_engine):
            'deleted_at': image.deleted_at,
        }

        image_locations_table.insert(values=values).execute()
-
-
-def downgrade(migrate_engine):
-    meta = sqlalchemy.schema.MetaData(migrate_engine)
-    images_table = get_images_table(meta)
-    image_locations_table = get_image_locations_table(meta)
-
-    image_records = image_locations_table.select().execute().fetchall()
-
-    for image_location in image_records:
-        images_table.update(
-            values={'location': image_location.value}).where(
-                images_table.c.id == image_location.image_id).execute()

View File

@ -15,8 +15,6 @@

import sqlalchemy

-from glance.db.sqlalchemy.migrate_repo import schema
-

def get_images_table(meta):
    return sqlalchemy.Table('images', meta, autoload=True)
@ -26,10 +24,3 @@ def upgrade(migrate_engine):
    meta = sqlalchemy.schema.MetaData(migrate_engine)
    images_table = get_images_table(meta)
    images_table.columns['location'].drop()
-
-
-def downgrade(migrate_engine):
-    meta = sqlalchemy.schema.MetaData(migrate_engine)
-    images_table = get_images_table(meta)
-    location = sqlalchemy.Column('location', schema.Text())
-    location.create(images_table)

View File

@ -29,7 +29,3 @@ def upgrade(migrate_engine):
        if table_name in tables:
            migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" %
                                   table_name)
-
-
-def downgrade(migrate_engine):
-    pass

View File

@ -17,7 +17,6 @@ import re

from migrate.changeset import UniqueConstraint
from oslo_db import exception as db_exception
-from sqlalchemy import and_, func, orm
from sqlalchemy import MetaData, Table
from sqlalchemy.exc import OperationalError, ProgrammingError
@ -45,20 +44,6 @@ def upgrade(migrate_engine):
                     table=image_members).create()

-
-def downgrade(migrate_engine):
-    image_members = _get_image_members_table(migrate_engine)
-
-    if migrate_engine.name in ('mysql', 'postgresql'):
-        _sanitize(migrate_engine, image_members)
-        UniqueConstraint('image_id',
-                         name=NEW_KEYNAME,
-                         table=image_members).drop()
-        UniqueConstraint('image_id',
-                         'member',
-                         name=_get_original_keyname(migrate_engine.name),
-                         table=image_members).create()
-

def _get_image_members_table(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
@ -74,23 +59,3 @@ def _infer_original_keyname(table):
    for i in table.indexes:
        if ORIGINAL_KEYNAME_RE.match(i.name):
            return i.name
-
-
-def _sanitize(migrate_engine, table):
-    """
-    Avoid possible integrity error by removing deleted rows
-    to accommodate less restrictive uniqueness constraint
-    """
-    session = orm.sessionmaker(bind=migrate_engine)()
-    # find the image_member rows containing duplicate combinations
-    # of image_id and member
-    qry = (session.query(table.c.image_id, table.c.member)
-           .group_by(table.c.image_id, table.c.member)
-           .having(func.count() > 1))
-    for image_id, member in qry:
-        # only remove duplicate rows already marked deleted
-        d = table.delete().where(and_(table.c.deleted == True,
-                                      table.c.image_id == image_id,
-                                      table.c.member == member))
-        d.execute()
-    session.close()

View File

@ -17,7 +17,3 @@
def upgrade(migrate_engine):
    pass
-
-
-def downgrade(migration_engine):
-    pass

View File

@ -17,7 +17,3 @@
def upgrade(migrate_engine):
    pass
-
-
-def downgrade(migration_engine):
-    pass

View File

@ -17,7 +17,3 @@
def upgrade(migrate_engine):
    pass
-
-
-def downgrade(migration_engine):
-    pass

View File

@ -30,14 +30,3 @@ def upgrade(migrate_engine):
                                  schema.PickleType(),
                                  default={})
    meta_data.create(image_locations_table)
-
-
-def downgrade(migrate_engine):
-    meta = sqlalchemy.schema.MetaData()
-    meta.bind = migrate_engine
-
-    image_locations_table = sqlalchemy.Table('image_locations',
-                                             meta,
-                                             autoload=True)
-    image_locations_table.columns['meta_data'].drop()

View File

@ -26,13 +26,3 @@ def upgrade(migrate_engine):
    index = Index(INDEX_NAME, images.c.checksum)
    index.create(migrate_engine)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    images = Table('images', meta, autoload=True)
-
-    index = Index(INDEX_NAME, images.c.checksum)
-    index.drop(migrate_engine)

View File

@ -26,13 +26,3 @@ def upgrade(migrate_engine):
    index = Index(INDEX_NAME, images.c.owner)
    index.create(migrate_engine)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    images = Table('images', meta, autoload=True)
-
-    index = Index(INDEX_NAME, images.c.owner)
-    index.drop(migrate_engine)

View File

@ -13,11 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.

-import json
import pickle

import sqlalchemy
-from sqlalchemy import MetaData, Table, Column # noqa
+from sqlalchemy import Table, Column # noqa

from glance.db.sqlalchemy import models
@ -44,29 +43,3 @@ def upgrade(migrate_engine):
    conn.close()

    image_locations.columns['meta_data'].drop()
    image_locations.columns['storage_meta_data'].alter(name='meta_data')
-
-
-def downgrade(migrate_engine):
-    meta = sqlalchemy.schema.MetaData(migrate_engine)
-    image_locations = Table('image_locations', meta, autoload=True)
-
-    old_meta_data = Column('old_meta_data', sqlalchemy.PickleType(),
-                           default={})
-    old_meta_data.create(image_locations)
-
-    noj = json.dumps({})
-    s = sqlalchemy.sql.select([image_locations]).where(
-        image_locations.c.meta_data != noj)
-    conn = migrate_engine.connect()
-    res = conn.execute(s)
-
-    for row in res:
-        x = row['meta_data']
-        meta_data = json.loads(x)
-        if meta_data != {}:
-            stmt = image_locations.update().where(
-                image_locations.c.id == row['id']).values(
-                    old_meta_data=meta_data)
-            conn.execute(stmt)
-    conn.close()
-
-    image_locations.columns['meta_data'].drop()
-    image_locations.columns['old_meta_data'].alter(name='meta_data')

View File

@ -17,7 +17,7 @@
from sqlalchemy.schema import (Column, MetaData, Table, Index)

from glance.db.sqlalchemy.migrate_repo.schema import (
-    Boolean, DateTime, String, Text, create_tables, drop_tables) # noqa
+    Boolean, DateTime, String, Text, create_tables) # noqa


def define_tasks_table(meta):
@ -56,10 +56,3 @@ def upgrade(migrate_engine):
    meta.bind = migrate_engine
    tables = [define_tasks_table(meta)]
    create_tables(tables)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    tables = [define_tasks_table(meta)]
-    drop_tables(tables)

View File

@ -73,9 +73,3 @@ def upgrade(migrate_engine):
        stmt.execute()
    session.close()
-
-
-def downgrade(migrate_engine):
-    # NOTE(flaper87): There's no downgrade
-    # path for this.
-    return

View File

@ -17,8 +17,7 @@ from sqlalchemy.schema import (Column, ForeignKey, MetaData, Table)

from glance.db.sqlalchemy.migrate_repo.schema import (String,
                                                      Text,
-                                                      create_tables,
-                                                      drop_tables) # noqa
+                                                      create_tables) # noqa


TASKS_MIGRATE_COLUMNS = ['input', 'message', 'result']
@ -65,29 +64,3 @@ def upgrade(migrate_engine):

    for col_name in TASKS_MIGRATE_COLUMNS:
        tasks_table.columns[col_name].drop()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    tasks_table = Table('tasks', meta, autoload=True)
-    task_info_table = Table('task_info', meta, autoload=True)
-
-    for col_name in TASKS_MIGRATE_COLUMNS:
-        column = Column(col_name, Text())
-        column.create(tasks_table)
-
-    task_info_records = task_info_table.select().execute().fetchall()
-
-    for task_info in task_info_records:
-        values = {
-            'input': task_info.input,
-            'result': task_info.result,
-            'message': task_info.message
-        }
-        tasks_table.update(values=values).where(
-            tasks_table.c.id == task_info.task_id).execute()
-
-    drop_tables([task_info_table])

View File

@ -39,14 +39,3 @@ def upgrade(migrate_engine):
                                     images_table.c.status == src)
        image_locations_table.update(values={'status': dst}).where(
            image_locations_table.c.image_id.in_(subq)).execute()
-
-
-def downgrade(migrate_engine):
-    meta = sqlalchemy.schema.MetaData()
-    meta.bind = migrate_engine
-
-    image_locations_table = sqlalchemy.Table('image_locations', meta,
-                                             autoload=True)
-
-    # Remove 'status' column from image_locations table
-    image_locations_table.columns['status'].drop()

View File

@ -24,11 +24,3 @@ def upgrade(migrate_engine):
    virtual_size = sqlalchemy.Column('virtual_size',
                                     sqlalchemy.BigInteger)
    images.create_column(virtual_size)
-
-
-def downgrade(migrate_engine):
-    meta = sqlalchemy.MetaData()
-    meta.bind = migrate_engine
-    images = sqlalchemy.Table('images', meta, autoload=True)
-    images.columns['virtual_size'].drop()

View File

@ -18,8 +18,7 @@ from sqlalchemy.schema import (

from glance.common import timeutils
from glance.db.sqlalchemy.migrate_repo.schema import (
-    Boolean, DateTime, Integer, String, Text, create_tables,
-    drop_tables) # noqa
+    Boolean, DateTime, Integer, String, Text, create_tables) # noqa


RESOURCE_TYPES = [u'OS::Glance::Image', u'OS::Cinder::Volume',
@ -207,14 +206,3 @@ def upgrade(migrate_engine):
    resource_types_table = _get_metadef_resource_types_table(meta)
    _populate_resource_types(resource_types_table)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    tables = [define_metadef_objects_table(meta),
-              define_metadef_properties_table(meta),
-              define_metadef_namespace_resource_types_table(meta),
-              define_metadef_resource_types_table(meta),
-              define_metadef_namespaces_table(meta)]
-    drop_tables(tables)

View File

@ -23,12 +23,3 @@ def upgrade(migrate_engine):
    metadef_properties = Table('metadef_properties', meta, autoload=True)
    metadef_properties.c.schema.alter(name='json_schema')
-
-
-def downgrade(migrate_engine):
-    meta = MetaData(bind=migrate_engine)
-
-    metadef_objects = Table('metadef_objects', meta, autoload=True)
-    metadef_objects.c.json_schema.alter(name='schema')
-
-    metadef_properties = Table('metadef_properties', meta, autoload=True)
-    metadef_properties.c.json_schema.alter(name='schema')

View File

@ -11,8 +11,8 @@
# under the License.

import sqlalchemy
-from sqlalchemy import Table, Index, UniqueConstraint, Sequence
-from sqlalchemy.schema import (AddConstraint, DropConstraint, CreateIndex,
+from sqlalchemy import Table, Index, UniqueConstraint
+from sqlalchemy.schema import (AddConstraint, DropConstraint,
                               ForeignKeyConstraint)
from sqlalchemy import sql
from sqlalchemy import update
@ -82,42 +82,3 @@ def upgrade(migrate_engine):
    if len(image_locations.foreign_keys) == 0:
        migrate_engine.execute(AddConstraint(ForeignKeyConstraint(
            [image_locations.c.image_id], [images.c.id])))
-
-
-def downgrade(migrate_engine):
-    meta = sqlalchemy.MetaData()
-    meta.bind = migrate_engine
-
-    if migrate_engine.name not in ['mysql', 'postgresql']:
-        return
-
-    image_properties = Table('image_properties', meta, autoload=True)
-    image_members = Table('image_members', meta, autoload=True)
-    images = Table('images', meta, autoload=True)
-
-    if migrate_engine.name == 'postgresql':
-        constraint = UniqueConstraint(image_properties.c.image_id,
-                                      image_properties.c.name,
-                                      name='ix_image_properties_image_id_name')
-        migrate_engine.execute(DropConstraint(constraint))
-
-        constraint = UniqueConstraint(image_properties.c.image_id,
-                                      image_properties.c.name)
-        migrate_engine.execute(AddConstraint(constraint))
-
-        index = Index('ix_image_properties_image_id_name',
-                      image_properties.c.image_id,
-                      image_properties.c.name)
-        migrate_engine.execute(CreateIndex(index))
-
-        images.c.id.alter(server_default=Sequence('images_id_seq')
-                          .next_value())
-
-    if migrate_engine.name == 'mysql':
-        constraint = UniqueConstraint(image_properties.c.image_id,
-                                      image_properties.c.name,
-                                      name='image_id')
-        migrate_engine.execute(AddConstraint(constraint))
-
-    image_members.c.status.alter(nullable=True, server_default=None)
-    images.c.protected.alter(nullable=True, server_default=None)

View File

@ -1,147 +0,0 @@
CREATE TEMPORARY TABLE images_backup (
id VARCHAR(36) NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER,
min_ram INTEGER,
protected BOOLEAN,
virtual_size INTEGER,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
INSERT INTO images_backup
SELECT id, name, size, status, is_public, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram, virtual_size, protected
FROM images;
DROP TABLE images;
CREATE TABLE images (
id VARCHAR(36) NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER NOT NULL,
min_ram INTEGER NOT NULL,
protected BOOLEAN,
virtual_size INTEGER,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
CREATE INDEX owner_image_idx ON images (owner);
CREATE INDEX checksum_image_idx ON images (checksum);
INSERT INTO images
SELECT id, name, size, status, is_public, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram, protected, virtual_size
FROM images_backup;
DROP TABLE images_backup;
CREATE TEMPORARY TABLE image_members_backup (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
member VARCHAR(255) NOT NULL,
can_share BOOLEAN NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
status VARCHAR(20),
PRIMARY KEY (id),
UNIQUE (image_id, member),
CHECK (can_share IN (0, 1)),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(image_id) REFERENCES images (id)
);
INSERT INTO image_members_backup
SELECT id, image_id, member, can_share, created_at, updated_at, deleted_at, deleted, status
FROM image_members;
DROP TABLE image_members;
CREATE TABLE image_members (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
member VARCHAR(255) NOT NULL,
can_share BOOLEAN NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
status VARCHAR(20),
PRIMARY KEY (id),
UNIQUE (image_id, member),
CHECK (can_share IN (0, 1)),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(image_id) REFERENCES images (id)
);
INSERT INTO image_members
SELECT id, image_id, member, can_share, created_at, updated_at, deleted_at, deleted, status
FROM image_members_backup;
DROP TABLE image_members_backup;
CREATE TEMPORARY TABLE image_properties_backup (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
name VARCHAR(255) NOT NULL,
value TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id)
);
INSERT INTO image_properties_backup
SELECT id, image_id, name, value, created_at, updated_at, deleted_at, deleted
FROM image_properties;
DROP TABLE image_properties;
CREATE TABLE image_properties (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
name VARCHAR(255) NOT NULL,
value TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
UNIQUE (image_id, name),
FOREIGN KEY(image_id) REFERENCES images (id)
);
CREATE INDEX ix_image_properties_name ON image_properties (name);
INSERT INTO image_properties (id, image_id, name, value, created_at, updated_at, deleted_at, deleted)
SELECT id, image_id, name, value, created_at, updated_at, deleted_at, deleted
FROM image_properties_backup;
DROP TABLE image_properties_backup;

View File

@ -16,7 +16,7 @@ from sqlalchemy.schema import (
    Column, Index, MetaData, Table, UniqueConstraint) # noqa

from glance.db.sqlalchemy.migrate_repo.schema import (
-    DateTime, Integer, String, create_tables, drop_tables) # noqa
+    DateTime, Integer, String, create_tables) # noqa


def define_metadef_tags_table(meta):
@ -49,10 +49,3 @@ def upgrade(migrate_engine):
    meta.bind = migrate_engine
    tables = [define_metadef_tags_table(meta)]
    create_tables(tables)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    tables = [define_metadef_tags_table(meta)]
-    drop_tables(tables)

View File

@@ -14,7 +14,7 @@
import migrate
import sqlalchemy
from sqlalchemy import inspect
from sqlalchemy import (Table, Index, UniqueConstraint)
-from sqlalchemy.schema import (AddConstraint, DropConstraint)
+from sqlalchemy.schema import (DropConstraint)

def _change_db2_unique_constraint(operation_type, constraint_name, *columns):
@@ -194,173 +194,3 @@ def upgrade(migrate_engine):
        # origional logic for other database backends.
        for (constraint_name, cols) in constraints:
            _change_db2_unique_constraint('create', constraint_name, *cols)
def downgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
inspector = inspect(migrate_engine)
metadef_namespaces = Table('metadef_namespaces', meta, autoload=True)
metadef_properties = Table('metadef_properties', meta, autoload=True)
metadef_objects = Table('metadef_objects', meta, autoload=True)
metadef_ns_res_types = Table('metadef_namespace_resource_types',
meta, autoload=True)
metadef_resource_types = Table('metadef_resource_types', meta,
autoload=True)
metadef_tags = Table('metadef_tags', meta, autoload=True)
constraints = [('ix_namespaces_namespace',
[metadef_namespaces.c.namespace]),
('ix_objects_namespace_id_name',
[metadef_objects.c.namespace_id,
metadef_objects.c.name]),
('ix_metadef_properties_namespace_id_name',
[metadef_properties.c.namespace_id,
metadef_properties.c.name])]
metadef_tags_constraints = inspector.get_unique_constraints('metadef_tags')
for constraint in metadef_tags_constraints:
if set(constraint['column_names']) == set(['namespace_id', 'name']):
constraints.append((constraint['name'],
[metadef_tags.c.namespace_id,
metadef_tags.c.name]))
if meta.bind.name == "ibm_db_sa":
# For db2, the following constraints need to be dropped first,
# otherwise the index like ix_metadef_ns_res_types_namespace_id
# will fail to drop. These constraints will be added back at
# the end. It should not affect the origional logic for other
# database backends.
for (constraint_name, cols) in constraints:
_change_db2_unique_constraint('drop', constraint_name, *cols)
else:
Index('ix_namespaces_namespace',
metadef_namespaces.c.namespace).create()
Index('ix_objects_namespace_id_name', metadef_objects.c.namespace_id,
metadef_objects.c.name).create()
Index('ix_metadef_properties_namespace_id_name',
metadef_properties.c.namespace_id,
metadef_properties.c.name).create()
Index('ix_metadef_tags_name', metadef_tags.c.name).drop()
Index('ix_metadef_tags_namespace_id', metadef_tags.c.namespace_id,
metadef_tags.c.name).drop()
if migrate_engine.name != 'sqlite':
fkc = migrate.ForeignKeyConstraint([metadef_tags.c.namespace_id],
[metadef_namespaces.c.id])
fkc.drop()
if meta.bind.name != "ibm_db_sa":
# This index would not be created when it is db2 backend.
Index('ix_tags_namespace_id_name', metadef_tags.c.namespace_id,
metadef_tags.c.name).create()
else:
# NOTE(ochuprykov): fkc can't be dropped via `migrate` in sqlite,so it
# is necessary to recreate table manually and populate it with data
temp = Table('temp_', meta, *(
[c.copy() for c in metadef_tags.columns]))
temp.create()
migrate_engine.execute('insert into temp_ select * from metadef_tags')
metadef_tags.drop()
migrate_engine.execute('alter table temp_ rename to metadef_tags')
# Refresh old metadata for this table
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
metadef_tags = Table('metadef_tags', meta, autoload=True)
Index('ix_tags_namespace_id_name', metadef_tags.c.namespace_id,
metadef_tags.c.name).create()
uc = migrate.UniqueConstraint(metadef_tags.c.namespace_id,
metadef_tags.c.name)
uc.create()
if migrate_engine.name == 'mysql':
constraint = UniqueConstraint(metadef_properties.c.namespace_id,
metadef_properties.c.name,
name='namespace_id')
migrate_engine.execute(AddConstraint(constraint))
constraint = UniqueConstraint(metadef_objects.c.namespace_id,
metadef_objects.c.name,
name='namespace_id')
migrate_engine.execute(AddConstraint(constraint))
constraint = UniqueConstraint(metadef_ns_res_types.c.resource_type_id,
metadef_ns_res_types.c.namespace_id,
name='resource_type_id')
migrate_engine.execute(AddConstraint(constraint))
constraint = UniqueConstraint(metadef_namespaces.c.namespace,
name='namespace')
migrate_engine.execute(AddConstraint(constraint))
constraint = UniqueConstraint(metadef_resource_types.c.name,
name='name')
migrate_engine.execute(AddConstraint(constraint))
if migrate_engine.name == 'postgresql':
constraint = UniqueConstraint(
metadef_objects.c.namespace_id,
metadef_objects.c.name)
migrate_engine.execute(AddConstraint(constraint))
constraint = UniqueConstraint(
metadef_properties.c.namespace_id,
metadef_properties.c.name)
migrate_engine.execute(AddConstraint(constraint))
constraint = UniqueConstraint(
metadef_namespaces.c.namespace)
migrate_engine.execute(AddConstraint(constraint))
constraint = UniqueConstraint(
metadef_resource_types.c.name)
migrate_engine.execute(AddConstraint(constraint))
constraint = UniqueConstraint(
metadef_tags.c.namespace_id,
metadef_tags.c.name,
name='metadef_tags_namespace_id_name_key')
migrate_engine.execute(AddConstraint(constraint))
if migrate_engine.name == 'mysql':
fkc = migrate.ForeignKeyConstraint(
[metadef_ns_res_types.c.resource_type_id],
[metadef_namespaces.c.id],
name='metadef_namespace_resource_types_ibfk_2')
fkc.drop()
Index('ix_metadef_ns_res_types_namespace_id',
metadef_ns_res_types.c.namespace_id).drop()
fkc.create()
else:
Index('ix_metadef_ns_res_types_namespace_id',
metadef_ns_res_types.c.namespace_id).drop()
Index('ix_metadef_namespaces_namespace',
metadef_namespaces.c.namespace).drop()
Index('ix_metadef_namespaces_owner', metadef_namespaces.c.owner).drop()
Index('ix_metadef_objects_name', metadef_objects.c.name).drop()
Index('ix_metadef_objects_namespace_id',
metadef_objects.c.namespace_id).drop()
Index('ix_metadef_properties_name', metadef_properties.c.name).drop()
Index('ix_metadef_properties_namespace_id',
metadef_properties.c.namespace_id).drop()
if meta.bind.name == "ibm_db_sa":
# For db2, add these constraints back. It should not affect the
# origional logic for other database backends.
for (constraint_name, cols) in constraints:
_change_db2_unique_constraint('create', constraint_name, *cols)
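
The db2 branches above drive everything through the module's _change_db2_unique_constraint helper, whose body lies outside this hunk. A plausible sketch of its shape, assuming sqlalchemy-migrate's named UniqueConstraint; this is an assumption, not the actual Glance implementation:

    import migrate

    def _change_db2_unique_constraint(operation_type, constraint_name, *columns):
        # Hypothetical reconstruction: wrap the columns in a named
        # UniqueConstraint and dispatch to its create() or drop() method.
        constraint = migrate.UniqueConstraint(*columns, name=constraint_name)
        getattr(constraint, operation_type)()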


@@ -231,27 +231,6 @@ def _update_sqlite_namespace_id_name_constraint(metadef, metadef_namespaces,
        name=new_fk_name).create()
def _downgrade_sqlite_namespace_id_name_constraint(metadef,
metadef_namespaces,
constraint_name,
fk_name):
migrate.UniqueConstraint(
metadef.c.namespace_id,
metadef.c.name,
name=constraint_name).drop()
migrate.UniqueConstraint(
metadef.c.namespace_id,
metadef.c.name).create()
migrate.ForeignKeyConstraint(
[metadef.c.namespace_id],
[metadef_namespaces.c.id],
name=fk_name).drop()
migrate.ForeignKeyConstraint(
[metadef.c.namespace_id],
[metadef_namespaces.c.id]).create()
def _drop_unique_constraint_if_exists(inspector, table_name, metadef):
    name = _get_unique_constraint_name(inspector,
                                       table_name,
@@ -281,24 +260,6 @@ def _drop_index_with_fk_constraint(metadef, metadef_namespaces,
    fkc.create()
def _downgrade_constraint_with_fk(metadef, metadef_namespaces,
constraint_name,
fk_curr_name, fk_next_name):
fkc = migrate.ForeignKeyConstraint([metadef.c.namespace_id],
[metadef_namespaces.c.id],
name=fk_curr_name)
fkc.drop()
migrate.UniqueConstraint(metadef.c.namespace_id, metadef.c.name,
name=constraint_name).drop()
fkc = migrate.ForeignKeyConstraint([metadef.c.namespace_id],
[metadef_namespaces.c.id],
name=fk_next_name)
fkc.create()
def _get_unique_constraint_name(inspector, table_name, columns):
    constraints = inspector.get_unique_constraints(table_name)
    for constraint in constraints:
@@ -479,126 +440,3 @@ def upgrade(migrate_engine):
    migrate.UniqueConstraint(
        metadef_resource_types.c.name,
        name='uq_metadef_resource_types_name').create()
def downgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
# ORM tables
metadef_namespaces = Table('metadef_namespaces', meta, autoload=True)
metadef_objects = Table('metadef_objects', meta, autoload=True)
metadef_properties = Table('metadef_properties', meta, autoload=True)
metadef_tags = Table('metadef_tags', meta, autoload=True)
metadef_resource_types = Table('metadef_resource_types', meta,
autoload=True)
metadef_ns_res_types = Table('metadef_namespace_resource_types',
meta, autoload=True)
# Drop the unique constraints
if migrate_engine.name == 'sqlite':
# Objects
_downgrade_sqlite_namespace_id_name_constraint(
metadef_objects, metadef_namespaces,
'uq_metadef_objects_namespace_id_name',
'metadef_objects_fk_1')
# Properties
_downgrade_sqlite_namespace_id_name_constraint(
metadef_properties, metadef_namespaces,
'uq_metadef_properties_namespace_id_name',
'metadef_properties_fk_1')
# Tags
_downgrade_sqlite_namespace_id_name_constraint(
metadef_tags, metadef_namespaces,
'uq_metadef_tags_namespace_id_name',
'metadef_tags_fk_1')
# Namespaces
migrate.UniqueConstraint(
metadef_namespaces.c.namespace,
name='uq_metadef_namespaces_namespace').drop()
migrate.UniqueConstraint(
metadef_namespaces.c.namespace).create()
# ResourceTypes
migrate.UniqueConstraint(
metadef_resource_types.c.name,
name='uq_metadef_resource_types_name').drop()
migrate.UniqueConstraint(
metadef_resource_types.c.name).create()
else:
# For mysql, must drop foreign key constraints before dropping the
# unique constraint. So drop the fkc, then drop the constraints,
# then recreate the fkc.
# Objects
_downgrade_constraint_with_fk(
metadef_objects, metadef_namespaces,
'uq_metadef_objects_namespace_id_name',
'metadef_objects_fk_1', None)
# Properties
_downgrade_constraint_with_fk(
metadef_properties, metadef_namespaces,
'uq_metadef_properties_namespace_id_name',
'metadef_properties_fk_1', None)
# Tags
_downgrade_constraint_with_fk(
metadef_tags, metadef_namespaces,
'uq_metadef_tags_namespace_id_name',
'metadef_tags_fk_1', 'metadef_tags_namespace_id_fkey')
# Namespaces
migrate.UniqueConstraint(
metadef_namespaces.c.namespace,
name='uq_metadef_namespaces_namespace').drop()
# Resource_types
migrate.UniqueConstraint(
metadef_resource_types.c.name,
name='uq_metadef_resource_types_name').drop()
# Create dropped unique constraints as bad, non-unique indexes
Index('ix_metadef_objects_namespace_id',
metadef_objects.c.namespace_id).create()
Index('ix_metadef_properties_namespace_id',
metadef_properties.c.namespace_id).create()
# These need to be done before the metadef_tags and metadef_namespaces
# unique constraints are created to avoid 'tuple out of range' errors
# in db2.
Index('ix_metadef_tags_namespace_id',
metadef_tags.c.namespace_id,
metadef_tags.c.name).create()
Index('ix_metadef_namespaces_namespace',
metadef_namespaces.c.namespace).create()
# Create these everywhere, except for db2
if migrate_engine.name != 'ibm_db_sa':
Index('ix_metadef_resource_types_name',
metadef_resource_types.c.name).create()
Index('ix_metadef_ns_res_types_res_type_id_ns_id',
metadef_ns_res_types.c.resource_type_id,
metadef_ns_res_types.c.namespace_id).create()
else:
# Recreate the badly named unique constraints in db2
migrate.UniqueConstraint(
metadef_namespaces.c.namespace,
name='ix_namespaces_namespace').create()
migrate.UniqueConstraint(
metadef_objects.c.namespace_id,
metadef_objects.c.name,
name='ix_objects_namespace_id_name').create()
migrate.UniqueConstraint(
metadef_properties.c.namespace_id,
metadef_properties.c.name,
name='ix_metadef_properties_namespace_id_name').create()
migrate.UniqueConstraint(
metadef_tags.c.namespace_id,
metadef_tags.c.name).create()
migrate.UniqueConstraint(
metadef_resource_types.c.name,
name='ix_metadef_resource_types_name').create()


@@ -24,8 +24,3 @@ def upgrade(migrate_engine):
    resource_types_table.update(values={'name': 'OS::Nova::Server'}).where(
        resource_types_table.c.name == 'OS::Nova::Instance').execute()
def downgrade(migrate_engine):
# NOTE(TravT): This is a bug fix (1537903). It shouldn't be downgraded.
return
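
The stub above existed only because sqlalchemy-migrate expected every script to export a downgrade symbol; the data fix itself, renaming the OS::Nova::Instance resource type, is one-way by design. For illustration, a standalone SQLAlchemy sketch of the same rename, in the pre-2.0 SQLAlchemy style this tree uses; the engine URL and credentials are placeholders, and in Glance the real connection comes from configuration:

    import sqlalchemy

    # Placeholder URL; Glance reads the real connection from its config.
    engine = sqlalchemy.create_engine('mysql://user:secret@localhost/glance')
    meta = sqlalchemy.MetaData(bind=engine)
    resource_types = sqlalchemy.Table('metadef_resource_types', meta,
                                      autoload=True)
    # The same one-way rename the upgrade above performs.
    engine.execute(
        resource_types.update()
        .where(resource_types.c.name == 'OS::Nova::Instance')
        .values(name='OS::Nova::Server'))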


@@ -94,13 +94,6 @@ class TestLegacyManage(TestManageBase):
                               db_api.get_engine(),
                               db_migration.MIGRATE_REPO_PATH, '20')
@mock.patch.object(migration, 'db_sync')
def test_legacy_db_downgrade_version(self, db_sync):
self._main_test_helper(['glance.cmd.manage', 'db_downgrade', '20'],
migration.db_sync,
db_api.get_engine(),
db_migration.MIGRATE_REPO_PATH, '20')
    def test_db_metadefs_unload(self):
        db_metadata.db_unload_metadefs = mock.Mock()
        self._main_test_helper(['glance.cmd.manage', 'db_unload_metadefs'],
@@ -207,13 +200,6 @@ class TestManage(TestManageBase):
                               db_api.get_engine(),
                               db_migration.MIGRATE_REPO_PATH, '20')
@mock.patch.object(migration, 'db_sync')
def test_db_downgrade_version(self, db_sync):
self._main_test_helper(['glance.cmd.manage', 'db', 'downgrade', '20'],
migration.db_sync,
db_api.get_engine(),
db_migration.MIGRATE_REPO_PATH, '20')
    def test_db_metadefs_unload(self):
        db_metadata.db_unload_metadefs = mock.Mock()
        self._main_test_helper(['glance.cmd.manage', 'db', 'unload_metadefs'],


@@ -38,7 +38,6 @@ from oslo_utils import uuidutils
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
import sqlalchemy
-from sqlalchemy import inspect
import sqlalchemy.types as types

from glance.common import crypt
@@ -47,9 +46,11 @@ from glance.common import timeutils
from glance.db import migration
from glance.db.sqlalchemy import migrate_repo
from glance.db.sqlalchemy.migrate_repo.schema import from_migration_import
+from glance.db.sqlalchemy.migrate_repo import versions
from glance.db.sqlalchemy import models
from glance.db.sqlalchemy import models_glare
from glance.db.sqlalchemy import models_metadef
+import glance.tests.utils as test_utils

from glance.i18n import _
@@ -366,44 +367,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
            if row['name'] == 'ramdisk_id':
                self.assertEqual(row['value'], uuids['ramdisk'])
def _post_downgrade_012(self, engine):
images = db_utils.get_table(engine, 'images')
image_members = db_utils.get_table(engine, 'image_members')
image_properties = db_utils.get_table(engine, 'image_properties')
# Find kernel, ramdisk and normal images. Make sure id has been
# changed back to an integer
ids = {}
for name in ('kernel', 'ramdisk', 'normal'):
image_name = '%s migration 012 test' % name
rows = images.select().where(
images.c.name == image_name).execute().fetchall()
self.assertEqual(1, len(rows))
row = rows[0]
self.assertFalse(uuidutils.is_uuid_like(row['id']))
ids[name] = row['id']
# Find all image_members to ensure image_id has been updated
results = image_members.select().where(
image_members.c.image_id == ids['normal']).execute().fetchall()
self.assertEqual(1, len(results))
# Find all image_properties to ensure image_id has been updated
# as well as ensure kernel_id and ramdisk_id values have been
# updated too
results = image_properties.select().where(
image_properties.c.image_id == ids['normal']).execute().fetchall()
self.assertEqual(2, len(results))
for row in results:
self.assertIn(row['name'], ('kernel_id', 'ramdisk_id'))
if row['name'] == 'kernel_id':
self.assertEqual(row['value'], str(ids['kernel']))
if row['name'] == 'ramdisk_id':
self.assertEqual(row['value'], str(ids['ramdisk']))
    def _assert_invalid_swift_uri_raises_bad_store_uri(self,
                                                       legacy_parse_uri_fn):
        invalid_uri = ('swift://http://acct:usr:pass@example.com'
@@ -718,18 +681,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
        self.assertIn((owner_index, columns), index_data)
def _post_downgrade_028(self, engine):
owner_index = "owner_image_idx"
columns = ["owner"]
images_table = db_utils.get_table(engine, 'images')
index_data = [(idx.name, idx.columns.keys())
for idx in images_table.indexes
if idx.name == owner_index]
self.assertNotIn((owner_index, columns), index_data)
    def _pre_upgrade_029(self, engine):
        image_locations = db_utils.get_table(engine, 'image_locations')
@@ -774,19 +725,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
            d = jsonutils.loads(r['meta_data'])
            self.assertEqual(d, meta_data)
def _post_downgrade_029(self, engine):
image_id = 'fake_029_id'
image_locations = db_utils.get_table(engine, 'image_locations')
records = image_locations.select().where(
image_locations.c.image_id == image_id).execute().fetchall()
for r in records:
md = r['meta_data']
d = pickle.loads(md)
self.assertIsInstance(d, dict)
    def _check_030(self, engine, data):
        table = "tasks"
        index_type = ('ix_tasks_type', ['type'])
@@ -828,10 +766,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
        col_data = [col.name for col in tasks_table.columns]
        self.assertEqual(expected, col_data)
def _post_downgrade_030(self, engine):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
db_utils.get_table, engine, 'tasks')
    def _pre_upgrade_031(self, engine):
        images = db_utils.get_table(engine, 'images')
        now = datetime.datetime.now()
@@ -928,26 +862,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
        self.assertNotIn('result', tasks_table.c)
        self.assertNotIn('message', tasks_table.c)
def _post_downgrade_032(self, engine):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
db_utils.get_table, engine, 'task_info')
tasks_table = db_utils.get_table(engine, 'tasks')
records = tasks_table.select().execute().fetchall()
self.assertEqual(2, len(records))
tasks = {t.id: t for t in records}
task_1 = tasks.get('task-1')
self.assertEqual('some input', task_1.input)
self.assertEqual('successful', task_1.result)
self.assertIsNone(task_1.message)
task_2 = tasks.get('task-2')
self.assertIsNone(task_2.input)
self.assertIsNone(task_2.result)
self.assertIsNone(task_2.message)
    def _pre_upgrade_033(self, engine):
        images = db_utils.get_table(engine, 'images')
        image_locations = db_utils.get_table(engine, 'image_locations')
@@ -997,10 +911,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
            self.assertIn('status', r[0])
            self.assertEqual(status_list[idx], r[0]['status'])
def _post_downgrade_033(self, engine):
image_locations = db_utils.get_table(engine, 'image_locations')
self.assertNotIn('status', image_locations.c)
    def _pre_upgrade_034(self, engine):
        images = db_utils.get_table(engine, 'images')
@@ -1024,10 +934,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
                  .execute().fetchone())
        self.assertIsNone(result.virtual_size)
def _post_downgrade_034(self, engine):
images = db_utils.get_table(engine, 'images')
self.assertNotIn('virtual_size', images.c)
    def _pre_upgrade_035(self, engine):
        self.assertRaises(sqlalchemy.exc.NoSuchTableError,
                          db_utils.get_table, engine, 'metadef_namespaces')
@@ -1137,19 +1043,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
        col_data = [col.name for col in table.columns]
        self.assertEqual(expected_cols, col_data)
def _post_downgrade_035(self, engine):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
db_utils.get_table, engine, 'metadef_namespaces')
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
db_utils.get_table, engine, 'metadef_properties')
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
db_utils.get_table, engine, 'metadef_objects')
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
db_utils.get_table, engine, 'metadef_resource_types')
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
db_utils.get_table, engine,
'metadef_namespace_resource_types')
    def _pre_upgrade_036(self, engine):
        meta = sqlalchemy.MetaData()
        meta.bind = engine
@@ -1206,34 +1099,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
        col_data = [col.name for col in table.columns]
        self.assertEqual(expected_cols, col_data)
def _post_downgrade_036(self, engine):
meta = sqlalchemy.MetaData()
meta.bind = engine
# metadef_objects
table = sqlalchemy.Table("metadef_objects", meta, autoload=True)
expected_cols = [u'id',
u'namespace_id',
u'name',
u'description',
u'required',
u'schema',
u'created_at',
u'updated_at']
col_data = [col.name for col in table.columns]
self.assertEqual(expected_cols, col_data)
# metadef_properties
table = sqlalchemy.Table("metadef_properties", meta, autoload=True)
expected_cols = [u'id',
u'namespace_id',
u'name',
u'schema',
u'created_at',
u'updated_at']
col_data = [col.name for col in table.columns]
self.assertEqual(expected_cols, col_data)
    def _check_037(self, engine, data):
        if engine.name == 'mysql':
            self.assertFalse(unique_constraint_exist('image_id',
@@ -1286,62 +1151,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
        self.assertEqual('pending', image_member['status'])
def _post_downgrade_037(self, engine):
if engine.name == 'mysql':
self.assertTrue(unique_constraint_exist('image_id',
'image_properties',
engine))
if engine.name == 'postgresql':
self.assertTrue(index_exist('ix_image_properties_image_id_name',
'image_properties', engine))
self.assertFalse(unique_constraint_exist(
'ix_image_properties_image_id_name',
'image_properties',
engine))
image_members = db_utils.get_table(engine, 'image_members')
images = db_utils.get_table(engine, 'images')
self.assertTrue(image_members.c.status.nullable)
self.assertTrue(images.c.protected.nullable)
now = datetime.datetime.now()
temp = dict(
deleted=False,
created_at=now,
status='active',
is_public=True,
min_disk=0,
min_ram=0,
id='fake_image_035_d'
)
images.insert().values(temp).execute()
image = (images.select()
.where(images.c.id == 'fake_image_035_d')
.execute().fetchone())
self.assertIsNone(image['protected'])
temp = dict(
deleted=False,
created_at=now,
image_id='fake_image_035_d',
member='fake_member',
can_share=True,
id=4
)
image_members.insert().values(temp).execute()
image_member = (image_members.select()
.where(image_members.c.id == 4)
.execute().fetchone())
self.assertIsNone(image_member['status'])
    def _pre_upgrade_038(self, engine):
        self.assertRaises(sqlalchemy.exc.NoSuchTableError,
                          db_utils.get_table, engine, 'metadef_tags')
@@ -1360,10 +1169,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
        col_data = [col.name for col in table.columns]
        self.assertEqual(expected_cols, col_data)
def _post_downgrade_038(self, engine):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
db_utils.get_table, engine, 'metadef_tags')
    def _check_039(self, engine, data):
        meta = sqlalchemy.MetaData()
        meta.bind = engine
@@ -1416,77 +1221,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
        self.assertTrue(index_exist('ix_metadef_properties_namespace_id',
                                    metadef_properties.name, engine))
def _post_downgrade_039(self, engine):
meta = sqlalchemy.MetaData()
meta.bind = engine
metadef_namespaces = sqlalchemy.Table('metadef_namespaces', meta,
autoload=True)
metadef_properties = sqlalchemy.Table('metadef_properties', meta,
autoload=True)
metadef_objects = sqlalchemy.Table('metadef_objects', meta,
autoload=True)
metadef_ns_res_types = sqlalchemy.Table(
'metadef_namespace_resource_types',
meta, autoload=True)
metadef_resource_types = sqlalchemy.Table('metadef_resource_types',
meta, autoload=True)
self.assertFalse(index_exist('ix_metadef_ns_res_types_namespace_id',
metadef_ns_res_types.name, engine))
self.assertFalse(index_exist('ix_metadef_namespaces_namespace',
metadef_namespaces.name, engine))
self.assertFalse(index_exist('ix_metadef_namespaces_owner',
metadef_namespaces.name, engine))
self.assertFalse(index_exist('ix_metadef_objects_name',
metadef_objects.name, engine))
self.assertFalse(index_exist('ix_metadef_objects_namespace_id',
metadef_objects.name, engine))
self.assertFalse(index_exist('ix_metadef_properties_name',
metadef_properties.name, engine))
self.assertFalse(index_exist('ix_metadef_properties_namespace_id',
metadef_properties.name, engine))
self.assertTrue(index_exist('ix_namespaces_namespace',
metadef_namespaces.name, engine))
self.assertTrue(index_exist('ix_objects_namespace_id_name',
metadef_objects.name, engine))
self.assertTrue(index_exist('ix_metadef_properties_namespace_id_name',
metadef_properties.name, engine))
if engine.name == 'postgresql':
inspector = inspect(engine)
self.assertEqual(1, len(inspector.get_unique_constraints(
'metadef_objects')))
self.assertEqual(1, len(inspector.get_unique_constraints(
'metadef_properties')))
if engine.name == 'mysql':
self.assertTrue(unique_constraint_exist(
'namespace_id', metadef_properties.name, engine))
self.assertTrue(unique_constraint_exist(
'namespace_id', metadef_objects.name, engine))
self.assertTrue(unique_constraint_exist(
'resource_type_id', metadef_ns_res_types.name, engine))
self.assertTrue(unique_constraint_exist(
'namespace', metadef_namespaces.name, engine))
self.assertTrue(unique_constraint_exist(
'name', metadef_resource_types.name, engine))
    def _check_040(self, engine, data):
        meta = sqlalchemy.MetaData()
        meta.bind = engine
@@ -1755,73 +1489,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
                        metadef_resource_types.name, engine)
                        )
def _post_downgrade_042(self, engine):
meta = sqlalchemy.MetaData()
meta.bind = engine
metadef_namespaces = sqlalchemy.Table('metadef_namespaces', meta,
autoload=True)
metadef_objects = sqlalchemy.Table('metadef_objects', meta,
autoload=True)
metadef_properties = sqlalchemy.Table('metadef_properties', meta,
autoload=True)
metadef_tags = sqlalchemy.Table('metadef_tags', meta, autoload=True)
metadef_resource_types = sqlalchemy.Table('metadef_resource_types',
meta, autoload=True)
metadef_ns_res_types = sqlalchemy.Table(
'metadef_namespace_resource_types',
meta, autoload=True)
# These have been recreated
self.assertTrue(index_exist('ix_metadef_namespaces_namespace',
metadef_namespaces.name, engine))
self.assertTrue(index_exist('ix_metadef_objects_namespace_id',
metadef_objects.name, engine))
self.assertTrue(index_exist('ix_metadef_properties_namespace_id',
metadef_properties.name, engine))
self.assertTrue(index_exist('ix_metadef_tags_namespace_id',
metadef_tags.name, engine))
self.assertTrue(index_exist('ix_metadef_resource_types_name',
metadef_resource_types.name, engine))
self.assertTrue(index_exist(
'ix_metadef_ns_res_types_res_type_id_ns_id',
metadef_ns_res_types.name, engine))
# The rest must remain
self.assertTrue(index_exist('ix_metadef_namespaces_owner',
metadef_namespaces.name, engine))
self.assertTrue(index_exist('ix_metadef_objects_name',
metadef_objects.name, engine))
self.assertTrue(index_exist('ix_metadef_properties_name',
metadef_properties.name, engine))
self.assertTrue(index_exist('ix_metadef_tags_name',
metadef_tags.name, engine))
self.assertTrue(index_exist('ix_metadef_ns_res_types_namespace_id',
metadef_ns_res_types.name, engine))
# Dropped
self.assertFalse(unique_constraint_exist
('uq_metadef_objects_namespace_id_name',
metadef_objects.name, engine)
)
self.assertFalse(unique_constraint_exist
('uq_metadef_properties_namespace_id_name',
metadef_properties.name, engine)
)
self.assertFalse(unique_constraint_exist
('uq_metadef_tags_namespace_id_name',
metadef_tags.name, engine)
)
self.assertFalse(unique_constraint_exist
('uq_metadef_namespaces_namespace',
metadef_namespaces.name, engine)
)
self.assertFalse(unique_constraint_exist
('uq_metadef_resource_types_name',
metadef_resource_types.name, engine)
)
    def assert_table(self, engine, table_name, indices, columns):
        table = db_utils.get_table(engine, table_name)
        index_data = [(index.name, index.columns.keys()) for index in
@@ -1831,6 +1498,24 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
        self.assertItemsEqual(indices, index_data)
class TestMigrations(test_base.DbTestCase, test_utils.BaseTestCase):
def test_no_downgrade(self):
migrate_file = versions.__path__[0]
for parent, dirnames, filenames in os.walk(migrate_file):
for filename in filenames:
if filename.split('.')[1] == 'py':
model_name = filename.split('.')[0]
model = __import__(
'glance.db.sqlalchemy.migrate_repo.versions.' +
model_name)
obj = getattr(getattr(getattr(getattr(getattr(
model, 'db'), 'sqlalchemy'), 'migrate_repo'),
'versions'), model_name)
func = getattr(obj, 'downgrade', None)
self.assertIsNone(func)
class TestMysqlMigrations(test_base.MySQLOpportunisticTestCase,
                          MigrationsMixin):
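
The new TestMigrations.test_no_downgrade guard above imports every module in the versions package and asserts that none still defines downgrade(). A rough standalone equivalent of the same check, assuming the package layout the test relies on (one module per migration under glance.db.sqlalchemy.migrate_repo.versions):

    import importlib
    import os

    from glance.db.sqlalchemy.migrate_repo import versions

    def modules_with_downgrade():
        # Report every migration module that still defines downgrade();
        # after this commit the list should be empty.
        offenders = []
        for filename in sorted(os.listdir(versions.__path__[0])):
            if not filename.endswith('.py') or filename == '__init__.py':
                continue
            module = importlib.import_module(
                'glance.db.sqlalchemy.migrate_repo.versions.' + filename[:-3])
            if getattr(module, 'downgrade', None) is not None:
                offenders.append(filename)
        return offenders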


@@ -0,0 +1,11 @@
---
prelude: >
  Database downgrades have been removed from the Glance source tree. Please
  see the ``upgrade`` section for more details.
upgrade:
  - The ``db_downgrade`` command has been removed from the
    ``glance-manage`` utility, and all database downgrade
    scripts have been removed. In accordance with OpenStack
    policy, Glance databases can no longer be downgraded. Operators
    are advised to make a full database backup of their
    production data before attempting any upgrade.
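
A minimal sketch of the backup step this note recommends, assuming a MySQL backend, a database named glance, and mysqldump available on the host; all three are deployment-specific assumptions:

    import subprocess
    from datetime import date

    def backup_glance_db(database='glance'):
        # Dump the database to a dated file; once upgraded, restoring
        # such a backup is the only way back to the previous schema.
        outfile = 'glance-backup-%s.sql' % date.today().isoformat()
        with open(outfile, 'wb') as out:
            subprocess.check_call(
                ['mysqldump', '--single-transaction', database], stdout=out)
        return outfile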