Enable support for MySQL with test migrations.

This commit makes the necessary changes to enable the migration
tests to pass with both MySQL and SQLite. However, the MySQL
database still needs to be created beforehand for the tests to run
against MySQL.
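
For reference, that one-time setup could look like the sketch below;
the database name, user, and password are illustrative placeholders
rather than values defined by this change, and MySQLdb is used only
because MySQL-python is the driver added here.

# Hedged sketch: create the MySQL database the migration tests expect.
# All names and credentials below are placeholders, not project settings.
import MySQLdb

conn = MySQLdb.connect(host="localhost", user="root", passwd="rootpw")
cur = conn.cursor()
cur.execute("CREATE DATABASE IF NOT EXISTS glance_test")
cur.execute("GRANT ALL PRIVILEGES ON glance_test.* "
            "TO 'glance_citest'@'localhost' IDENTIFIED BY 'citest_pw'")
cur.close()
conn.close()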

The SQLAlchemy alter() method breaks on SQLite when MySQL support
is enabled for the other databases, so for SQLite testing the
alter() calls are now performed manually with raw SQL.
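
To illustrate what "performed manually" means, the snippet below shows
the general SQLite rebuild pattern on a deliberately simplified schema:
a replacement table is created with the new column definition, the rows
are copied across, and the tables are swapped. It is a sketch of the
approach, not the actual migration code.

# Hedged illustration of the manual SQLite path: SQLite cannot change a
# column's type in place, so the table is rebuilt instead of altered.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE images (id INTEGER NOT NULL PRIMARY KEY, name VARCHAR(255));
    INSERT INTO images VALUES (1, 'example');

    CREATE TABLE images_backup (
        id VARCHAR(36) NOT NULL PRIMARY KEY,
        name VARCHAR(255)
    );
    INSERT INTO images_backup SELECT id, name FROM images;
    DROP TABLE images;
    ALTER TABLE images_backup RENAME TO images;
""")
print(conn.execute("SELECT id, name FROM images").fetchall())
conn.close()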

This also adds the MySQL-python module to the test-requires list
so that it is installed both in CI and in the virtualenv used for
unit tests.

Change-Id: Ic72476b8e6a910c25747641ef1882639019b433e
Matthew Treinish 2013-02-14 12:43:58 -05:00
parent 8f76eda330
commit 70907328ea
5 changed files with 179 additions and 12 deletions

@@ -0,0 +1,61 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE images_backup (
id INTEGER NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER NOT NULL,
min_ram INTEGER NOT NULL,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
INSERT INTO images_backup
SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram
FROM images;
DROP TABLE images;
CREATE TABLE images (
id INTEGER NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER,
min_ram INTEGER,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
CREATE INDEX ix_images_deleted ON images (deleted);
CREATE INDEX ix_images_is_public ON images (is_public);
INSERT INTO images
SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram
FROM images_backup;
DROP TABLE images_backup;
COMMIT;

@@ -0,0 +1,62 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE images_backup (
id INTEGER NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER,
min_ram INTEGER,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
INSERT INTO images_backup
SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram
FROM images;
DROP TABLE images;
CREATE TABLE images (
id INTEGER NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER NOT NULL,
min_ram INTEGER NOT NULL,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);
CREATE INDEX ix_images_deleted ON images (deleted);
CREATE INDEX ix_images_is_public ON images (is_public);
INSERT INTO images
SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram
FROM images_backup;
DROP TABLE images_backup;
COMMIT;

@@ -41,7 +41,6 @@ def upgrade(migrate_engine):
t_images = _get_table('images', meta)
t_image_members = _get_table('image_members', meta)
t_image_properties = _get_table('image_properties', meta)
if migrate_engine.url.get_dialect().name == "sqlite":
_upgrade_sqlite(t_images, t_image_members, t_image_properties)
_update_all_ids_to_uuids(t_images, t_image_members, t_image_properties)
@@ -70,9 +69,31 @@ def _upgrade_sqlite(t_images, t_image_members, t_image_properties):
"""
Upgrade 011 -> 012 with special SQLite-compatible logic.
"""
t_images.c.id.alter(sqlalchemy.String(36), primary_key=True)
sql_commands = [
"""CREATE TABLE images_backup (
id VARCHAR(36) NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER NOT NULL,
min_ram INTEGER NOT NULL,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);""",
"""INSERT INTO images_backup
SELECT * FROM images;""",
"""CREATE TABLE image_members_backup (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
@@ -111,16 +132,38 @@ def _upgrade_sqlite(t_images, t_image_members, t_image_properties):
for command in sql_commands:
meta.bind.execute(command)
_sqlite_table_swap(t_image_members, t_image_properties)
_sqlite_table_swap(t_image_members, t_image_properties, t_images)
def _downgrade_sqlite(t_images, t_image_members, t_image_properties):
"""
Downgrade 012 -> 011 with special SQLite-compatible logic.
"""
t_images.c.id.alter(sqlalchemy.Integer(), primary_key=True)
sql_commands = [
"""CREATE TABLE images_backup (
id INTEGER NOT NULL,
name VARCHAR(255),
size INTEGER,
status VARCHAR(30) NOT NULL,
is_public BOOLEAN NOT NULL,
location TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
disk_format VARCHAR(20),
container_format VARCHAR(20),
checksum VARCHAR(32),
owner VARCHAR(255),
min_disk INTEGER NOT NULL,
min_ram INTEGER NOT NULL,
PRIMARY KEY (id),
CHECK (is_public IN (0, 1)),
CHECK (deleted IN (0, 1))
);""",
"""INSERT INTO images_backup
SELECT * FROM images;""",
"""CREATE TABLE image_members_backup (
id INTEGER NOT NULL,
image_id INTEGER NOT NULL,
@@ -159,7 +202,7 @@ def _downgrade_sqlite(t_images, t_image_members, t_image_properties):
for command in sql_commands:
meta.bind.execute(command)
_sqlite_table_swap(t_image_members, t_image_properties)
_sqlite_table_swap(t_image_members, t_image_properties, t_images)
def _upgrade_other(t_images, t_image_members, t_image_properties):
@@ -204,18 +247,18 @@ def _downgrade_other(t_images, t_image_members, t_image_properties):
fk.create()
def _sqlite_table_swap(t_image_members, t_image_properties):
def _sqlite_table_swap(t_image_members, t_image_properties, t_images):
t_image_members.drop()
t_image_properties.drop()
t_images.drop()
meta.bind.execute("ALTER TABLE images_backup "
"RENAME TO images")
meta.bind.execute("ALTER TABLE image_members_backup "
"RENAME TO image_members")
meta.bind.execute("ALTER TABLE image_properties_backup "
"RENAME TO image_properties")
for index in t_image_members.indexes.union(t_image_properties.indexes):
index.create()
def _get_table(table_name, metadata):
"""Return a sqlalchemy Table definition with associated metadata."""

@@ -62,8 +62,8 @@ def _get_connect_string(backend,
Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
if backend == "postgres":
backend = "postgresql+psycopg2"
if backend == "mysql":
backend = "mysql+mysqldb"
return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
% locals())
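
For reference, the mapping above yields a connection URL of the form
shown below for the MySQL backend; the user, password, and database
are placeholders supplied by whoever prepared the test database, not
values fixed by this change.

# Illustrative only: the URL produced for backend == "mysql".
backend = "mysql+mysqldb"
user, passwd, database = "glance_citest", "secret", "glance_test"
print("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % locals())
# -> mysql+mysqldb://glance_citest:secret@localhost/glance_test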

@@ -18,5 +18,6 @@ requests
testtools>=0.9.22
# Optional packages that should be installed when testing
xattr>=0.6.0
MySQL-python
pysendfile==2.0.0
xattr>=0.6.0