Make database downgrade works

Fixes bug #854905

Use sqlalchemy reflection whenever possible:
    http://sqlalchemy-migrate.readthedocs.org/en/latest/versioning.html

Work around sqlalchemy-migrate sqlite 'bool column not deletable' issue:
    http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=143
    Add following sql scripts for sqlite:
        002_sqlite_downgrade.sql
        015_sqlite_downgrade.sql
        033_sqlite_downgrade.sql
        050_sqlite_downgrade.sql
        068_sqlite_downgrade.sql

Work around sqlalchemy-migrate sqlite 'table with foreign key column not
deletable' issue:
    http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=94
    Add following sql scripts for sqlite:
        003_sqlite_downgrade.sql
        006_sqlite_downgrade.sql
        007_sqlite_downgrade.sql
        012_sqlite_upgrade.sql
        013_sqlite_downgrade.sql
        020_sqlite_downgrade.sql
        030_sqlite_downgrade.sql
        038_sqlite_downgrade.sql
        042_sqlite_downgrade.sql
        053_sqlite_downgrade.sql
        067_sqlite_downgrade.sql

Work around sqlalchemy-migrate 'migrate drops engine reference' issue:
    http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=72

Add following sql scripts for long primary key to work with utf-8
mysql table:
    072_mysql_upgrade.sql

Add following sql scripts for postgresql:
    002_postgresql_downgrade.sql

Add snake walk test cases for database migration based on glance migration
test.

Change-Id: Ib454ecb4662bbf47736c1b12d9a4f969f180ceb6
This commit is contained in:
Hengqing Hu 2012-02-08 00:02:54 +08:00
parent 5073c4506e
commit 8e825c4024
105 changed files with 5139 additions and 2298 deletions

File diff suppressed because it is too large Load Diff

View File

@ -20,194 +20,139 @@ from sqlalchemy import Boolean, Column, DateTime, ForeignKey
from sqlalchemy import Integer, MetaData, String, Table, Text
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
services = Table('services', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
networks = Table('networks', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
volumes = Table('volumes', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
certificates = Table('certificates', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('user_id',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('project_id',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('file_name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
consoles = Table('consoles', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('instance_id', Integer()),
Column('password',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('port', Integer(), nullable=True),
Column('pool_id',
Integer(),
ForeignKey('console_pools.id')),
)
console_pools = Table('console_pools', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('address',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('username',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('password',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('console_type',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('public_hostname',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('host',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('compute_host',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
instance_actions = Table('instance_actions', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_id',
Integer(),
ForeignKey('instances.id')),
Column('action',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('error',
Text(length=None, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
iscsi_targets = Table('iscsi_targets', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('target_num', Integer()),
Column('host',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('volume_id',
Integer(),
ForeignKey('volumes.id'),
nullable=True),
)
#
# Tables to alter
#
auth_tokens = Table('auth_tokens', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('token_hash',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
primary_key=True,
nullable=False),
Column('user_id', Integer()),
Column('server_manageent_url',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('storage_url',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('cdn_management_url',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
instances_availability_zone = Column(
'availability_zone',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
instances_locked = Column('locked',
Boolean(create_constraint=True, name=None))
networks_cidr_v6 = Column(
'cidr_v6',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
networks_ra_server = Column(
'ra_server',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
services_availability_zone = Column(
'availability_zone',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
volumes = Table('volumes', meta, autoload=True)
instances = Table('instances', meta, autoload=True)
services = Table('services', meta, autoload=True)
networks = Table('networks', meta, autoload=True)
auth_tokens = Table('auth_tokens', meta, autoload=True)
#
# New Tables
#
certificates = Table('certificates', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('user_id',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('project_id',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('file_name',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
consoles = Table('consoles', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_name',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('instance_id', Integer()),
Column('password',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('port', Integer(), nullable=True),
Column('pool_id',
Integer(),
ForeignKey('console_pools.id')),
)
console_pools = Table('console_pools', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('address',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('username',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('password',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('console_type',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('public_hostname',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('host',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('compute_host',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
instance_actions = Table('instance_actions', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_id',
Integer(),
ForeignKey('instances.id')),
Column('action',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('error',
Text(length=None, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
iscsi_targets = Table('iscsi_targets', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('target_num', Integer()),
Column('host',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('volume_id',
Integer(),
ForeignKey('volumes.id'),
nullable=True),
)
tables = [certificates, console_pools, consoles, instance_actions,
iscsi_targets]
for table in tables:
@ -225,8 +170,67 @@ def upgrade(migrate_engine):
unicode_error=None,
_warn_on_bytestring=False))
#
# New Columns
#
instances_availability_zone = Column(
'availability_zone',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
instances_locked = Column('locked',
Boolean(create_constraint=True, name=None))
networks_cidr_v6 = Column(
'cidr_v6',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
networks_ra_server = Column(
'ra_server',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
services_availability_zone = Column(
'availability_zone',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
instances.create_column(instances_availability_zone)
instances.create_column(instances_locked)
networks.create_column(networks_cidr_v6)
networks.create_column(networks_ra_server)
services.create_column(services_availability_zone)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
volumes = Table('volumes', meta, autoload=True)
instances = Table('instances', meta, autoload=True)
services = Table('services', meta, autoload=True)
networks = Table('networks', meta, autoload=True)
auth_tokens = Table('auth_tokens', meta, autoload=True)
certificates = Table('certificates', meta, autoload=True)
consoles = Table('consoles', meta, autoload=True)
console_pools = Table('console_pools', meta, autoload=True)
instance_actions = Table('instance_actions', meta, autoload=True)
iscsi_targets = Table('iscsi_targets', meta, autoload=True)
# table order matters, don't change
tables = [certificates, consoles, console_pools, instance_actions,
iscsi_targets]
for table in tables:
table.drop()
auth_tokens.c.user_id.alter(type=Integer())
instances.drop_column('availability_zone')
instances.drop_column('locked')
networks.drop_column('cidr_v6')
networks.drop_column('ra_server')
services.drop_column('availability_zone')

View File

@ -0,0 +1,20 @@
BEGIN;
DROP TABLE certificates;
DROP TABLE consoles;
DROP TABLE console_pools;
DROP TABLE instance_actions;
DROP TABLE iscsi_targets;
ALTER TABLE auth_tokens ADD COLUMN user_id_backup INTEGER;
UPDATE auth_tokens SET user_id_backup = CAST(user_id AS INTEGER);
ALTER TABLE auth_tokens DROP COLUMN user_id;
ALTER TABLE auth_tokens RENAME COLUMN user_id_backup TO user_id;
ALTER TABLE instances DROP COLUMN availability_zone;
ALTER TABLE instances DROP COLUMN locked;
ALTER TABLE networks DROP COLUMN cidr_v6;
ALTER TABLE networks DROP COLUMN ra_server;
ALTER TABLE services DROP COLUMN availability_zone;
COMMIT;

View File

@ -0,0 +1,388 @@
BEGIN TRANSACTION;
DROP TABLE certificates;
DROP TABLE console_pools;
DROP TABLE consoles;
DROP TABLE instance_actions;
DROP TABLE iscsi_targets;
CREATE TEMPORARY TABLE auth_tokens_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
token_hash VARCHAR(255) NOT NULL,
user_id VARCHAR(255),
server_manageent_url VARCHAR(255),
storage_url VARCHAR(255),
cdn_management_url VARCHAR(255),
PRIMARY KEY (token_hash),
CHECK (deleted IN (0, 1))
);
INSERT INTO auth_tokens_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
token_hash,
user_id,
server_manageent_url,
storage_url,
cdn_management_url
FROM auth_tokens;
DROP TABLE auth_tokens;
CREATE TABLE auth_tokens (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
token_hash VARCHAR(255) NOT NULL,
user_id INTEGER,
server_manageent_url VARCHAR(255),
storage_url VARCHAR(255),
cdn_management_url VARCHAR(255),
PRIMARY KEY (token_hash),
CHECK (deleted IN (0, 1))
);
INSERT INTO auth_tokens
SELECT created_at,
updated_at,
deleted_at,
deleted,
token_hash,
user_id,
server_manageent_url,
storage_url,
cdn_management_url
FROM auth_tokens_backup;
DROP TABLE auth_tokens_backup;
CREATE TEMPORARY TABLE instances_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
internal_id INTEGER,
admin_pass VARCHAR(255),
user_id VARCHAR(255),
project_id VARCHAR(255),
image_id VARCHAR(255),
kernel_id VARCHAR(255),
ramdisk_id VARCHAR(255),
server_name VARCHAR(255),
launch_index INTEGER,
key_name VARCHAR(255),
key_data TEXT,
state INTEGER,
state_description VARCHAR(255),
memory_mb INTEGER,
vcpus INTEGER,
local_gb INTEGER,
hostname VARCHAR(255),
host VARCHAR(255),
instance_type VARCHAR(255),
user_data TEXT,
reservation_id VARCHAR(255),
mac_address VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
availability_zone VARCHAR(255),
locked BOOLEAN,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (locked IN (0, 1))
);
INSERT INTO instances_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
internal_id,
admin_pass,
user_id,
project_id,
image_id,
kernel_id,
ramdisk_id,
server_name,
launch_index,
key_name,
key_data,
state,
state_description,
memory_mb,
vcpus,
local_gb,
hostname,
host,
instance_type,
user_data,
reservation_id,
mac_address,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
availability_zone,
locked
FROM instances;
DROP TABLE instances;
CREATE TABLE instances (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
internal_id INTEGER,
admin_pass VARCHAR(255),
user_id VARCHAR(255),
project_id VARCHAR(255),
image_id VARCHAR(255),
kernel_id VARCHAR(255),
ramdisk_id VARCHAR(255),
server_name VARCHAR(255),
launch_index INTEGER,
key_name VARCHAR(255),
key_data TEXT,
state INTEGER,
state_description VARCHAR(255),
memory_mb INTEGER,
vcpus INTEGER,
local_gb INTEGER,
hostname VARCHAR(255),
host VARCHAR(255),
instance_type VARCHAR(255),
user_data TEXT,
reservation_id VARCHAR(255),
mac_address VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
PRIMARY KEY (id),
CHECK (deleted IN (0, 1))
);
INSERT INTO instances
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
internal_id,
admin_pass,
user_id,
project_id,
image_id,
kernel_id,
ramdisk_id,
server_name,
launch_index,
key_name,
key_data,
state,
state_description,
memory_mb,
vcpus,
local_gb,
hostname,
host,
instance_type,
user_data,
reservation_id,
mac_address,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description
FROM instances_backup;
DROP TABLE instances_backup;
CREATE TEMPORARY TABLE networks_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
injected BOOLEAN,
cidr VARCHAR(255),
netmask VARCHAR(255),
bridge VARCHAR(255),
gateway VARCHAR(255),
broadcast VARCHAR(255),
dns VARCHAR(255),
vlan INTEGER,
vpn_public_address VARCHAR(255),
vpn_public_port INTEGER,
vpn_private_address VARCHAR(255),
dhcp_start VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
cidr_v6 VARCHAR(255),
ra_server VARCHAR(255),
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (injected IN (0, 1))
);
INSERT INTO networks_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
injected,
cidr,
netmask,
bridge,
gateway,
broadcast,
dns,
vlan,
vpn_public_address,
vpn_public_port,
vpn_private_address,
dhcp_start,
project_id,
host,
cidr_v6,
ra_server
FROM networks;
DROP TABLE networks;
CREATE TABLE networks (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
injected BOOLEAN,
cidr VARCHAR(255),
netmask VARCHAR(255),
bridge VARCHAR(255),
gateway VARCHAR(255),
broadcast VARCHAR(255),
dns VARCHAR(255),
vlan INTEGER,
vpn_public_address VARCHAR(255),
vpn_public_port INTEGER,
vpn_private_address VARCHAR(255),
dhcp_start VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (injected IN (0, 1))
);
INSERT INTO networks
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
injected,
cidr,
netmask,
bridge,
gateway,
broadcast,
dns,
vlan,
vpn_public_address,
vpn_public_port,
vpn_private_address,
dhcp_start,
project_id,
host
FROM networks_backup;
DROP TABLE networks_backup;
CREATE TEMPORARY TABLE services_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
host VARCHAR(255),
binary VARCHAR(255),
topic VARCHAR(255),
report_count INTEGER NOT NULL,
disabled BOOLEAN,
availability_zone VARCHAR(255),
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (disabled IN (0, 1))
);
INSERT INTO services_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
host,
binary,
topic,
report_count,
disabled,
availability_zone
FROM services;
DROP TABLE services;
CREATE TABLE services (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
host VARCHAR(255),
binary VARCHAR(255),
topic VARCHAR(255),
report_count INTEGER NOT NULL,
disabled BOOLEAN,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (disabled IN (0, 1))
);
INSERT INTO services
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
host,
binary,
topic,
report_count,
disabled
FROM services_backup;
DROP TABLE services_backup;
COMMIT;

View File

@ -17,30 +17,26 @@
from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
networks = Table('networks', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
#
# Tables to alter
#
networks_label = Column(
'label',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
networks = Table('networks', meta, autoload=True)
networks_label = Column(
'label',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
networks.create_column(networks_label)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
networks = Table('networks', meta, autoload=True)
networks.drop_column('label')

View File

@ -0,0 +1,111 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE networks_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
injected BOOLEAN,
cidr VARCHAR(255),
netmask VARCHAR(255),
bridge VARCHAR(255),
gateway VARCHAR(255),
broadcast VARCHAR(255),
dns VARCHAR(255),
vlan INTEGER,
vpn_public_address VARCHAR(255),
vpn_public_port INTEGER,
vpn_private_address VARCHAR(255),
dhcp_start VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
cidr_v6 VARCHAR(255),
ra_server VARCHAR(255),
label VARCHAR(255),
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (injected IN (0, 1))
);
INSERT INTO networks_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
injected,
cidr,
netmask,
bridge,
gateway,
broadcast,
dns,
vlan,
vpn_public_address,
vpn_public_port,
vpn_private_address,
dhcp_start,
project_id,
host,
cidr_v6,
ra_server,
label
FROM networks;
DROP TABLE networks;
CREATE TABLE networks (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
injected BOOLEAN,
cidr VARCHAR(255),
netmask VARCHAR(255),
bridge VARCHAR(255),
gateway VARCHAR(255),
broadcast VARCHAR(255),
dns VARCHAR(255),
vlan INTEGER,
vpn_public_address VARCHAR(255),
vpn_public_port INTEGER,
vpn_private_address VARCHAR(255),
dhcp_start VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
cidr_v6 VARCHAR(255),
ra_server VARCHAR(255),
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (injected IN (0, 1))
);
INSERT INTO networks
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
injected,
cidr,
netmask,
bridge,
gateway,
broadcast,
dns,
vlan,
vpn_public_address,
vpn_public_port,
vpn_private_address,
dhcp_start,
project_id,
host,
cidr_v6,
ra_server
FROM networks_backup;
DROP TABLE networks_backup;
COMMIT;

View File

@ -17,43 +17,50 @@ from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import MetaData, String, Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
#
# New Tables
#
zones = Table('zones', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('api_url',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('username',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('password',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
#
# Tables to alter
#
# (none currently)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
#
# New Tables
#
zones = Table('zones', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('api_url',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('username',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('password',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
for table in (zones, ):
try:
table.create()
except Exception:
LOG.info(repr(table))
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
zones = Table('zones', meta, autoload=True)
for table in (zones, ):
table.drop()

View File

@ -19,54 +19,39 @@ from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
from sqlalchemy import MetaData, String, Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
quotas = Table('quotas', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
instance_metadata_table = Table('instance_metadata', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_id',
Integer(),
ForeignKey('instances.id'),
nullable=False),
Column('key',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('value',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
#
# New columns
#
quota_metadata_items = Column('metadata_items', Integer())
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
instances = Table('instances', meta, autoload=True)
quotas = Table('quotas', meta, autoload=True)
instance_metadata_table = Table('instance_metadata', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_id',
Integer(),
ForeignKey('instances.id'),
nullable=False),
Column('key',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('value',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
for table in (instance_metadata_table, ):
try:
table.create()
@ -75,4 +60,22 @@ def upgrade(migrate_engine):
LOG.exception('Exception while creating table')
raise
quota_metadata_items = Column('metadata_items', Integer())
quotas.create_column(quota_metadata_items)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
instances = Table('instances', meta, autoload=True)
quotas = Table('quotas', meta, autoload=True)
instance_metadata_table = Table('instance_metadata', meta, autoload=True)
for table in (instance_metadata_table, ):
table.drop()
quotas.drop_column('metadata_items')

View File

@ -17,52 +17,38 @@
from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
#
volumes = Table('volumes', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
volumes = Table('volumes', meta, autoload=True)
# Add columns to existing tables
volumes_provider_location = Column('provider_location',
String(length=256,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
#
# New Tables
#
# None
#
# Tables to alter
#
# None
#
# Columns to add to existing tables
#
volumes_provider_location = Column('provider_location',
volumes_provider_auth = Column('provider_auth',
String(length=256,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
volumes_provider_auth = Column('provider_auth',
String(length=256,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
# Add columns to existing tables
volumes.create_column(volumes_provider_location)
volumes.create_column(volumes_provider_auth)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
volumes = Table('volumes', meta, autoload=True)
volumes.drop_column('provider_location')
volumes.drop_column('provider_auth')

View File

@ -0,0 +1,113 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE volumes_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
ec2_id VARCHAR(255),
user_id VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
size INTEGER,
availability_zone VARCHAR(255),
instance_id INTEGER,
mountpoint VARCHAR(255),
attach_time VARCHAR(255),
status VARCHAR(255),
attach_status VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
provider_location VARCHAR(256),
provider_auth VARCHAR(256),
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(instance_id) REFERENCES instances (id)
);
INSERT INTO volumes_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
ec2_id,
user_id,
project_id,
host,
size,
availability_zone,
instance_id,
mountpoint,
attach_time,
status,
attach_status,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
provider_location,
provider_auth
FROM volumes;
DROP TABLE volumes;
CREATE TABLE volumes (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
ec2_id VARCHAR(255),
user_id VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
size INTEGER,
availability_zone VARCHAR(255),
instance_id INTEGER,
mountpoint VARCHAR(255),
attach_time VARCHAR(255),
status VARCHAR(255),
attach_status VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(instance_id) REFERENCES instances (id)
);
INSERT INTO volumes
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
ec2_id,
user_id,
project_id,
host,
size,
availability_zone,
instance_id,
mountpoint,
attach_time,
status,
attach_status,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description
FROM volumes_backup;
DROP TABLE volumes_backup;
COMMIT;

View File

@ -15,71 +15,56 @@
from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
#
fixed_ips = Table(
"fixed_ips",
meta,
Column(
"id",
Integer(),
primary_key=True,
nullable=False))
#
# New Tables
#
# None
#
# Tables to alter
#
# None
#
# Columns to add to existing tables
#
fixed_ips_addressV6 = Column(
"addressV6",
String(
length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
fixed_ips_netmaskV6 = Column(
"netmaskV6",
String(
length=3,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
fixed_ips_gatewayV6 = Column(
"gatewayV6",
String(
length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
fixed_ips = Table('fixed_ips', meta, autoload=True)
#
# New Columns
#
fixed_ips_addressV6 = Column(
"addressV6",
String(
length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
fixed_ips_netmaskV6 = Column(
"netmaskV6",
String(
length=3,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
fixed_ips_gatewayV6 = Column(
"gatewayV6",
String(
length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
# Add columns to existing tables
fixed_ips.create_column(fixed_ips_addressV6)
fixed_ips.create_column(fixed_ips_netmaskV6)
fixed_ips.create_column(fixed_ips_gatewayV6)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
fixed_ips = Table('fixed_ips', meta, autoload=True)
fixed_ips.drop_column('addressV6')
fixed_ips.drop_column('netmaskV6')
fixed_ips.drop_column('gatewayV6')

View File

@ -0,0 +1,79 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE fixed_ips_backup (
id INTEGER NOT NULL,
address VARCHAR(255),
network_id INTEGER,
instance_id INTEGER,
allocated BOOLEAN DEFAULT FALSE,
leased BOOLEAN DEFAULT FALSE,
reserved BOOLEAN DEFAULT FALSE,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
addressV6 VARCHAR(255),
netmaskV6 VARCHAR(3),
gatewayV6 VARCHAR(255),
PRIMARY KEY (id),
CHECK (leased IN (0, 1)),
CHECK (allocated IN (0, 1)),
CHECK (deleted IN (0, 1)),
CHECK (reserved IN (0, 1))
);
INSERT INTO fixed_ips_backup
SELECT id,
address,
network_id,
instance_id,
allocated,
leased,
reserved,
created_at,
updated_at,
deleted_at,
deleted,
addressV6,
netmaskV6,
gatewayV6
FROM fixed_ips;
DROP TABLE fixed_ips;
CREATE TABLE fixed_ips (
id INTEGER NOT NULL,
address VARCHAR(255),
network_id INTEGER,
instance_id INTEGER,
allocated BOOLEAN DEFAULT FALSE,
leased BOOLEAN DEFAULT FALSE,
reserved BOOLEAN DEFAULT FALSE,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
CHECK (leased IN (0, 1)),
CHECK (allocated IN (0, 1)),
CHECK (deleted IN (0, 1)),
CHECK (reserved IN (0, 1))
);
INSERT INTO fixed_ips
SELECT id,
address,
network_id,
instance_id,
allocated,
leased,
reserved,
created_at,
updated_at,
deleted_at,
deleted
FROM fixed_ips_backup;
DROP TABLE fixed_ips_backup;
COMMIT;

View File

@ -17,37 +17,36 @@ from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import MetaData, String, Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
#
# New Tables
#
instance_types = Table('instance_types', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True),
Column('id', Integer(), primary_key=True, nullable=False),
Column('memory_mb', Integer(), nullable=False),
Column('vcpus', Integer(), nullable=False),
Column('local_gb', Integer(), nullable=False),
Column('flavorid', Integer(), nullable=False, unique=True),
Column('swap', Integer(), nullable=False, default=0),
Column('rxtx_quota', Integer(), nullable=False, default=0),
Column('rxtx_cap', Integer(), nullable=False, default=0))
def upgrade(migrate_engine):
# Upgrade operations go here
# Don't create your own engine; bind migrate_engine
# to your metadata
meta = MetaData()
meta.bind = migrate_engine
#
# New Tables
#
instance_types = Table('instance_types', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('name',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True),
Column('id', Integer(), primary_key=True, nullable=False),
Column('memory_mb', Integer(), nullable=False),
Column('vcpus', Integer(), nullable=False),
Column('local_gb', Integer(), nullable=False),
Column('flavorid', Integer(), nullable=False, unique=True),
Column('swap', Integer(), nullable=False, default=0),
Column('rxtx_quota', Integer(), nullable=False, default=0),
Column('rxtx_cap', Integer(), nullable=False, default=0))
try:
instance_types.create()
except Exception:
@ -79,5 +78,8 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
for table in (instance_types):
meta = MetaData()
meta.bind = migrate_engine
instance_types = Table('instance_types', meta, autoload=True)
for table in (instance_types, ):
table.drop()

View File

@ -13,44 +13,41 @@
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.from sqlalchemy import *
# under the License.
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
from sqlalchemy import MetaData, String, Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
migrations = Table('migrations', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('source_compute', String(255)),
Column('dest_compute', String(255)),
Column('dest_host', String(255)),
Column('instance_id', Integer, ForeignKey('instances.id'),
nullable=True),
Column('status', String(255)),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
instances = Table('instances', meta, autoload=True)
#
# New Tables
#
migrations = Table('migrations', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('source_compute', String(255)),
Column('dest_compute', String(255)),
Column('dest_host', String(255)),
Column('instance_id', Integer, ForeignKey('instances.id'),
nullable=True),
Column('status', String(255)),
)
for table in (migrations, ):
try:
table.create()
@ -58,3 +55,16 @@ def upgrade(migrate_engine):
LOG.info(repr(table))
LOG.exception('Exception while creating table')
raise
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
instances = Table('instances', meta, autoload=True)
migrations = Table('migrations', meta, autoload=True)
for table in (migrations, ):
table.drop()

View File

@ -16,24 +16,20 @@
from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
instances_os_type = Column('os_type',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
instances_os_type = Column('os_type',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
instances.create_column(instances_os_type)
migrate_engine.execute(instances.update()\
.where(instances.c.os_type == None)\
@ -41,6 +37,9 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
instances.drop_column('os_type')

View File

@ -20,58 +20,42 @@ from sqlalchemy import Boolean, Column, DateTime, Integer, MetaData
from sqlalchemy import Table, Text
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
compute_nodes = Table('compute_nodes', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('service_id', Integer(), nullable=False),
Column('vcpus', Integer(), nullable=False),
Column('memory_mb', Integer(), nullable=False),
Column('local_gb', Integer(), nullable=False),
Column('vcpus_used', Integer(), nullable=False),
Column('memory_mb_used', Integer(), nullable=False),
Column('local_gb_used', Integer(), nullable=False),
Column('hypervisor_type',
Text(convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False),
Column('hypervisor_version', Integer(), nullable=False),
Column('cpu_info',
Text(convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False),
)
#
# Tables to alter
#
instances_launched_on = Column(
'launched_on',
Text(convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
compute_nodes = Table('compute_nodes', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('service_id', Integer(), nullable=False),
Column('vcpus', Integer(), nullable=False),
Column('memory_mb', Integer(), nullable=False),
Column('local_gb', Integer(), nullable=False),
Column('vcpus_used', Integer(), nullable=False),
Column('memory_mb_used', Integer(), nullable=False),
Column('local_gb_used', Integer(), nullable=False),
Column('hypervisor_type',
Text(convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False),
Column('hypervisor_version', Integer(), nullable=False),
Column('cpu_info',
Text(convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False),
)
try:
compute_nodes.create()
except Exception:
@ -80,4 +64,22 @@ def upgrade(migrate_engine):
meta.drop_all(tables=[compute_nodes])
raise
instances_launched_on = Column(
'launched_on',
Text(convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
instances.create_column(instances_launched_on)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
compute_nodes = Table('compute_nodes', meta, autoload=True)
compute_nodes.drop()
instances.drop_column('launched_on')

View File

@ -16,135 +16,75 @@
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
from sqlalchemy import MetaData, String, Table
meta = MetaData()
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
#
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# Tables to alter
#
networks = Table('networks', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('injected', Boolean(create_constraint=True, name=None)),
Column('cidr',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('netmask',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('bridge',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('gateway',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('broadcast',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('dns',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('vlan', Integer()),
Column('vpn_public_address',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('vpn_public_port', Integer()),
Column('vpn_private_address',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('dhcp_start',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('project_id',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('host',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('cidr_v6',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('ra_server', String(length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
Column(
'label',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
fixed_ips = Table('fixed_ips', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('address',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('network_id',
Integer(),
ForeignKey('networks.id'),
nullable=True),
Column('instance_id',
Integer(),
ForeignKey('instances.id'),
nullable=True),
Column('allocated', Boolean(create_constraint=True, name=None)),
Column('leased', Boolean(create_constraint=True, name=None)),
Column('reserved', Boolean(create_constraint=True, name=None)),
Column("addressV6", String(length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
Column("netmaskV6", String(length=3,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
Column("gatewayV6", String(length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
)
#
# New Tables
#
# None
#
# Columns to add to existing tables
#
networks_netmask_v6 = Column(
'netmask_v6',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
instances = Table('instances', meta, autoload=True)
networks = Table('networks', meta, autoload=True)
fixed_ips = Table('fixed_ips', meta, autoload=True)
# Alter column name
networks.c.ra_server.alter(name='gateway_v6')
# Add new column to existing table
networks_netmask_v6 = Column(
'netmask_v6',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
networks.create_column(networks_netmask_v6)
# drop existing columns from table
fixed_ips.c.addressV6.drop()
fixed_ips.c.netmaskV6.drop()
fixed_ips.c.gatewayV6.drop()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
instances = Table('instances', meta, autoload=True)
networks = Table('networks', meta, autoload=True)
fixed_ips = Table('fixed_ips', meta, autoload=True)
networks.c.gateway_v6.alter(name='ra_server')
networks.drop_column('netmask_v6')
fixed_ips_addressV6 = Column(
"addressV6",
String(
length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
fixed_ips_netmaskV6 = Column(
"netmaskV6",
String(
length=3,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
fixed_ips_gatewayV6 = Column(
"gatewayV6",
String(
length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
for column in (fixed_ips_addressV6,
fixed_ips_netmaskV6,
fixed_ips_gatewayV6):
fixed_ips.create_column(column)

View File

@ -0,0 +1,195 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE networks_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
injected BOOLEAN,
cidr VARCHAR(255),
netmask VARCHAR(255),
bridge VARCHAR(255),
gateway VARCHAR(255),
broadcast VARCHAR(255),
dns VARCHAR(255),
vlan INTEGER,
vpn_public_address VARCHAR(255),
vpn_public_port INTEGER,
vpn_private_address VARCHAR(255),
dhcp_start VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
cidr_v6 VARCHAR(255),
ra_server VARCHAR(255),
label VARCHAR(255),
PRIMARY KEY (id),
CHECK (injected IN (0, 1)),
CHECK (deleted IN (0, 1))
);
INSERT INTO networks_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
injected,
cidr,
netmask,
bridge,
gateway,
broadcast,
dns,
vlan,
vpn_public_address,
vpn_public_port,
vpn_private_address,
dhcp_start,
project_id,
host,
cidr_v6,
ra_server,
label
FROM networks;
DROP TABLE networks;
CREATE TABLE networks (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
injected BOOLEAN,
cidr VARCHAR(255),
netmask VARCHAR(255),
bridge VARCHAR(255),
gateway VARCHAR(255),
broadcast VARCHAR(255),
dns VARCHAR(255),
vlan INTEGER,
vpn_public_address VARCHAR(255),
vpn_public_port INTEGER,
vpn_private_address VARCHAR(255),
dhcp_start VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
cidr_v6 VARCHAR(255),
gateway_v6 VARCHAR(255),
label VARCHAR(255),
netmask_v6 VARCHAR(255),
PRIMARY KEY (id),
CHECK (injected IN (0, 1)),
CHECK (deleted IN (0, 1))
);
INSERT INTO networks
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
injected,
cidr,
netmask,
bridge,
gateway,
broadcast,
dns,
vlan,
vpn_public_address,
vpn_public_port,
vpn_private_address,
dhcp_start,
project_id,
host,
cidr_v6,
ra_server AS gateway_v6,
label,
NULL AS netmask_v6
FROM networks_backup;
DROP TABLE networks_backup;
CREATE TEMPORARY TABLE fixed_ips_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
address VARCHAR(255),
network_id INTEGER,
instance_id INTEGER,
allocated BOOLEAN,
leased BOOLEAN,
reserved BOOLEAN,
addressV6 VARCHAR(255),
netmaskV6 VARCHAR(3),
gatewayV6 VARCHAR(255),
PRIMARY KEY (id),
CHECK (reserved IN (0, 1)),
CHECK (allocated IN (0, 1)),
CHECK (leased IN (0, 1)),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(instance_id) REFERENCES instances (id),
FOREIGN KEY(network_id) REFERENCES networks (id)
);
INSERT INTO fixed_ips_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
address,
network_id,
instance_id,
allocated,
leased,
reserved,
addressV6,
netmaskV6,
gatewayV6
FROM fixed_ips;
DROP TABLE fixed_ips;
CREATE TABLE fixed_ips (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
address VARCHAR(255),
network_id INTEGER,
instance_id INTEGER,
allocated BOOLEAN,
leased BOOLEAN,
reserved BOOLEAN,
PRIMARY KEY (id),
CHECK (reserved IN (0, 1)),
CHECK (allocated IN (0, 1)),
CHECK (leased IN (0, 1)),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(instance_id) REFERENCES instances (id),
FOREIGN KEY(network_id) REFERENCES networks (id)
);
INSERT INTO fixed_ips
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
address,
network_id,
instance_id,
allocated,
leased,
reserved
FROM fixed_ips_backup;
DROP TABLE fixed_ips_backup;
COMMIT;

View File

@ -13,34 +13,31 @@
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.from sqlalchemy import *
# under the License.
from sqlalchemy import Column, Integer, MetaData, Table
meta = MetaData()
migrations = Table('migrations', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# Tables to alter
#
#
old_flavor_id = Column('old_flavor_id', Integer())
new_flavor_id = Column('new_flavor_id', Integer())
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
migrations = Table('migrations', meta, autoload=True)
old_flavor_id = Column('old_flavor_id', Integer())
new_flavor_id = Column('new_flavor_id', Integer())
migrations.create_column(old_flavor_id)
migrations.create_column(new_flavor_id)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
migrations.drop_column(old_flavor_id)
migrations.drop_column(new_flavor_id)
migrations = Table('migrations', meta, autoload=True)
migrations.drop_column('old_flavor_id')
migrations.drop_column('new_flavor_id')

View File

@ -0,0 +1,69 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE migrations_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
source_compute VARCHAR(255),
dest_compute VARCHAR(255),
dest_host VARCHAR(255),
instance_id INTEGER,
status VARCHAR(255),
old_flavor_id INTEGER,
new_flavor_id INTEGER,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(instance_id) REFERENCES instances (id)
);
INSERT INTO migrations_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
source_compute,
dest_compute,
dest_host,
instance_id,
status,
old_flavor_id,
new_flavor_id
FROM migrations;
DROP TABLE migrations;
CREATE TABLE migrations (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
source_compute VARCHAR(255),
dest_compute VARCHAR(255),
dest_host VARCHAR(255),
instance_id INTEGER,
status VARCHAR(255),
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(instance_id) REFERENCES instances (id)
);
INSERT INTO migrations
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
source_compute,
dest_compute,
dest_host,
instance_id,
status
FROM migrations_backup;
DROP TABLE migrations_backup;
COMMIT;

View File

@ -15,37 +15,22 @@
# under the License.
from sqlalchemy import Column, Integer, MetaData, String, Table
#from nova import log as logging
meta = MetaData()
c_instance_type = Column('instance_type',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
c_instance_type_id = Column('instance_type_id',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
instance_types = Table('instance_types', meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instance_types = Table('instance_types', meta, autoload=True)
instances = Table('instances', meta, autoload=True)
c_instance_type_id = Column('instance_type_id',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
instances.create_column(c_instance_type_id)
@ -63,17 +48,25 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instance_types = Table('instance_types', meta, autoload=True)
instances = Table('instances', meta, autoload=True)
c_instance_type = Column('instance_type',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
instances.create_column(c_instance_type)
type_names = {}
recs = migrate_engine.execute(instance_types.select())
for row in recs:
type_id = row[0]
type_name = row[1]
type_names[row[0]] = row[1]
for type_id, type_name in type_names.iteritems():
migrate_engine.execute(instances.update()\
.where(instances.c.instance_type_id == type_id)\
.values(instance_type=type_name))

View File

@ -17,19 +17,19 @@
from sqlalchemy import Boolean, Column, MetaData, Table
meta = MetaData()
c_auto_assigned = Column('auto_assigned', Boolean, default=False)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
floating_ips = Table('floating_ips',
meta,
autoload=True,
autoload_with=migrate_engine)
floating_ips = Table('floating_ips', meta, autoload=True)
c_auto_assigned = Column('auto_assigned', Boolean, default=False)
floating_ips.create_column(c_auto_assigned)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
floating_ips = Table('floating_ips', meta, autoload=True)
floating_ips.drop_column('auto_assigned')

View File

@ -0,0 +1,62 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE floating_ips_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
address VARCHAR(255),
fixed_ip_id INTEGER,
project_id VARCHAR(255),
host VARCHAR(255),
auto_assigned BOOLEAN,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (auto_assigned IN (0, 1)),
FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id)
);
INSERT INTO floating_ips_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
address,
fixed_ip_id,
project_id,
host,
auto_assigned
FROM floating_ips;
DROP TABLE floating_ips;
CREATE TABLE floating_ips (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
address VARCHAR(255),
fixed_ip_id INTEGER,
project_id VARCHAR(255),
host VARCHAR(255),
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id)
);
INSERT INTO floating_ips
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
address,
fixed_ip_id,
project_id,
host
FROM floating_ips_backup;
DROP TABLE floating_ips_backup;
COMMIT;

View File

@ -19,8 +19,6 @@ from sqlalchemy import MetaData, String, Table
from nova import utils
meta = MetaData()
resources = [
'instances',
'cores',
@ -31,7 +29,7 @@ resources = [
]
def old_style_quotas_table(name):
def old_style_quotas_table(meta, name):
return Table(name, meta,
Column('id', Integer(), primary_key=True),
Column('created_at', DateTime(),
@ -53,7 +51,7 @@ def old_style_quotas_table(name):
)
def new_style_quotas_table(name):
def new_style_quotas_table(meta, name):
return Table(name, meta,
Column('id', Integer(), primary_key=True),
Column('created_at', DateTime(),
@ -75,8 +73,8 @@ def new_style_quotas_table(name):
)
def quotas_table(migrate_engine, name='quotas'):
return Table(name, meta, autoload=True, autoload_with=migrate_engine)
def quotas_table(meta, name='quotas'):
return Table(name, meta, autoload=True)
def _assert_no_duplicate_project_ids(quotas):
@ -177,12 +175,13 @@ def convert_backward(migrate_engine, old_quotas, new_quotas):
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
old_quotas = quotas_table(migrate_engine)
old_quotas = quotas_table(meta)
assert_old_quotas_have_no_active_duplicates(migrate_engine, old_quotas)
new_quotas = new_style_quotas_table('quotas_new')
new_quotas = new_style_quotas_table(meta, 'quotas_new')
new_quotas.create()
convert_forward(migrate_engine, old_quotas, new_quotas)
old_quotas.drop()
@ -190,18 +189,19 @@ def upgrade(migrate_engine):
# clear metadata to work around this:
# http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128
meta.clear()
new_quotas = quotas_table(migrate_engine, 'quotas_new')
new_quotas = quotas_table(meta, 'quotas_new')
new_quotas.rename('quotas')
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData()
meta.bind = migrate_engine
new_quotas = quotas_table(migrate_engine)
new_quotas = quotas_table(meta)
assert_new_quotas_have_no_active_duplicates(migrate_engine, new_quotas)
old_quotas = old_style_quotas_table('quotas_old')
old_quotas = old_style_quotas_table(meta, 'quotas_old')
old_quotas.create()
convert_backward(migrate_engine, old_quotas, new_quotas)
new_quotas.drop()
@ -209,5 +209,5 @@ def downgrade(migrate_engine):
# clear metadata to work around this:
# http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128
meta.clear()
old_quotas = quotas_table(migrate_engine, 'quotas_old')
old_quotas = quotas_table(meta, 'quotas_old')
old_quotas.rename('quotas')

View File

@ -18,14 +18,14 @@
from sqlalchemy import Column, Integer, MetaData, String, Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instances = Table('instances', meta, autoload=True)
types = {}
for instance in migrate_engine.execute(instances.select()):
@ -56,9 +56,10 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instances = Table('instances', meta, autoload=True)
integer_column = instances.c.instance_type_id
string_column = Column('instance_type_id_str',

View File

@ -16,26 +16,20 @@
from sqlalchemy import MetaData, Table
meta = MetaData()
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
tokens = Table('auth_tokens', meta, autoload=True,
autoload_with=migrate_engine)
tokens = Table('auth_tokens', meta, autoload=True)
c_manageent = tokens.c.server_manageent_url
c_manageent.alter(name='server_management_url')
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
tokens = Table('auth_tokens', meta, autoload=True,
autoload_with=migrate_engine)
tokens = Table('auth_tokens', meta, autoload=True)
c_management = tokens.c.server_management_url
c_management.alter(name='server_manageent_url')

View File

@ -20,45 +20,51 @@ from sqlalchemy import Integer, DateTime, Boolean, String
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
snapshots = Table('snapshots', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_id', Integer(), nullable=False),
Column('user_id',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('project_id',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('status',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('progress',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('volume_size', Integer()),
Column('scheduled_at', DateTime(timezone=False)),
Column('display_name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('display_description',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
#
# New Tables
#
snapshots = Table('snapshots', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_id', Integer(), nullable=False),
Column('user_id',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('project_id',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('status',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('progress',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('volume_size', Integer()),
Column('scheduled_at', DateTime(timezone=False)),
Column('display_name',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('display_description',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
try:
snapshots.create()
except Exception:
@ -70,4 +76,7 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData()
meta.bind = migrate_engine
snapshots = Table('snapshots', meta, autoload=True)
snapshots.drop()

View File

@ -18,28 +18,23 @@
from sqlalchemy import Column, Table, MetaData, Integer
meta = MetaData()
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
#
volumes = Table('volumes', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Column
#
snapshot_id = Column('snapshot_id', Integer())
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
volumes = Table('volumes', meta, autoload=True)
snapshot_id = Column('snapshot_id', Integer())
# Add columns to existing tables
volumes.create_column(snapshot_id)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
volumes = Table('volumes', meta, autoload=True)
volumes.drop_column('snapshot_id')

View File

@ -0,0 +1,119 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE volumes_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
ec2_id VARCHAR(255),
user_id VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
size INTEGER,
availability_zone VARCHAR(255),
instance_id INTEGER,
mountpoint VARCHAR(255),
attach_time VARCHAR(255),
status VARCHAR(255),
attach_status VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
provider_location VARCHAR(256),
provider_auth VARCHAR(256),
snapshot_id INTEGER,
PRIMARY KEY (id),
FOREIGN KEY(instance_id) REFERENCES instances (id),
CHECK (deleted IN (0, 1))
);
INSERT INTO volumes_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
ec2_id,
user_id,
project_id,
host,
size,
availability_zone,
instance_id,
mountpoint,
attach_time,
status,
attach_status,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
provider_location,
provider_auth,
snapshot_id
FROM volumes;
DROP TABLE volumes;
CREATE TABLE volumes (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
ec2_id VARCHAR(255),
user_id VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
size INTEGER,
availability_zone VARCHAR(255),
instance_id INTEGER,
mountpoint VARCHAR(255),
attach_time VARCHAR(255),
status VARCHAR(255),
attach_status VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
provider_location VARCHAR(256),
provider_auth VARCHAR(256),
PRIMARY KEY (id),
FOREIGN KEY(instance_id) REFERENCES instances (id),
CHECK (deleted IN (0, 1))
);
INSERT INTO volumes
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
ec2_id,
user_id,
project_id,
host,
size,
availability_zone,
instance_id,
mountpoint,
attach_time,
status,
attach_status,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
provider_location,
provider_auth
FROM volumes_backup;
DROP TABLE volumes_backup;
COMMIT;

View File

@ -17,24 +17,22 @@
from sqlalchemy import MetaData, Table
meta = MetaData()
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instances = Table('instances', meta, autoload=True)
image_id_column = instances.c.image_id
image_id_column.alter(name='image_ref')
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instances = Table('instances', meta, autoload=True)
image_ref_column = instances.c.image_ref
image_ref_column.alter(name='image_id')

View File

@ -16,12 +16,11 @@
from sqlalchemy import MetaData
meta = MetaData()
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name == "mysql":
migrate_engine.execute("ALTER TABLE auth_tokens Engine=InnoDB")
@ -62,4 +61,4 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta.bind = migrate_engine
pass

View File

@ -16,30 +16,27 @@
from sqlalchemy import Column, MetaData, String, Table
meta = MetaData()
instances_vm_mode = Column('vm_mode',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instances = Table('instances', meta, autoload=True)
instances_vm_mode = Column('vm_mode',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
instances.create_column(instances_vm_mode)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instances = Table('instances', meta, autoload=True)
instances.drop_column('vm_mode')

View File

@ -18,62 +18,57 @@ from sqlalchemy import DateTime, Boolean, Integer, String
from sqlalchemy import ForeignKey
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
volumes = Table('volumes', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
snapshots = Table('snapshots', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
block_device_mapping = Table('block_device_mapping', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, autoincrement=True),
Column('instance_id',
Integer(),
ForeignKey('instances.id'),
nullable=False),
Column('device_name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False),
Column('delete_on_termination',
Boolean(create_constraint=True, name=None),
default=False),
Column('virtual_name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True),
Column('snapshot_id',
Integer(),
ForeignKey('snapshots.id'),
nullable=True),
Column('volume_id', Integer(), ForeignKey('volumes.id'),
nullable=True),
Column('volume_size', Integer(), nullable=True),
Column('no_device',
Boolean(create_constraint=True, name=None),
nullable=True),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
instances = Table('instances', meta, autoload=True)
volumes = Table('volumes', meta, autoload=True)
snapshots = Table('snapshots', meta, autoload=True)
#
# New Tables
#
block_device_mapping = Table('block_device_mapping', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, autoincrement=True),
Column('instance_id',
Integer(),
ForeignKey('instances.id'),
nullable=False),
Column('device_name',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False),
Column('delete_on_termination',
Boolean(create_constraint=True, name=None),
default=False),
Column('virtual_name',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True),
Column('snapshot_id',
Integer(),
ForeignKey('snapshots.id'),
nullable=True),
Column('volume_id', Integer(), ForeignKey('volumes.id'),
nullable=True),
Column('volume_size', Integer(), nullable=True),
Column('no_device',
Boolean(create_constraint=True, name=None),
nullable=True),
)
try:
block_device_mapping.create()
except Exception:
@ -85,4 +80,13 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
instances = Table('instances', meta, autoload=True)
volumes = Table('volumes', meta, autoload=True)
snapshots = Table('snapshots', meta, autoload=True)
block_device_mapping = Table('block_device_mapping', meta, autoload=True)
block_device_mapping.drop()

View File

@ -19,15 +19,13 @@ from sqlalchemy import Column, Integer, MetaData, String, Table
from nova import utils
meta = MetaData()
instances = Table("instances", meta,
Column("id", Integer(), primary_key=True, nullable=False))
uuid_column = Column("uuid", String(36))
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
uuid_column = Column("uuid", String(36))
instances.create_column(uuid_column)
rows = migrate_engine.execute(instances.select())
@ -39,5 +37,9 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances.drop_column(uuid_column)
instances = Table('instances', meta, autoload=True)
instances.drop_column('uuid')

View File

@ -18,59 +18,72 @@ from sqlalchemy import MetaData, String, Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
#
# New Tables
#
builds = Table('agent_builds', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('hypervisor',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('os',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('architecture',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('version',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('url',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('md5hash',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
#
# New Column
#
architecture = Column('architecture', String(length=255))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
#
# New Tables
#
builds = Table('agent_builds', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('hypervisor',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('os',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('architecture',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('version',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('url',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('md5hash',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
for table in (builds, ):
try:
table.create()
except Exception:
LOG.info(repr(table))
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instances = Table('instances', meta, autoload=True)
#
# New Columns
#
architecture = Column('architecture', String(length=255))
# Add columns to existing tables
instances.create_column(architecture)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
builds = Table('agent_builds', meta, autoload=True)
for table in (builds, ):
table.drop()
instances = Table('instances', meta, autoload=True)
instances.drop_column('architecture')

View File

@ -16,56 +16,38 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import Integer, MetaData, String
from sqlalchemy import Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
services = Table('services', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
networks = Table('networks', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
provider_fw_rules = Table('provider_fw_rules', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('protocol',
String(length=5, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('from_port', Integer()),
Column('to_port', Integer()),
Column('cidr',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
#
# New Tables
#
provider_fw_rules = Table('provider_fw_rules', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('protocol',
String(length=5, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('from_port', Integer()),
Column('to_port', Integer()),
Column('cidr',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
for table in (provider_fw_rules,):
try:
table.create()
@ -73,3 +55,11 @@ def upgrade(migrate_engine):
LOG.info(repr(table))
LOG.exception('Exception while creating table')
raise
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
provider_fw_rules = Table('provider_fw_rules', meta, autoload=True)
for table in (provider_fw_rules,):
table.drop()

View File

@ -18,41 +18,40 @@ from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
from sqlalchemy import MetaData, String, Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instance_types = Table('instance_types', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
instance_type_extra_specs_table = Table('instance_type_extra_specs', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_type_id',
Integer(),
ForeignKey('instance_types.id'),
nullable=False),
Column('key',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('value',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
instance_types = Table('instance_types', meta, autoload=True)
#
# New Tables
#
instance_type_extra_specs_table = Table('instance_type_extra_specs', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_type_id',
Integer(),
ForeignKey('instance_types.id'),
nullable=False),
Column('key',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('value',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
for table in (instance_type_extra_specs_table, ):
try:
table.create()
@ -64,5 +63,14 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
instance_types = Table('instance_types', meta, autoload=True)
instance_type_extra_specs_table = Table('instance_type_extra_specs',
meta,
autoload=True)
for table in (instance_type_extra_specs_table, ):
table.drop()

View File

@ -14,25 +14,28 @@
from sqlalchemy import Column, Float, Integer, MetaData, Table
meta = MetaData()
zones = Table('zones', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
weight_offset = Column('weight_offset', Float(), default=0.0)
weight_scale = Column('weight_scale', Float(), default=1.0)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
zones = Table('zones', meta, autoload=True)
#
# New Columns
#
weight_offset = Column('weight_offset', Float(), default=0.0)
weight_scale = Column('weight_scale', Float(), default=1.0)
zones.create_column(weight_offset)
zones.create_column(weight_scale)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
zones.drop_column(weight_offset)
zones.drop_column(weight_scale)
zones = Table('zones', meta, autoload=True)
zones.drop_column('weight_offset')
zones.drop_column('weight_scale')

View File

@ -13,52 +13,18 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from sqlalchemy import select, Boolean, Column, DateTime, ForeignKey
from sqlalchemy import Integer, MetaData, String
from sqlalchemy import Table
from nova import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
meta = MetaData()
# virtual interface table to add to DB
virtual_interfaces = Table('virtual_interfaces', meta,
Column('created_at', DateTime(timezone=False),
default=utils.utcnow()),
Column('updated_at', DateTime(timezone=False),
onupdate=utils.utcnow()),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('address',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True),
Column('network_id',
Integer(),
ForeignKey('networks.id')),
Column('instance_id',
Integer(),
ForeignKey('instances.id'),
nullable=False),
mysql_engine='InnoDB')
# bridge_interface column to add to networks table
interface = Column('bridge_interface',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False))
# virtual interface id column to add to fixed_ips table
# foreignkey added in next migration
virtual_interface_id = Column('virtual_interface_id',
Integer())
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# grab tables and (column for dropping later)
@ -67,6 +33,13 @@ def upgrade(migrate_engine):
fixed_ips = Table('fixed_ips', meta, autoload=True)
c = instances.columns['mac_address']
interface = Column('bridge_interface',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False))
virtual_interface_id = Column('virtual_interface_id',
Integer())
# add interface column to networks table
# values will have to be set manually before running nova
try:
@ -75,6 +48,31 @@ def upgrade(migrate_engine):
LOG.error(_("interface column not added to networks table"))
raise
#
# New Tables
#
virtual_interfaces = Table('virtual_interfaces', meta,
Column('created_at', DateTime(timezone=False),
default=utils.utcnow()),
Column('updated_at', DateTime(timezone=False),
onupdate=utils.utcnow()),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('address',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True),
Column('network_id',
Integer(),
ForeignKey('networks.id')),
Column('instance_id',
Integer(),
ForeignKey('instances.id'),
nullable=False),
mysql_engine='InnoDB')
# create virtual_interfaces table
try:
virtual_interfaces.create()
@ -120,5 +118,29 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
LOG.error(_("Can't downgrade without losing data"))
raise Exception
meta = MetaData()
meta.bind = migrate_engine
# grab tables and (column for dropping later)
instances = Table('instances', meta, autoload=True)
networks = Table('networks', meta, autoload=True)
fixed_ips = Table('fixed_ips', meta, autoload=True)
virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
mac_address = Column('mac_address',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
instances.create_column(mac_address)
s = select([instances.c.id, virtual_interfaces.c.address],
virtual_interfaces.c.instance_id == instances.c.id)
for row in s.execute():
u = instances.update().values(mac_address=row['address']).\
where(instances.c.id == row['id'])
networks.drop_column('bridge_interface')
virtual_interfaces.drop()
fixed_ips.drop_column('virtual_interface_id')

View File

@ -0,0 +1,377 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE instances_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
internal_id INTEGER,
admin_pass VARCHAR(255),
user_id VARCHAR(255),
project_id VARCHAR(255),
image_ref VARCHAR(255),
kernel_id VARCHAR(255),
ramdisk_id VARCHAR(255),
server_name VARCHAR(255),
launch_index INTEGER,
key_name VARCHAR(255),
key_data TEXT,
state INTEGER,
state_description VARCHAR(255),
memory_mb INTEGER,
vcpus INTEGER,
local_gb INTEGER,
hostname VARCHAR(255),
host VARCHAR(255),
user_data TEXT,
reservation_id VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
availability_zone VARCHAR(255),
locked BOOLEAN,
os_type VARCHAR(255),
launched_on TEXT,
instance_type_id INTEGER,
vm_mode VARCHAR(255),
uuid VARCHAR(36),
architecture VARCHAR(255),
PRIMARY KEY (id),
CHECK (locked IN (0, 1)),
CHECK (deleted IN (0, 1))
);
INSERT INTO instances_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
internal_id,
admin_pass,
user_id,
project_id,
image_ref,
kernel_id,
ramdisk_id,
server_name,
launch_index,
key_name,
key_data,
state,
state_description,
memory_mb,
vcpus,
local_gb,
hostname,
host,
user_data,
reservation_id,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
availability_zone,
locked,
os_type,
launched_on,
instance_type_id,
vm_mode,
uuid,
architecture
FROM instances;
DROP TABLE instances;
CREATE TABLE instances (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
internal_id INTEGER,
admin_pass VARCHAR(255),
user_id VARCHAR(255),
project_id VARCHAR(255),
image_ref VARCHAR(255),
kernel_id VARCHAR(255),
ramdisk_id VARCHAR(255),
server_name VARCHAR(255),
launch_index INTEGER,
key_name VARCHAR(255),
key_data TEXT,
state INTEGER,
state_description VARCHAR(255),
memory_mb INTEGER,
vcpus INTEGER,
local_gb INTEGER,
hostname VARCHAR(255),
host VARCHAR(255),
user_data TEXT,
reservation_id VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
availability_zone VARCHAR(255),
locked BOOLEAN,
os_type VARCHAR(255),
launched_on TEXT,
instance_type_id INTEGER,
vm_mode VARCHAR(255),
uuid VARCHAR(36),
architecture VARCHAR(255),
mac_address VARCHAR(255),
PRIMARY KEY (id),
CHECK (locked IN (0, 1)),
CHECK (deleted IN (0, 1))
);
INSERT INTO instances
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
internal_id,
admin_pass,
user_id,
project_id,
image_ref,
kernel_id,
ramdisk_id,
server_name,
launch_index,
key_name,
key_data,
state,
state_description,
memory_mb,
vcpus,
local_gb,
hostname,
host,
user_data,
reservation_id,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
availability_zone,
locked,
os_type,
launched_on,
instance_type_id,
vm_mode,
uuid,
architecture,
NULL AS mac_address
FROM instances_backup;
DROP TABLE instances_backup;
UPDATE instances SET mac_address=(SELECT address
FROM virtual_interfaces
WHERE virtual_interfaces.instance_id = instances.id);
CREATE TEMPORARY TABLE networks_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
injected BOOLEAN,
cidr VARCHAR(255),
netmask VARCHAR(255),
bridge VARCHAR(255),
gateway VARCHAR(255),
broadcast VARCHAR(255),
dns VARCHAR(255),
vlan INTEGER,
vpn_public_address VARCHAR(255),
vpn_public_port INTEGER,
vpn_private_address VARCHAR(255),
dhcp_start VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
cidr_v6 VARCHAR(255),
gateway_v6 VARCHAR(255),
label VARCHAR(255),
netmask_v6 VARCHAR(255),
bridge_interface VARCHAR(255),
PRIMARY KEY (id),
CHECK (injected IN (0, 1)),
CHECK (deleted IN (0, 1))
);
INSERT INTO networks_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
injected,
cidr,
netmask,
bridge,
gateway,
broadcast,
dns,
vlan,
vpn_public_address,
vpn_public_port,
vpn_private_address,
dhcp_start,
project_id,
host,
cidr_v6,
gateway_v6,
label,
netmask_v6,
bridge_interface
FROM networks;
DROP TABLE networks;
CREATE TABLE networks (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
injected BOOLEAN,
cidr VARCHAR(255),
netmask VARCHAR(255),
bridge VARCHAR(255),
gateway VARCHAR(255),
broadcast VARCHAR(255),
dns VARCHAR(255),
vlan INTEGER,
vpn_public_address VARCHAR(255),
vpn_public_port INTEGER,
vpn_private_address VARCHAR(255),
dhcp_start VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
cidr_v6 VARCHAR(255),
gateway_v6 VARCHAR(255),
label VARCHAR(255),
netmask_v6 VARCHAR(255),
PRIMARY KEY (id),
CHECK (injected IN (0, 1)),
CHECK (deleted IN (0, 1))
);
INSERT INTO networks
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
injected,
cidr,
netmask,
bridge,
gateway,
broadcast,
dns,
vlan,
vpn_public_address,
vpn_public_port,
vpn_private_address,
dhcp_start,
project_id,
host,
cidr_v6,
gateway_v6,
label,
netmask_v6
FROM networks_backup;
DROP TABLE networks_backup;
DROP TABLE virtual_interfaces;
CREATE TEMPORARY TABLE fixed_ips_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
address VARCHAR(255),
network_id INTEGER,
instance_id INTEGER,
allocated BOOLEAN,
leased BOOLEAN,
reserved BOOLEAN,
virtual_interface_id INTEGER,
PRIMARY KEY (id),
CHECK (reserved IN (0, 1)),
CHECK (allocated IN (0, 1)),
CHECK (leased IN (0, 1)),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(instance_id) REFERENCES instances (id),
FOREIGN KEY(network_id) REFERENCES networks (id)
);
INSERT INTO fixed_ips_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
address,
network_id,
instance_id,
allocated,
leased,
reserved,
virtual_interface_id
FROM fixed_ips;
DROP TABLE fixed_ips;
CREATE TABLE fixed_ips (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
address VARCHAR(255),
network_id INTEGER,
instance_id INTEGER,
allocated BOOLEAN,
leased BOOLEAN,
reserved BOOLEAN,
PRIMARY KEY (id),
CHECK (reserved IN (0, 1)),
CHECK (allocated IN (0, 1)),
CHECK (leased IN (0, 1)),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(instance_id) REFERENCES instances (id),
FOREIGN KEY(network_id) REFERENCES networks (id)
);
INSERT INTO fixed_ips
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
address,
network_id,
instance_id,
allocated,
leased,
reserved
FROM fixed_ips_backup;
DROP TABLE fixed_ips_backup;
COMMIT;

View File

@ -13,16 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from sqlalchemy import MetaData, Table
from migrate import ForeignKeyConstraint
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
@ -41,9 +41,14 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
# grab tables
fixed_ips = Table('fixed_ips', meta, autoload=True)
virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
# drop foreignkey if not sqlite
try:
if not dialect.startswith('sqlite'):

View File

@ -15,33 +15,28 @@
from sqlalchemy import Column, Integer, MetaData, Table, String
meta = MetaData()
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Column
#
root_device_name = Column(
'root_device_name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
root_device_name = Column(
'root_device_name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
instances.create_column(root_device_name)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
instances.drop_column('root_device_name')

View File

@ -17,28 +17,26 @@
from sqlalchemy import Column, Table, MetaData, Boolean, String
meta = MetaData()
fixed_ips_host = Column('host', String(255))
networks_multi_host = Column('multi_host', Boolean, default=False)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
fixed_ips_host = Column('host', String(255))
fixed_ips = Table('fixed_ips', meta, autoload=True)
fixed_ips.create_column(fixed_ips_host)
networks_multi_host = Column('multi_host', Boolean, default=False)
networks = Table('networks', meta, autoload=True)
networks.create_column(networks_multi_host)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
fixed_ips = Table('fixed_ips', meta, autoload=True)
fixed_ips.drop_column(fixed_ips_host)
fixed_ips.drop_column('host')
networks = Table('networks', meta, autoload=True)
networks.drop_column(networks_multi_host)
networks.drop_column('multi_host')

View File

@ -0,0 +1,193 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE fixed_ips_backup (
id INTEGER NOT NULL,
address VARCHAR(255),
virtual_interface_id INTEGER,
network_id INTEGER,
instance_id INTEGER,
allocated BOOLEAN default FALSE,
leased BOOLEAN default FALSE,
reserved BOOLEAN default FALSE,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
host VARCHAR(255),
PRIMARY KEY (id),
FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id)
);
INSERT INTO fixed_ips_backup
SELECT id,
address,
virtual_interface_id,
network_id,
instance_id,
allocated,
leased,
reserved,
created_at,
updated_at,
deleted_at,
deleted,
host
FROM fixed_ips;
DROP TABLE fixed_ips;
CREATE TABLE fixed_ips (
id INTEGER NOT NULL,
address VARCHAR(255),
virtual_interface_id INTEGER,
network_id INTEGER,
instance_id INTEGER,
allocated BOOLEAN default FALSE,
leased BOOLEAN default FALSE,
reserved BOOLEAN default FALSE,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id)
);
INSERT INTO fixed_ips
SELECT id,
address,
virtual_interface_id,
network_id,
instance_id,
allocated,
leased,
reserved,
created_at,
updated_at,
deleted_at,
deleted
FROM fixed_ips_backup;
DROP TABLE fixed_ips_backup;
CREATE TEMPORARY TABLE networks_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
injected BOOLEAN,
cidr VARCHAR(255),
netmask VARCHAR(255),
bridge VARCHAR(255),
gateway VARCHAR(255),
broadcast VARCHAR(255),
dns VARCHAR(255),
vlan INTEGER,
vpn_public_address VARCHAR(255),
vpn_public_port INTEGER,
vpn_private_address VARCHAR(255),
dhcp_start VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
cidr_v6 VARCHAR(255),
gateway_v6 VARCHAR(255),
label VARCHAR(255),
netmask_v6 VARCHAR(255),
bridge_interface VARCHAR(255),
multi_host BOOLEAN,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (injected IN (0, 1)),
CHECK (multi_host IN (0, 1))
);
INSERT INTO networks_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
injected,
cidr,
netmask,
bridge,
gateway,
broadcast,
dns,
vlan,
vpn_public_address,
vpn_public_port,
vpn_private_address,
dhcp_start,
project_id,
host,
cidr_v6,
gateway_v6,
label,
netmask_v6,
bridge_interface,
multi_host
FROM networks;
DROP TABLE networks;
CREATE TABLE networks(
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
injected BOOLEAN,
cidr VARCHAR(255),
netmask VARCHAR(255),
bridge VARCHAR(255),
gateway VARCHAR(255),
broadcast VARCHAR(255),
dns VARCHAR(255),
vlan INTEGER,
vpn_public_address VARCHAR(255),
vpn_public_port INTEGER,
vpn_private_address VARCHAR(255),
dhcp_start VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
cidr_v6 VARCHAR(255),
gateway_v6 VARCHAR(255),
label VARCHAR(255),
netmask_v6 VARCHAR(255),
bridge_interface VARCHAR(255),
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (injected IN (0, 1))
);
INSERT INTO networks
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
injected,
cidr,
netmask,
bridge,
gateway,
broadcast,
dns,
vlan,
vpn_public_address,
vpn_public_port,
vpn_private_address,
dhcp_start,
project_id,
host,
cidr_v6,
gateway_v6,
label,
netmask_v6,
bridge_interface
FROM networks_backup;
DROP TABLE networks_backup;
COMMIT;

View File

@ -13,26 +13,17 @@
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.from sqlalchemy import *
# under the License.
from sqlalchemy import Column, Integer, String, MetaData, Table
meta = MetaData()
#
# Tables to alter
#
#
instance_id = Column('instance_id', Integer())
instance_uuid = Column('instance_uuid', String(255))
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
migrations = Table('migrations', meta, autoload=True)
instance_uuid = Column('instance_uuid', String(255))
migrations.create_column(instance_uuid)
if migrate_engine.name == "mysql":
@ -46,7 +37,10 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
migrations = Table('migrations', meta, autoload=True)
migrations.c.instance_uuid.drop()
instance_id = Column('instance_id', Integer())
migrations.create_column(instance_id)

View File

@ -17,22 +17,23 @@
from sqlalchemy import Column, Table, MetaData, String
meta = MetaData()
dns2 = Column('dns2', String(255))
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
networks = Table('networks', meta, autoload=True)
networks.c.dns.alter(name='dns1')
dns2 = Column('dns2', String(255))
networks.create_column(dns2)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
networks = Table('networks', meta, autoload=True)
networks.c.dns1.alter(name='dns')
networks.drop_column(dns2)
networks.drop_column('dns2')

View File

@ -13,29 +13,20 @@
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.from sqlalchemy import *
# under the License.
from sqlalchemy import Column, Integer, MetaData, Table
meta = MetaData()
#
# Tables to alter
#
#
old_flavor_id = Column('old_flavor_id', Integer())
new_flavor_id = Column('new_flavor_id', Integer())
old_instance_type_id = Column('old_instance_type_id', Integer())
new_instance_type_id = Column('new_instance_type_id', Integer())
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_types = Table('instance_types', meta, autoload=True)
migrations = Table('migrations', meta, autoload=True)
old_instance_type_id = Column('old_instance_type_id', Integer())
new_instance_type_id = Column('new_instance_type_id', Integer())
migrations.create_column(old_instance_type_id)
migrations.create_column(new_instance_type_id)
@ -57,20 +48,32 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_types = Table('instance_types', meta, autoload=True)
migrations = Table('migrations', meta, autoload=True)
old_flavor_id = Column('old_flavor_id', Integer())
new_flavor_id = Column('new_flavor_id', Integer())
migrations.create_column(old_flavor_id)
migrations.create_column(new_flavor_id)
# Convert instance_type_id to flavor_id
itypes = {}
for instance_type in migrate_engine.execute(instance_types.select()):
itypes[instance_type.flavorid] = instance_type.id
for instance_type_flavorid in itypes.keys():
migrate_engine.execute(migrations.update()\
.where(migrations.c.old_instance_type_id == instance_type.id)\
.values(old_flavor_id=instance_type.flavorid))
.where(migrations.c.old_instance_type_id ==
itypes[instance_type_flavorid])\
.values(old_flavor_id=instance_type_flavorid))
migrate_engine.execute(migrations.update()\
.where(migrations.c.new_instance_type_id == instance_type.id)\
.values(new_flavor_id=instance_type.flavorid))
.where(migrations.c.new_instance_type_id ==
itypes[instance_type_flavorid])\
.values(new_flavor_id=instance_type_flavorid))
migrations.c.old_instance_type_id.drop()
migrations.c.new_instance_type_id.drop()

View File

@ -14,24 +14,29 @@
from sqlalchemy import Column, MetaData, Table, String
meta = MetaData()
admin_pass = Column(
'admin_pass',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instances = Table('instances', meta, autoload=True)
instances.drop_column('admin_pass')
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instances = Table('instances', meta, autoload=True)
#
# New Columns
#
admin_pass = Column(
'admin_pass',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
instances.create_column(admin_pass)

View File

@ -19,16 +19,13 @@ from sqlalchemy import Column, Integer, MetaData, String, Table
from nova import utils
meta = MetaData()
virtual_interfaces = Table("virtual_interfaces", meta,
Column("id", Integer(), primary_key=True,
nullable=False))
uuid_column = Column("uuid", String(36))
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
uuid_column = Column('uuid', String(36))
virtual_interfaces.create_column(uuid_column)
rows = migrate_engine.execute(virtual_interfaces.select())
@ -40,5 +37,9 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
virtual_interfaces.drop_column(uuid_column)
virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
virtual_interfaces.drop_column('uuid')

View File

@ -0,0 +1,63 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE virtual_interfaces_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
address VARCHAR(255),
network_id INTEGER,
instance_id INTEGER NOT NULL,
uuid VARCHAR(36),
PRIMARY KEY (id),
FOREIGN KEY(network_id) REFERENCES networks (id),
FOREIGN KEY(instance_id) REFERENCES instances (id),
UNIQUE (address),
CHECK (deleted IN (0, 1))
);
INSERT INTO virtual_interfaces_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
address,
network_id,
instance_id,
uuid
FROM virtual_interfaces;
DROP TABLE virtual_interfaces;
CREATE TABLE virtual_interfaces (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
address VARCHAR(255),
network_id INTEGER,
instance_id INTEGER NOT NULL,
PRIMARY KEY (id),
FOREIGN KEY(network_id) REFERENCES networks (id),
FOREIGN KEY(instance_id) REFERENCES instances (id),
UNIQUE (address),
CHECK (deleted IN (0, 1))
);
INSERT INTO virtual_interfaces
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
address,
network_id,
instance_id
FROM virtual_interfaces_backup;
DROP TABLE virtual_interfaces_backup;
COMMIT;

View File

@ -14,35 +14,36 @@
from sqlalchemy import Column, Integer, MetaData, Table, String
meta = MetaData()
accessIPv4 = Column(
'access_ip_v4',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
accessIPv6 = Column(
'access_ip_v6',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
accessIPv4 = Column(
'access_ip_v4',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
accessIPv6 = Column(
'access_ip_v6',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
instances.create_column(accessIPv4)
instances.create_column(accessIPv6)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
instances.drop_column('access_ip_v4')
instances.drop_column('access_ip_v6')

View File

@ -19,15 +19,13 @@ from sqlalchemy import Column, Integer, MetaData, String, Table
from nova import utils
meta = MetaData()
networks = Table("networks", meta,
Column("id", Integer(), primary_key=True, nullable=False))
uuid_column = Column("uuid", String(36))
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
networks = Table('networks', meta, autoload=True)
uuid_column = Column("uuid", String(36))
networks.create_column(uuid_column)
rows = migrate_engine.execute(networks.select())
@ -39,5 +37,9 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
networks.drop_column(uuid_column)
networks = Table('networks', meta, autoload=True)
networks.drop_column('uuid')

View File

@ -17,20 +17,20 @@
from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
instances = Table("instances", meta,
Column("id", Integer(), primary_key=True, nullable=False))
# matches the size of an image_ref
config_drive_column = Column("config_drive", String(255), nullable=True)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table("instances", meta, autoload=True)
config_drive_column = Column("config_drive", String(255), nullable=True)
instances.create_column(config_drive_column)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances.drop_column(config_drive_column)
instances = Table("instances", meta, autoload=True)
instances.drop_column('config_drive')

View File

@ -20,83 +20,72 @@ from sqlalchemy import Boolean, ForeignKey
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of tables .
#
volumes = Table('volumes', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
volume_type_id = Column('volume_type_id', Integer(), nullable=True)
# New Tables
#
volume_types = Table('volume_types', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True))
volume_type_extra_specs_table = Table('volume_type_extra_specs', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_type_id',
Integer(),
ForeignKey('volume_types.id'),
nullable=False),
Column('key',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('value',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
volume_metadata_table = Table('volume_metadata', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_id',
Integer(),
ForeignKey('volumes.id'),
nullable=False),
Column('key',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('value',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
new_tables = (volume_types,
volume_type_extra_specs_table,
volume_metadata_table)
#
# Tables to alter
#
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
volumes = Table('volumes', meta, autoload=True)
#
# New Tables
#
volume_types = Table('volume_types', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('name',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True))
volume_type_extra_specs_table = Table('volume_type_extra_specs', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_type_id',
Integer(),
ForeignKey('volume_types.id'),
nullable=False),
Column('key',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('value',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
volume_metadata_table = Table('volume_metadata', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_id',
Integer(),
ForeignKey('volumes.id'),
nullable=False),
Column('key',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('value',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
new_tables = (volume_types,
volume_type_extra_specs_table,
volume_metadata_table)
for table in new_tables:
try:
table.create()
@ -105,13 +94,29 @@ def upgrade(migrate_engine):
LOG.exception('Exception while creating table')
raise
#
# New Columns
#
volume_type_id = Column('volume_type_id', Integer(), nullable=True)
volumes.create_column(volume_type_id)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
volumes.drop_column(volume_type_id)
volumes = Table('volumes', meta, autoload=True)
for table in new_tables:
volumes.drop_column('volume_type_id')
volume_types = Table('volume_types', meta, autoload=True)
volume_type_extra_specs_table = Table('volume_type_extra_specs',
meta,
autoload=True)
volume_metadata_table = Table('volume_metadata', meta, autoload=True)
# table order matters, don't change
for table in (volume_type_extra_specs_table,
volume_types,
volume_metadata_table):
table.drop()

View File

@ -0,0 +1,129 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE volumes_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
ec2_id VARCHAR(255),
user_id VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
size INTEGER,
availability_zone VARCHAR(255),
instance_id INTEGER,
mountpoint VARCHAR(255),
attach_time VARCHAR(255),
status VARCHAR(255),
attach_status VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
provider_location VARCHAR(256),
provider_auth VARCHAR(256),
snapshot_id INTEGER,
volume_type_id INTEGER,
PRIMARY KEY (id),
FOREIGN KEY(instance_id) REFERENCES instances (id),
CHECK (deleted IN (0, 1))
);
INSERT INTO volumes_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
ec2_id,
user_id,
project_id,
host,
size,
availability_zone,
instance_id,
mountpoint,
attach_time,
status,
attach_status,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
provider_location,
provider_auth,
snapshot_id,
volume_type_id
FROM volumes;
DROP TABLE volumes;
CREATE TABLE volumes (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
ec2_id VARCHAR(255),
user_id VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
size INTEGER,
availability_zone VARCHAR(255),
instance_id INTEGER,
mountpoint VARCHAR(255),
attach_time VARCHAR(255),
status VARCHAR(255),
attach_status VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
provider_location VARCHAR(256),
provider_auth VARCHAR(256),
snapshot_id INTEGER,
PRIMARY KEY (id),
FOREIGN KEY(instance_id) REFERENCES instances (id),
CHECK (deleted IN (0, 1))
);
INSERT INTO volumes
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
ec2_id,
user_id,
project_id,
host,
size,
availability_zone,
instance_id,
mountpoint,
attach_time,
status,
attach_status,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
provider_location,
provider_auth,
snapshot_id
FROM volumes_backup;
DROP TABLE volumes_backup;
DROP TABLE volume_type_extra_specs;
DROP TABLE volume_types;
DROP TABLE volume_metadata;
COMMIT;

View File

@ -20,48 +20,52 @@ from sqlalchemy import Boolean
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
#
# New Tables
#
virtual_storage_arrays = Table('virtual_storage_arrays', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('display_name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('display_description',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('project_id',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('availability_zone',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('instance_type_id', Integer(), nullable=False),
Column('image_ref',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('vc_count', Integer(), nullable=False),
Column('vol_count', Integer(), nullable=False),
Column('status',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
#
# New Tables
#
virtual_storage_arrays = Table('virtual_storage_arrays', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('display_name',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('display_description',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('project_id',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('availability_zone',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('instance_type_id', Integer(), nullable=False),
Column('image_ref',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('vc_count', Integer(), nullable=False),
Column('vol_count', Integer(), nullable=False),
Column('status',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
try:
virtual_storage_arrays.create()
except Exception:
@ -71,6 +75,10 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
virtual_storage_arrays = Table('virtual_storage_arrays',
meta,
autoload=True)
virtual_storage_arrays.drop()

View File

@ -20,16 +20,6 @@ from nova.compute import task_states
from nova.compute import vm_states
meta = MetaData()
c_task_state = Column('task_state',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
_upgrade_translations = {
"stopping": {
"state_description": vm_states.ACTIVE,
@ -92,10 +82,10 @@ _downgrade_translations = {
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_table = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instance_table = Table('instances', meta, autoload=True)
c_state = instance_table.c.state
c_state.alter(name='power_state')
@ -103,6 +93,11 @@ def upgrade(migrate_engine):
c_vm_state = instance_table.c.state_description
c_vm_state.alter(name='vm_state')
c_task_state = Column('task_state',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
instance_table.create_column(c_task_state)
for old_state, values in _upgrade_translations.iteritems():
@ -113,10 +108,10 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_table = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instance_table = Table('instances', meta, autoload=True)
c_task_state = instance_table.c.task_state

View File

@ -13,26 +13,21 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from sqlalchemy import Column, Integer, MetaData, Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
networks = Table('networks', meta,
Column("id", Integer(), primary_key=True, nullable=False))
# Add priority column to networks table
priority = Column('priority', Integer())
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
networks = Table('networks', meta, autoload=True)
priority = Column('priority', Integer())
try:
networks.create_column(priority)
except Exception:
@ -41,5 +36,9 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
networks.drop_column(priority)
networks = Table('networks', meta, autoload=True)
networks.drop_column('priority')

View File

@ -14,35 +14,36 @@
from sqlalchemy import Column, Integer, MetaData, Table, String
meta = MetaData()
default_local_device = Column(
'default_local_device',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
default_swap_device = Column(
'default_swap_device',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
default_local_device = Column(
'default_local_device',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
default_swap_device = Column(
'default_swap_device',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
instances.create_column(default_local_device)
instances.create_column(default_swap_device)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
instances.drop_column('default_swap_device')
instances.drop_column('default_local_device')

View File

@ -17,14 +17,13 @@ from migrate import ForeignKeyConstraint
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
@ -38,7 +37,6 @@ def upgrade(migrate_engine):
ForeignKeyConstraint(columns=[vifs.c.instance_id],
refcolumns=[instances.c.id],
name=fkey_name).drop()
except Exception:
LOG.error(_("foreign key constraint couldn't be removed"))
raise
@ -46,6 +44,7 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData()
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):

View File

@ -1,4 +1,3 @@
COMMIT;
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE virtual_interfaces_backup (
created_at DATETIME,

View File

@ -14,22 +14,20 @@
from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
zones = Table('zones', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
name = Column('name', String(255))
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
zones = Table('zones', meta, autoload=True)
name = Column('name', String(255))
zones.create_column(name)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
zones.drop_column(name)
zones = Table('zones', meta, autoload=True)
zones.drop_column('name')

View File

@ -14,24 +14,20 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from sqlalchemy import Column, Integer, MetaData, Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
instances = Table('instances', meta,
Column("id", Integer(), primary_key=True, nullable=False))
# Add progress column to instances table
progress = Column('progress', Integer())
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
progress = Column('progress', Integer())
try:
instances.create_column(progress)
except Exception:
@ -40,5 +36,9 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances.drop_column(progress)
instances = Table('instances', meta, autoload=True)
instances.drop_column('progress')

View File

@ -14,26 +14,24 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, Integer, MetaData, Table, Boolean
meta = MetaData()
# temporary table for creating the new columns
instances = Table("instances", meta,
Column("id", Integer(), primary_key=True, nullable=False))
# The new column
managed_disk = Column("managed_disk", Boolean(create_constraint=False,
name=None))
from sqlalchemy import Boolean, Column, Integer, MetaData, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
managed_disk = Column("managed_disk", Boolean(create_constraint=False,
name=None))
instances.create_column(managed_disk)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances.drop_column(managed_disk)
instances = Table('instances', meta, autoload=True)
instances.drop_column('managed_disk')

View File

@ -0,0 +1,207 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE instances_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
internal_id INTEGER,
user_id VARCHAR(255),
project_id VARCHAR(255),
image_ref VARCHAR(255),
kernel_id VARCHAR(255),
ramdisk_id VARCHAR(255),
server_name VARCHAR(255),
launch_index INTEGER,
key_name VARCHAR(255),
key_data TEXT,
power_state INTEGER,
vm_state VARCHAR(255),
memory_mb INTEGER,
vcpus INTEGER,
local_gb INTEGER,
hostname VARCHAR(255),
host VARCHAR(255),
user_data TEXT,
reservation_id VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
availability_zone VARCHAR(255),
locked BOOLEAN,
os_type VARCHAR(255),
launched_on TEXT,
instance_type_id INTEGER,
vm_mode VARCHAR(255),
uuid VARCHAR(36),
architecture VARCHAR(255),
root_device_name VARCHAR(255),
access_ip_v4 VARCHAR(255),
access_ip_v6 VARCHAR(255),
config_drive VARCHAR(255),
task_state VARCHAR(255),
default_local_device VARCHAR(255),
default_swap_device VARCHAR(255),
progress INTEGER,
managed_disk BOOLEAN,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (locked IN (0, 1)),
CHECK (managed_disk IN (0, 1))
);
INSERT INTO instances_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
internal_id,
user_id,
project_id,
image_ref,
kernel_id,
ramdisk_id,
server_name,
launch_index,
key_name,
key_data,
power_state,
vm_state,
memory_mb,
vcpus,
local_gb,
hostname,
host,
user_data,
reservation_id,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
availability_zone,
locked,
os_type,
launched_on,
instance_type_id,
vm_mode,
uuid,
architecture,
root_device_name,
access_ip_v4,
access_ip_v6,
config_drive,
task_state,
default_local_device,
default_swap_device,
progress,
managed_disk
FROM instances;
DROP TABLE instances;
CREATE TABLE instances (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
internal_id INTEGER,
user_id VARCHAR(255),
project_id VARCHAR(255),
image_ref VARCHAR(255),
kernel_id VARCHAR(255),
ramdisk_id VARCHAR(255),
server_name VARCHAR(255),
launch_index INTEGER,
key_name VARCHAR(255),
key_data TEXT,
power_state INTEGER,
vm_state VARCHAR(255),
memory_mb INTEGER,
vcpus INTEGER,
local_gb INTEGER,
hostname VARCHAR(255),
host VARCHAR(255),
user_data TEXT,
reservation_id VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
availability_zone VARCHAR(255),
locked BOOLEAN,
os_type VARCHAR(255),
launched_on TEXT,
instance_type_id INTEGER,
vm_mode VARCHAR(255),
uuid VARCHAR(36),
architecture VARCHAR(255),
root_device_name VARCHAR(255),
access_ip_v4 VARCHAR(255),
access_ip_v6 VARCHAR(255),
config_drive VARCHAR(255),
task_state VARCHAR(255),
default_local_device VARCHAR(255),
default_swap_device VARCHAR(255),
progress INTEGER,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (locked IN (0, 1))
);
INSERT INTO instances
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
internal_id,
user_id,
project_id,
image_ref,
kernel_id,
ramdisk_id,
server_name,
launch_index,
key_name,
key_data,
power_state,
vm_state,
memory_mb,
vcpus,
local_gb,
hostname,
host,
user_data,
reservation_id,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
availability_zone,
locked,
os_type,
launched_on,
instance_type_id,
vm_mode,
uuid,
architecture,
root_device_name,
access_ip_v4,
access_ip_v6,
config_drive,
task_state,
default_local_device,
default_swap_device,
progress
FROM instances_backup;
DROP TABLE instances_backup;
COMMIT;

View File

@ -14,19 +14,21 @@
from sqlalchemy import Column, Integer, MetaData, Table
meta = MetaData()
instance_types = Table("instance_types", meta, Column("id", Integer(),
primary_key=True, nullable=False))
vcpu_weight = Column("vcpu_weight", Integer())
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_types = Table('instance_types', meta, autoload=True)
vcpu_weight = Column("vcpu_weight", Integer())
instance_types.create_column(vcpu_weight)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_types.drop_column(vcpu_weight)
instance_types = Table('instance_types', meta, autoload=True)
instance_types.drop_column('vcpu_weight')

View File

@ -18,32 +18,33 @@ from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
from sqlalchemy import MetaData, Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
# Table definition
volumes = Table('volumes', meta,
Column('id', Integer(), primary_key=True, nullable=False)
)
export_devices = Table('export_devices', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('shelf_id', Integer()),
Column('blade_id', Integer()),
Column('volume_id',
Integer(),
ForeignKey('volumes.id'),
nullable=True),
)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
volumes = Table('volumes', meta, autoload=True)
#
# New Tables
#
export_devices = Table('export_devices', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('shelf_id', Integer()),
Column('blade_id', Integer()),
Column('volume_id',
Integer(),
ForeignKey('volumes.id'),
nullable=True),
)
try:
export_devices.create()
except Exception:
@ -53,5 +54,12 @@ def downgrade(migrate_engine):
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
volumes = Table('volumes', meta, autoload=True)
export_devices = Table('export_devices', meta, autoload=True)
export_devices.drop()

View File

@ -13,23 +13,26 @@
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.from sqlalchemy import *
# under the License.
from sqlalchemy import Column, MetaData, Table, Text
meta = MetaData()
new_column = Column('connection_info', Text())
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
table = Table('block_device_mapping', meta, autoload=True)
new_column = Column('connection_info', Text())
table.create_column(new_column)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
table = Table('block_device_mapping', meta, autoload=True)
table.c.connection_info.drop()

View File

@ -0,0 +1,87 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE block_device_mapping_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
instance_id INTEGER NOT NULL,
device_name VARCHAR(255) NOT NULL,
delete_on_termination BOOLEAN,
virtual_name VARCHAR(255),
snapshot_id INTEGER,
volume_id INTEGER,
volume_size INTEGER,
no_device BOOLEAN,
connection_info TEXT,
PRIMARY KEY (id),
FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
CHECK (deleted IN (0, 1)),
CHECK (delete_on_termination IN (0, 1)),
CHECK (no_device IN (0, 1)),
FOREIGN KEY(volume_id) REFERENCES volumes (id),
FOREIGN KEY(instance_id) REFERENCES instances (id)
);
INSERT INTO block_device_mapping_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
instance_id,
device_name,
delete_on_termination,
virtual_name,
snapshot_id,
volume_id,
volume_size,
no_device,
connection_info
FROM block_device_mapping;
DROP TABLE block_device_mapping;
CREATE TABLE block_device_mapping (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
instance_id INTEGER NOT NULL,
device_name VARCHAR(255) NOT NULL,
delete_on_termination BOOLEAN,
virtual_name VARCHAR(255),
snapshot_id INTEGER,
volume_id INTEGER,
volume_size INTEGER,
no_device BOOLEAN,
PRIMARY KEY (id),
FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
CHECK (deleted IN (0, 1)),
CHECK (delete_on_termination IN (0, 1)),
CHECK (no_device IN (0, 1)),
FOREIGN KEY(volume_id) REFERENCES volumes (id),
FOREIGN KEY(instance_id) REFERENCES instances (id)
);
INSERT INTO block_device_mapping
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
instance_id,
device_name,
delete_on_termination,
virtual_name,
snapshot_id,
volume_id,
volume_size,
no_device
FROM block_device_mapping_backup;
DROP TABLE block_device_mapping_backup;
COMMIT;

View File

@ -20,31 +20,33 @@ from sqlalchemy import Integer, BigInteger, DateTime, Boolean, String
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
bw_cache = Table('bw_usage_cache', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_id', Integer(), nullable=False),
Column('network_label',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('start_period', DateTime(timezone=False), nullable=False),
Column('last_refreshed', DateTime(timezone=False)),
Column('bw_in', BigInteger()),
Column('bw_out', BigInteger()))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
#
# New Tables
#
bw_cache = Table('bw_usage_cache', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_id', Integer(), nullable=False),
Column('network_label',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('start_period', DateTime(timezone=False), nullable=False),
Column('last_refreshed', DateTime(timezone=False)),
Column('bw_in', BigInteger()),
Column('bw_out', BigInteger()))
try:
bw_cache.create()
except Exception:
@ -55,6 +57,8 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta.bind = migrate_engine
# Operations to reverse the above upgrade go here.
meta = MetaData()
meta.bind = migrate_engine
bw_cache = Table('bw_usage_cache', meta, autoload=True)
bw_cache.drop()

View File

@ -17,25 +17,19 @@
import migrate
import migrate.changeset
import sqlalchemy
from sqlalchemy import Column, Integer, String, MetaData, Table
from nova import log as logging
LOG = logging.getLogger(__name__)
meta = sqlalchemy.MetaData()
def _get_table():
return sqlalchemy.Table('instance_types', meta, autoload=True)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_types = _get_table()
instance_types = Table('instance_types', meta, autoload=True)
string_column = sqlalchemy.Column('flavorid_str', sqlalchemy.String(255))
string_column = Column('flavorid_str', String(255))
string_column.create(instance_types)
@ -74,11 +68,12 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_types = _get_table()
integer_column = sqlalchemy.Column('flavorid_int',
sqlalchemy.Integer())
instance_types = Table('instance_types', meta, autoload=True)
integer_column = Column('flavorid_int', Integer())
integer_column.create(instance_types)

View File

@ -15,35 +15,35 @@
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import MetaData, String, Table
from nova import log as logging
meta = sqlalchemy.MetaData()
LOG = logging.getLogger(__name__)
s3_images = sqlalchemy.Table('s3_images', meta,
sqlalchemy.Column('created_at',
sqlalchemy.DateTime(timezone=False)),
sqlalchemy.Column('updated_at',
sqlalchemy.DateTime(timezone=False)),
sqlalchemy.Column('deleted_at',
sqlalchemy.DateTime(timezone=False)),
sqlalchemy.Column('deleted',
sqlalchemy.Boolean(create_constraint=True, name=None)),
sqlalchemy.Column('id', sqlalchemy.Integer(),
primary_key=True,
nullable=False,
autoincrement=True),
sqlalchemy.Column('uuid', sqlalchemy.String(36),
nullable=False))
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
#
# New Tables
#
s3_images = Table('s3_images', meta,
Column('created_at',
DateTime(timezone=False)),
Column('updated_at',
DateTime(timezone=False)),
Column('deleted_at',
DateTime(timezone=False)),
Column('deleted',
Boolean(create_constraint=True, name=None)),
Column('id', Integer(),
primary_key=True,
nullable=False,
autoincrement=True),
Column('uuid', String(36),
nullable=False))
try:
s3_images.create()
except Exception:
@ -53,6 +53,8 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
s3_images = Table('s3_images', meta, autoload=True)
s3_images.drop()

View File

@ -12,87 +12,82 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from sqlalchemy import Boolean, Column, DateTime, ForeignKey
from sqlalchemy import Integer, MetaData, String
from sqlalchemy import Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
#
volumes = Table('volumes', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
flavors = Table('sm_flavors', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('label',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('description',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
backend = Table('sm_backend_config', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('flavor_id', Integer(), ForeignKey('sm_flavors.id'),
nullable=False),
Column('sr_uuid',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('sr_type',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('config_params',
String(length=2047,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
)
sm_vol = Table('sm_volume', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), ForeignKey('volumes.id'),
primary_key=True, nullable=False),
Column('backend_id', Integer(), ForeignKey('sm_backend_config.id'),
nullable=False),
Column('vdi_uuid',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
#
# Tables to alter
#
# (none currently)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
volumes = Table('volumes', meta, autoload=True)
#
# New Tables
#
flavors = Table('sm_flavors', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('label',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('description',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
backend = Table('sm_backend_config', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('flavor_id', Integer(), ForeignKey('sm_flavors.id'),
nullable=False),
Column('sr_uuid',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('sr_type',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('config_params',
String(length=2047,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
)
sm_vol = Table('sm_volume', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), ForeignKey('volumes.id'),
primary_key=True, nullable=False),
Column('backend_id', Integer(),
ForeignKey('sm_backend_config.id'),
nullable=False),
Column('vdi_uuid',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
for table in (flavors, backend, sm_vol):
try:
table.create()
@ -101,7 +96,16 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
volumes = Table('volumes', meta, autoload=True)
flavors = Table('sm_flavors', meta, autoload=True)
backend = Table('sm_backend_config', meta, autoload=True)
sm_vol = Table('sm_volume', meta, autoload=True)
for table in (flavors, backend, sm_vol):
try:
table.drop()

View File

@ -17,24 +17,21 @@
from sqlalchemy import MetaData, Table
meta = MetaData()
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instances = Table('instances', meta, autoload=True)
managed_disk = instances.c.managed_disk
managed_disk.alter(name='auto_disk_config')
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instances = Table('instances', meta, autoload=True)
image_ref_column = instances.c.auto_disk_config
image_ref_column.alter(name='managed_disk')

View File

@ -14,23 +14,15 @@
from sqlalchemy import Column, Integer, Float, MetaData, Table
meta = MetaData()
def _get_table(table_name):
return Table(table_name, meta, autoload=True)
rxtx_base = Column('rxtx_base', Integer)
rxtx_factor = Column('rxtx_factor', Float, default=1)
rxtx_quota = Column('rxtx_quota', Integer)
rxtx_cap = Column('rxtx_cap', Integer)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_types = _get_table('instance_types')
networks = _get_table('networks')
instance_types = Table('instance_types', meta, autoload=True)
networks = Table('networks', meta, autoload=True)
rxtx_base = Column('rxtx_base', Integer)
rxtx_factor = Column('rxtx_factor', Float, default=1)
instance_types.create_column(rxtx_factor)
networks.create_column(rxtx_base)
@ -47,10 +39,13 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_types = _get_table('instance_types')
networks = _get_table('networks')
instance_types = Table('instance_types', meta, autoload=True)
networks = Table('networks', meta, autoload=True)
rxtx_quota = Column('rxtx_quota', Integer)
rxtx_cap = Column('rxtx_cap', Integer)
instance_types.create_column(rxtx_quota)
instance_types.create_column(rxtx_cap)

View File

@ -11,13 +11,13 @@ BEGIN TRANSACTION;
vcpus INTEGER NOT NULL,
local_gb INTEGER NOT NULL,
swap INTEGER NOT NULL,
rxtx_cap INTEGER,
rxtx_quota INTEGER,
rxtx_quota INTEGER NOT NULL,
rxtx_cap INTEGER NOT NULL,
vcpu_weight INTEGER,
flavorid VARCHAR(255),
PRIMARY KEY (id),
UNIQUE (flavorid),
CHECK (deleted IN (0, 1)),
UNIQUE (flavorid),
UNIQUE (name)
);
@ -32,11 +32,11 @@ BEGIN TRANSACTION;
vcpus,
local_gb,
swap,
0 as rxtx_quota,
COALESCE(rxtx_factor, 1) * COALESCE ((SELECT MIN(rxtx_base)
FROM networks
WHERE rxtx_base > 0), 1)
as rxtx_cap,
0 as rxtx_cap,
vcpu_weight,
flavorid FROM instance_types;
@ -53,8 +53,8 @@ BEGIN TRANSACTION;
vcpus INTEGER NOT NULL,
local_gb INTEGER NOT NULL,
swap INTEGER NOT NULL,
rxtx_quota INTEGER NOT NULL,
rxtx_cap INTEGER NOT NULL,
rxtx_factor INTEGER NOT NULL,
vcpu_weight INTEGER,
flavorid VARCHAR(255),
PRIMARY KEY (id),
@ -67,68 +67,71 @@ BEGIN TRANSACTION;
DROP TABLE instance_types_backup;
CREATE TABLE networks_backup (
created_at datetime DEFAULT NULL,
updated_at datetime DEFAULT NULL,
deleted_at datetime DEFAULT NULL,
deleted tinyint(1) DEFAULT NULL,
id int(11) NOT NULL,
injected tinyint(1) DEFAULT NULL,
cidr varchar(255) DEFAULT NULL,
netmask varchar(255) DEFAULT NULL,
bridge varchar(255) DEFAULT NULL,
gateway varchar(255) DEFAULT NULL,
broadcast varchar(255) DEFAULT NULL,
dns1 varchar(255) DEFAULT NULL,
vlan int(11) DEFAULT NULL,
vpn_public_address varchar(255) DEFAULT NULL,
vpn_public_port int(11) DEFAULT NULL,
vpn_private_address varchar(255) DEFAULT NULL,
dhcp_start varchar(255) DEFAULT NULL,
project_id varchar(255) DEFAULT NULL,
host varchar(255) DEFAULT NULL,
cidr_v6 varchar(255) DEFAULT NULL,
gateway_v6 varchar(255) DEFAULT NULL,
label varchar(255) DEFAULT NULL,
netmask_v6 varchar(255) DEFAULT NULL,
bridge_interface varchar(255) DEFAULT NULL,
multi_host tinyint(1) DEFAULT NULL,
dns2 varchar(255) DEFAULT NULL,
uuid varchar(36) DEFAULT NULL,
priority int(11) DEFAULT NULL,
PRIMARY KEY (`id`)
);
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
injected BOOLEAN,
cidr VARCHAR(255),
netmask VARCHAR(255),
bridge VARCHAR(255),
gateway VARCHAR(255),
broadcast VARCHAR(255),
dns1 VARCHAR(255),
vlan INTEGER,
vpn_public_address VARCHAR(255),
vpn_public_port INTEGER,
vpn_private_address VARCHAR(255),
dhcp_start VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
cidr_v6 VARCHAR(255),
gateway_v6 VARCHAR(255),
label VARCHAR(255),
netmask_v6 VARCHAR(255),
bridge_interface VARCHAR(255),
multi_host BOOLEAN,
dns2 VARCHAR(255),
uuid VARCHAR(36),
priority INTEGER,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (injected IN (0, 1)),
CHECK (multi_host IN (0, 1))
);
INSERT INTO networks_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
injected,
cidr,
netmask,
bridge,
gateway,
broadcast,
dns1,
vlan,
vpn_public_address,
vpn_public_port,
vpn_private_address,
dhcp_start,
project_id,
host,
cidr_v6,
gateway_v6,
label,
netmask_v6,
bridge_interface,
multi_host,
dns2,
uuid,
priority
FROM networks;
INSERT INTO networks_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
injected,
cidr,
netmask,
bridge,
gateway,
broadcast,
dns1,
vlan,
vpn_public_address,
vpn_public_port,
vpn_private_address,
dhcp_start,
project_id,
host,
cidr_v6,
gateway_v6,
label,
netmask_v6,
bridge_interface,
multi_host,
dns2,
uuid,
priority
FROM networks;
DROP TABLE networks;
ALTER TABLE networks_backup RENAME TO networks;
DROP TABLE networks;
ALTER TABLE networks_backup RENAME TO networks;
COMMIT;

View File

@ -17,13 +17,13 @@ from migrate import ForeignKeyConstraint
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
@ -45,6 +45,7 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData()
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):

View File

@ -1,4 +1,3 @@
COMMIT;
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE virtual_interfaces_backup (
created_at DATETIME,

View File

@ -14,16 +14,16 @@
from sqlalchemy import Index, MetaData, Table
meta = MetaData()
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
Index('uuid', instances.c.uuid, unique=True).create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
Index('uuid', instances.c.uuid, unique=True).drop(migrate_engine)

View File

@ -13,39 +13,40 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from sqlalchemy import Boolean, Column, DateTime, ForeignKey
from sqlalchemy import Integer, MetaData, String
from sqlalchemy import Table, Text
from nova import log as logging
from nova import utils
meta = MetaData()
LOG = logging.getLogger(__name__)
# instance info cache table to add to DB
instance_info_caches = Table('instance_info_caches', meta,
Column('created_at', DateTime(timezone=False),
default=utils.utcnow()),
Column('updated_at', DateTime(timezone=False),
onupdate=utils.utcnow()),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True),
Column('network_info', Text()),
Column('instance_id', String(36),
ForeignKey('instances.uuid'),
nullable=False,
unique=True),
mysql_engine='InnoDB')
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# load instances for fk
# load tables for fk
instances = Table('instances', meta, autoload=True)
#
# New Tables
#
instance_info_caches = Table('instance_info_caches', meta,
Column('created_at', DateTime(timezone=False),
default=utils.utcnow()),
Column('updated_at', DateTime(timezone=False),
onupdate=utils.utcnow()),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True),
Column('network_info', Text()),
Column('instance_id', String(36),
ForeignKey('instances.uuid'),
nullable=False,
unique=True),
mysql_engine='InnoDB')
# create instance_info_caches table
try:
instance_info_caches.create()
@ -55,6 +56,13 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
instances = Table('instances', meta, autoload=True)
instance_info_caches = Table('instance_info_caches', meta, autoload=True)
try:
instance_info_caches.drop()
except Exception:

View File

@ -17,41 +17,35 @@ from sqlalchemy import Boolean, Column, DateTime, Integer, ForeignKey
from sqlalchemy import MetaData, String, Table, Text
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
#
# New Tables
#
instance_faults = Table('instance_faults', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None),
default=False),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_uuid', String(36, ForeignKey('instances.uuid'))),
Column('code', Integer(), nullable=False),
Column('message',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('details',
Text(length=None, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
#
# Tables to alter
#
# (none currently)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
#
# New Tables
#
instance_faults = Table('instance_faults', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None),
default=False),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_uuid', String(36, ForeignKey('instances.uuid'))),
Column('code', Integer(), nullable=False),
Column('message',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('details',
Text(length=None, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
try:
instance_faults.create()
except Exception:
@ -60,4 +54,7 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData()
meta.bind = migrate_engine
instance_faults = Table('instance_faults', meta, autoload=True)
instance_faults.drop()

View File

@ -15,26 +15,22 @@
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy import select, Column, ForeignKey, Integer, String
from sqlalchemy import select, Column, ForeignKey, Integer
from sqlalchemy import MetaData, String, Table
from migrate import ForeignKeyConstraint
from nova import log as logging
LOG = logging.getLogger(__name__)
meta = sqlalchemy.MetaData()
def _get_table(name):
return sqlalchemy.Table(name, meta, autoload=True)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
instance_actions = _get_table('instance_actions')
instances = _get_table('instances')
instance_actions = Table('instance_actions', meta, autoload=True)
instances = Table('instances', meta, autoload=True)
uuid_column = Column('instance_uuid', String(36))
uuid_column.create(instance_actions)
@ -64,9 +60,10 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_actions = _get_table('instance_actions')
instances = _get_table('instances')
instance_actions = Table('instance_actions', meta, autoload=True)
instances = Table('instances', meta, autoload=True)
id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
id_column.create(instance_actions)

View File

@ -14,10 +14,9 @@
from sqlalchemy import Index, MetaData, Table
meta = MetaData()
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
index = Index('project_id', instances.c.project_id)
@ -25,6 +24,7 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
index = Index('project_id', instances.c.project_id)

View File

@ -15,18 +15,17 @@
import json
from sqlalchemy import *
from migrate import *
from sqlalchemy import select, MetaData, Table
from nova import ipv6
from nova import log as logging
from nova import utils
meta = MetaData()
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# grab tables
instance_info_caches = Table('instance_info_caches', meta, autoload=True)
@ -235,6 +234,7 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
# facepalm
meta = MetaData()
meta.bind = migrate_engine
instance_info_caches = Table('instance_info_caches', meta, autoload=True)

View File

@ -13,26 +13,24 @@
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.from sqlalchemy import *
# under the License.
from sqlalchemy import Column, MetaData, Table, String
from sqlalchemy import Column, MetaData, String, Table
from nova import flags
flags.DECLARE('default_floating_pool', 'nova.network.manager')
flags.DECLARE('public_interface', 'nova.network.linux_net')
FLAGS = flags.FLAGS
meta = MetaData()
pool_column = Column('pool', String(255))
interface_column = Column('interface', String(255))
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
table = Table('floating_ips', meta, autoload=True)
pool_column = Column('pool', String(255))
interface_column = Column('interface', String(255))
table.create_column(pool_column)
table.create_column(interface_column)
table.update().values(pool=FLAGS.default_floating_pool,
@ -40,6 +38,7 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
table = Table('floating_ips', meta, autoload=True)
table.c.pool.drop()

View File

@ -0,0 +1,69 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE floating_ips_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
address VARCHAR(255),
fixed_ip_id INTEGER,
project_id VARCHAR(255),
host VARCHAR(255),
auto_assigned BOOLEAN,
pool VARCHAR(255),
interface VARCHAR(255),
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id)
);
INSERT INTO floating_ips_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
address,
fixed_ip_id,
project_id,
host,
auto_assigned,
pool,
interface
FROM floating_ips;
DROP TABLE floating_ips;
CREATE TABLE floating_ips (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
address VARCHAR(255),
fixed_ip_id INTEGER,
project_id VARCHAR(255),
host VARCHAR(255),
auto_assigned BOOLEAN,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id)
);
INSERT INTO floating_ips
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
address,
fixed_ip_id,
project_id,
host,
auto_assigned
FROM floating_ips_backup;
DROP TABLE floating_ips_backup;
COMMIT;

View File

@ -12,27 +12,25 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from sqlalchemy import Boolean
from sqlalchemy import Column, Table
meta = MetaData()
shutdown_terminate = Column(
'shutdown_terminate', Boolean(), default=True)
disable_terminate = Column(
'disable_terminate', Boolean(), default=False)
from sqlalchemy import Column, MetaData, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
shutdown_terminate = Column(
'shutdown_terminate', Boolean(), default=True)
disable_terminate = Column(
'disable_terminate', Boolean(), default=False)
instances.create_column(shutdown_terminate)
instances.create_column(disable_terminate)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
instances.drop_column(shutdown_terminate)
instances.drop_column(disable_terminate)
instances.drop_column('shutdown_terminate')
instances.drop_column('disable_terminate')

View File

@ -0,0 +1,219 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE instances_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
internal_id INTEGER,
user_id VARCHAR(255),
project_id VARCHAR(255),
image_ref VARCHAR(255),
kernel_id VARCHAR(255),
ramdisk_id VARCHAR(255),
server_name VARCHAR(255),
launch_index INTEGER,
key_name VARCHAR(255),
key_data TEXT,
power_state INTEGER,
vm_state VARCHAR(255),
memory_mb INTEGER,
vcpus INTEGER,
local_gb INTEGER,
hostname VARCHAR(255),
host VARCHAR(255),
user_data TEXT,
reservation_id VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
availability_zone VARCHAR(255),
locked BOOLEAN,
os_type VARCHAR(255),
launched_on TEXT,
instance_type_id INTEGER,
vm_mode VARCHAR(255),
uuid VARCHAR(36),
architecture VARCHAR(255),
root_device_name VARCHAR(255),
access_ip_v4 VARCHAR(255),
access_ip_v6 VARCHAR(255),
config_drive VARCHAR(255),
task_state VARCHAR(255),
default_local_device VARCHAR(255),
default_swap_device VARCHAR(255),
progress INTEGER,
auto_disk_config BOOLEAN,
shutdown_terminate BOOLEAN,
disable_terminate BOOLEAN,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (locked IN (0, 1)),
CHECK (auto_disk_config IN (0, 1)),
CHECK (shutdown_terminate IN (0, 1)),
CHECK (disable_terminate IN (0, 1))
);
INSERT INTO instances_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
internal_id,
user_id,
project_id,
image_ref,
kernel_id,
ramdisk_id,
server_name,
launch_index,
key_name,
key_data,
power_state,
vm_state,
memory_mb,
vcpus,
local_gb,
hostname,
host,
user_data,
reservation_id,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
availability_zone,
locked,
os_type,
launched_on,
instance_type_id,
vm_mode,
uuid,
architecture,
root_device_name,
access_ip_v4,
access_ip_v6,
config_drive,
task_state,
default_local_device,
default_swap_device,
progress,
auto_disk_config,
shutdown_terminate,
disable_terminate
FROM instances;
DROP TABLE instances;
CREATE TABLE instances(
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id INTEGER NOT NULL,
internal_id INTEGER,
user_id VARCHAR(255),
project_id VARCHAR(255),
image_ref VARCHAR(255),
kernel_id VARCHAR(255),
ramdisk_id VARCHAR(255),
server_name VARCHAR(255),
launch_index INTEGER,
key_name VARCHAR(255),
key_data TEXT,
power_state INTEGER,
vm_state VARCHAR(255),
memory_mb INTEGER,
vcpus INTEGER,
local_gb INTEGER,
hostname VARCHAR(255),
host VARCHAR(255),
user_data TEXT,
reservation_id VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
availability_zone VARCHAR(255),
locked BOOLEAN,
os_type VARCHAR(255),
launched_on TEXT,
instance_type_id INTEGER,
vm_mode VARCHAR(255),
uuid VARCHAR(36),
architecture VARCHAR(255),
root_device_name VARCHAR(255),
access_ip_v4 VARCHAR(255),
access_ip_v6 VARCHAR(255),
config_drive VARCHAR(255),
task_state VARCHAR(255),
default_local_device VARCHAR(255),
default_swap_device VARCHAR(255),
progress INTEGER,
auto_disk_config BOOLEAN,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
CHECK (locked IN (0, 1)),
CHECK (auto_disk_config IN (0, 1))
);
CREATE INDEX project_id ON instances (project_id);
CREATE UNIQUE INDEX uuid ON instances (uuid);
INSERT INTO instances
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
internal_id,
user_id,
project_id,
image_ref,
kernel_id,
ramdisk_id,
server_name,
launch_index,
key_name,
key_data,
power_state,
vm_state,
memory_mb,
vcpus,
local_gb,
hostname,
host,
user_data,
reservation_id,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
availability_zone,
locked,
os_type,
launched_on,
instance_type_id,
vm_mode,
uuid,
architecture,
root_device_name,
access_ip_v4,
access_ip_v6,
config_drive,
task_state,
default_local_device,
default_swap_device,
progress,
auto_disk_config
FROM instances_backup;
DROP TABLE instances_backup;
COMMIT;

View File

@ -19,26 +19,22 @@
from sqlalchemy import Column, Integer, MetaData, Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
# Add disk_available_least column to compute_nodes table.
# Thinking about qcow2 image support, both compressed and virtual disk size
# has to be considered.
# disk_available stores "total disk size - used disk(compressed disk size)",
# while disk_available_least stores
# "total disk size - used disk(virtual disk size)".
# virtual disk size is used for kvm block migration.
compute_nodes = Table('compute_nodes', meta,
Column('id', Integer(), primary_key=True, nullable=False))
disk_available_least = Column('disk_available_least', Integer(), default=0)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
disk_available_least = Column('disk_available_least', Integer(), default=0)
compute_nodes = Table('compute_nodes', meta, autoload=True)
# Add disk_available_least column to compute_nodes table.
# Thinking about qcow2 image support, both compressed and virtual disk size
# has to be considered.
# disk_available stores "total disk size - used disk(compressed disk size)"
# while disk_available_least stores
# "total disk size - used disk(virtual disk size)".
# virtual disk size is used for kvm block migration.
try:
compute_nodes.create_column(disk_available_least)
except Exception:
@ -47,5 +43,8 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
compute_nodes.drop_column(disk_available_least)
compute_nodes = Table('compute_nodes', meta, autoload=True)
compute_nodes.drop_column('disk_available_least')

View File

@ -64,7 +64,7 @@ BEGIN TRANSACTION;
PRIMARY KEY (id),
FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id),
FOREIGN KEY(network_id) REFERENCES networks (id),
FOREIGN KEY(instance_id) REFERENCES instanced (id)
FOREIGN KEY(instance_id) REFERENCES instances (id)
);
CREATE TABLE floating_ips (

View File

@ -17,13 +17,13 @@ from migrate import ForeignKeyConstraint
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
@ -71,6 +71,7 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData()
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):

View File

@ -17,71 +17,75 @@ from sqlalchemy import MetaData, Column, ForeignKey, Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
aggregates = Table('aggregates', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(),
primary_key=True, nullable=False, autoincrement=True),
Column('name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True),
Column('operational_state',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False),
Column('availability_zone',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False),
)
hosts = Table('aggregate_hosts', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('host',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True),
Column('aggregate_id', Integer(), ForeignKey('aggregates.id'),
nullable=False),
)
metadata = Table('aggregate_metadata', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('aggregate_id',
Integer(),
ForeignKey('aggregates.id'),
nullable=False),
Column('key',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False),
Column('value',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False))
tables = (aggregates, hosts, metadata)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
#
# New Tables
#
aggregates = Table('aggregates', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(),
primary_key=True, nullable=False, autoincrement=True),
Column('name',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True),
Column('operational_state',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False),
Column('availability_zone',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False),
)
hosts = Table('aggregate_hosts', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('host',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True),
Column('aggregate_id', Integer(), ForeignKey('aggregates.id'),
nullable=False),
)
metadata = Table('aggregate_metadata', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('aggregate_id',
Integer(),
ForeignKey('aggregates.id'),
nullable=False),
Column('key',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False),
Column('value',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False))
tables = (aggregates, hosts, metadata)
for table in tables:
try:
table.create()
@ -90,8 +94,14 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for table in tables:
aggregates = Table('aggregates', meta, autoload=True)
hosts = Table('aggregate_hosts', meta, autoload=True)
metadata = Table('aggregate_metadata', meta, autoload=True)
# table order matters, don't change
for table in (hosts, metadata, aggregates):
try:
table.drop()
except Exception:

View File

@ -17,40 +17,43 @@ from sqlalchemy import Boolean, Column, DateTime, ForeignKey
from sqlalchemy import MetaData, String, Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
#
# New Tables
#
dns_domains = Table('dns_domains', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('domain',
String(length=512, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
primary_key=True, nullable=False),
Column('scope',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('availability_zone',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('project_id',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
ForeignKey('projects.id'))
)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# load instances for fk
instances = Table('projects', meta, autoload=True)
# load tables for fk
projects = Table('projects', meta, autoload=True)
#
# New Tables
#
dns_domains = Table('dns_domains', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('domain',
String(length=512, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
primary_key=True, nullable=False),
Column('scope',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('availability_zone',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('project_id',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
ForeignKey('projects.id'))
)
# create dns_domains table
try:
dns_domains.create()
@ -60,6 +63,13 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# load tables for fk
projects = Table('projects', meta, autoload=True)
dns_domains = Table('dns_domains', meta, autoload=True)
try:
dns_domains.drop()
except Exception:

View File

@ -0,0 +1,13 @@
CREATE TABLE dns_domains (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
domain VARCHAR(512) CHARACTER SET latin1 NOT NULL,
scope VARCHAR(255),
availability_zone VARCHAR(255),
project_id VARCHAR(255),
PRIMARY KEY (domain),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(project_id) REFERENCES projects (id)
);

View File

@ -12,19 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Table, Column, MetaData, Integer
from sqlalchemy import Column, Integer, MetaData, Table
from nova import log as logging
new_columns = [
Column('free_ram_mb', Integer()),
Column('free_disk_gb', Integer()),
Column('current_workload', Integer()),
Column('running_vms', Integer()),
]
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
@ -32,6 +24,15 @@ def upgrade(migrate_engine):
meta.bind = migrate_engine
compute_nodes = Table('compute_nodes', meta, autoload=True)
#
# New Columns
#
new_columns = [
Column('free_ram_mb', Integer()),
Column('free_disk_gb', Integer()),
Column('current_workload', Integer()),
Column('running_vms', Integer()),
]
for column in new_columns:
compute_nodes.create_column(column)
@ -41,5 +42,8 @@ def downgrade(migrate_engine):
meta.bind = migrate_engine
compute_nodes = Table('compute_nodes', meta, autoload=True)
for column in new_columns:
compute_notes.drop_column(column)
for column in ('free_ram_mb',
'free_disk_gb',
'current_workload',
'running_vms'):
compute_nodes.drop_column(column)

View File

@ -15,21 +15,13 @@
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy import select, Column, Integer
from sqlalchemy import select, Column, Integer, MetaData, Table
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
meta = sqlalchemy.MetaData()
def _get_table(name):
return sqlalchemy.Table(name, meta, autoload=True)
def upgrade_libvirt(instances, instance_types):
# Update instance_types first
@ -73,8 +65,9 @@ def check_instance_presence(migrate_engine, instances_table):
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = _get_table('instances')
instances = Table('instances', meta, autoload=True)
data_present = check_instance_presence(migrate_engine, instances)
@ -83,7 +76,7 @@ def upgrade(migrate_engine):
"connection_type to run migration migration")
raise exception.Error(msg)
instance_types = _get_table('instance_types')
instance_types = Table('instance_types', meta, autoload=True)
for table in (instances, instance_types):
root_gb = Column('root_gb', Integer)
@ -108,12 +101,13 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = _get_table('instances')
instance_types = _get_table('instance_types')
instances = Table('instances', meta, autoload=True)
instance_types = Table('instance_types', meta, autoload=True)
for table in (instances, instance_types):
local_gb = Column('root_gb', Integer)
local_gb = Column('local_gb', Integer)
local_gb.create(table)
try:

View File

@ -15,43 +15,40 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from sqlalchemy import and_, select
from sqlalchemy import BigInteger, Boolean, Column, DateTime
from sqlalchemy import Integer, MetaData, String
from sqlalchemy import Table
from nova import utils
meta = MetaData()
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
bw_usage_cache = Table('bw_usage_cache', meta,
Column('id', Integer, primary_key=True),
Column('network_label', String(255)),
Column('instance_id', Integer, nullable=False),
Column('start_period', DateTime, nullable=False),
Column('last_refreshed', DateTime),
Column('bw_in', BigInteger),
Column('bw_out', BigInteger),
Column('created_at', DateTime(timezone=False),
default=utils.utcnow()),
Column('updated_at', DateTime(timezone=False),
onupdate=utils.utcnow()),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True,
name=None)))
vifs = Table('virtual_interfaces', meta, autoload=True)
networks = Table('networks', meta, autoload=True)
mac_column = Column('mac', String(255))
try:
bw_usage_cache.create_column(mac_column)
except Exception:
# NOTE(jkoelker) passing here since this migration was broken
# at one point
pass
bw_usage_cache = Table('bw_usage_cache', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_id', Integer(), nullable=False),
Column('network_label',
String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('start_period', DateTime(timezone=False),
nullable=False),
Column('last_refreshed', DateTime(timezone=False)),
Column('bw_in', BigInteger()),
Column('bw_out', BigInteger()),
useexisting=True)
mac_column = Column('mac', String(255))
bw_usage_cache.create_column(mac_column)
bw_usage_cache.update()\
.values(mac=select([vifs.c.address])\
@ -65,40 +62,36 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
bw_usage_cache = Table('bw_usage_cache', meta,
Column('id', Integer, primary_key=True),
Column('mac', String(255)),
Column('instance_id', Integer, nullable=False),
Column('start_period', DateTime, nullable=False),
Column('last_refreshed', DateTime),
Column('bw_in', BigInteger),
Column('bw_out', BigInteger),
Column('created_at', DateTime(timezone=False),
default=utils.utcnow()),
Column('updated_at', DateTime(timezone=False),
onupdate=utils.utcnow()),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True,
name=None)))
vifs = Table('virtual_interfaces', meta, autoload=True)
network = Table('networks', meta, autoload=True)
network_label_column = Column('network_label', String(255))
bw_usage_cache = Table('bw_usage_cache', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_id', Integer(), nullable=False),
Column('mac', String(255)),
Column('start_period', DateTime(timezone=False),
nullable=False),
Column('last_refreshed', DateTime(timezone=False)),
Column('bw_in', BigInteger()),
Column('bw_out', BigInteger()),
useexisting=True)
network_label_column = Column('network_label', String(255))
bw_usage_cache.create_column(network_label_column)
try:
bw_usage_cache.update()\
.values(network_label=select([network.c.label])\
.where(and_(
network.c.id == vifs.c.network_id,
vifs.c.address == bw_usage_cache.c.mac,
bw_usage_cache.c.instance_id == vifs.c.instance_id))\
.as_scalar()).execute()
except Exception:
# NOTE(jkoelker) passing here since this migration was broken
# at one point
pass
bw_usage_cache.update()\
.values(network_label=select([network.c.label])\
.where(and_(
network.c.id == vifs.c.network_id,
vifs.c.address == bw_usage_cache.c.mac,
bw_usage_cache.c.instance_id == vifs.c.instance_id))\
.as_scalar()).execute()
bw_usage_cache.c.mac.drop()

View File

@ -18,8 +18,6 @@
from sqlalchemy import MetaData, Table
from migrate.changeset.constraint import UniqueConstraint
meta = MetaData()
def _get_constraint_names(engine_name):
@ -42,6 +40,7 @@ def _get_constraint_names(engine_name):
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
c_names = _get_constraint_names(migrate_engine.name)
@ -62,6 +61,7 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
c_names = _get_constraint_names(migrate_engine.name)

View File

@ -17,10 +17,9 @@
from sqlalchemy import MetaData
meta = MetaData()
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# NOTE (ironcamel): The only table we are not converting to utf8 here is

View File

@ -12,23 +12,22 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
meta = MetaData()
zones = Table('zones', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
is_parent = Column('is_parent', Boolean(), default=False)
rpc_host = Column('rpc_host', String(255))
rpc_port = Column('rpc_port', Integer())
rpc_virtual_host = Column('rpc_virtual_host', String(255))
from sqlalchemy import Boolean, Column
from sqlalchemy import Integer, MetaData, String
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
zones = Table('zones', meta, autoload=True)
is_parent = Column('is_parent', Boolean(), default=False)
rpc_host = Column('rpc_host', String(255))
rpc_port = Column('rpc_port', Integer())
rpc_virtual_host = Column('rpc_virtual_host', String(255))
zones.create_column(is_parent)
zones.create_column(rpc_host)
zones.create_column(rpc_port)
@ -36,9 +35,12 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
zones.drop_column(rpc_virtual_host)
zones.drop_column(rpc_port)
zones.drop_column(rpc_host)
zones.drop_column(is_parent)
zones = Table('zones', meta, autoload=True)
zones.drop_column('rpc_virtual_host')
zones.drop_column('rpc_port')
zones.drop_column('rpc_host')
zones.drop_column('is_parent')

Some files were not shown because too many files have changed in this diff Show More