Create an integer id for the resource table

Move the old uuid "id" to a new column "uuid". The migration
script is largely copied from the event_uuid_to_id script.

This is needed for https://review.openstack.org/#/c/156693/

Change-Id: Idf7adc9d01425592ec174c373b1b03626c3a0a1d
Closes-bug: #1415237
This commit is contained in:
Angus Salkeld 2015-03-10 13:08:05 +10:00
parent bb5fec7725
commit eb8c81f8db
11 changed files with 581 additions and 17 deletions

View File

@ -0,0 +1,472 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import uuid
import migrate
import migrate.changeset.constraint as constraint
from oslo_utils import timeutils
import sqlalchemy
from sqlalchemy.schema import DropConstraint, ForeignKeyConstraint
# The upgrade plan:
# 1. resource_data:
#    - rename resource_id to tmp_res_uuid
#    - add resource_id as int
# 2. resource:
#    - full schema change (add uuid, replace string id with int id)
# 3. resource_data:
#    - populate the correct resource_id
#    - drop tmp_res_uuid and make resource_id a foreignkey
def upgrade_resource_data_pre(migrate_engine):
    """Prepare resource_data for the integer resource.id migration.

    Drops the resource_data -> resource foreign key, renames the old
    string resource_id column to tmp_res_uuid and adds a fresh integer
    resource_id column.  The foreign key is re-created later by
    upgrade_resource_data_post, once resource.id is an integer.

    :param migrate_engine: SQLAlchemy engine bound to the heat database
    """
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    rd_table = sqlalchemy.Table('resource_data', meta, autoload=True)
    res_table = sqlalchemy.Table('resource', meta, autoload=True)

    # remove foreignkey on resource_id
    inspector = sqlalchemy.inspect(migrate_engine)
    # NOTE(review): assumes the first (only) foreign key on resource_data
    # is the one pointing at resource.id -- confirm for all backends.
    fkc_name = inspector.get_foreign_keys('resource_data')[0]['name']
    fkc = ForeignKeyConstraint([rd_table.c.resource_id], [res_table.c.id],
                               fkc_name)
    migrate_engine.execute(DropConstraint(fkc))
    # migrate.ForeignKeyConstraint(columns=[rd_table.c.resource_id],
    #                              refcolumns=[res_table.c.id]).drop()

    # rename resource_id -> tmp_res_uuid (still holds the old string uuid)
    rd_table.c.resource_id.alter('tmp_res_uuid', sqlalchemy.String(36))

    # create the new resource_id column (no foreignkey yet)
    res_id_column_kwargs = {}
    if migrate_engine.name == 'ibm_db_sa':
        # NOTE(mriedem): This is turned into a foreignkey key constraint
        # later so it must be non-nullable.
        res_id_column_kwargs['nullable'] = False
    res_id = sqlalchemy.Column('resource_id', sqlalchemy.Integer,
                               **res_id_column_kwargs)
    rd_table.create_column(res_id)
def upgrade_sqlite_resource_data_pre(migrate_engine):
    """sqlite variant of upgrade_resource_data_pre.

    sqlite cannot alter or drop columns in place, so rebuild
    resource_data as a new table with an integer resource_id plus a
    tmp_res_uuid column holding the old string resource id, copy the
    rows over, then swap the new table in.

    :param migrate_engine: SQLAlchemy engine bound to the heat database
    """
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    # reflect 'resource' so the metadata knows about it (needed for the
    # later foreign key creation); the return value is not used here
    sqlalchemy.Table('resource', meta, autoload=True)
    rd_table = sqlalchemy.Table(
        'new_resource_data', meta,
        sqlalchemy.Column('id',
                          sqlalchemy.Integer,
                          primary_key=True,
                          nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('key', sqlalchemy.String(255)),
        sqlalchemy.Column('value', sqlalchemy.Text),
        sqlalchemy.Column('redact', sqlalchemy.Boolean),
        sqlalchemy.Column('decrypt_method', sqlalchemy.String(64)),
        sqlalchemy.Column('resource_id', sqlalchemy.Integer,
                          nullable=False),
        sqlalchemy.Column('tmp_res_uuid', sqlalchemy.String(36),
                          nullable=False))
    rd_table.create()

    prev_rd_table = sqlalchemy.Table('resource_data', meta, autoload=True)
    rd_list = list(prev_rd_table.select().order_by(
        sqlalchemy.sql.expression.asc(prev_rd_table.c.created_at))
        .execute())
    for rd in rd_list:
        # resource_id=0 is a placeholder; the real integer id is filled
        # in by upgrade_resource_data_post once resource.id is an int.
        values = {'key': rd.key,
                  'value': rd.value,
                  'redact': rd.redact,
                  'decrypt_method': rd.decrypt_method,
                  'resource_id': 0,
                  'tmp_res_uuid': rd.resource_id}
        migrate_engine.execute(rd_table.insert(values))

    prev_rd_table.drop()
    rd_table.rename('resource_data')
def upgrade_resource(migrate_engine):
    """Replace the string (uuid) resource.id with an autoincrement int id.

    Adds a 'uuid' column populated from the old id, builds an integer
    'tmp_id' column (sequence-backed on postgresql), numbers existing
    rows in creation order, then drops the old id and renames
    tmp_id -> id with a primary key and autoincrement behaviour.

    :param migrate_engine: SQLAlchemy engine bound to the heat database
    """
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    res_table = sqlalchemy.Table('resource', meta, autoload=True)

    res_uuid_column_kwargs = {}
    if migrate_engine.name == 'ibm_db_sa':
        # NOTE(mriedem): DB2 10.5 doesn't support unique constraints over
        # nullable columns, it creates a unique index instead, so we have
        # to make the uuid column non-nullable in the DB2 case.
        res_uuid_column_kwargs['nullable'] = False
    # BUG FIX: str(uuid.uuid4()) -- the previous lambda did
    # str(uuid.uuid4), which stringifies the *function object*
    # ('<function uuid4 ...>') instead of generating a random uuid.
    res_uuid = sqlalchemy.Column('uuid', sqlalchemy.String(length=36),
                                 default=lambda: str(uuid.uuid4()),
                                 **res_uuid_column_kwargs)
    res_table.create_column(res_uuid)

    if migrate_engine.name == 'postgresql':
        # postgresql needs an explicit sequence to feed the new id column
        sequence = sqlalchemy.Sequence('res')
        sqlalchemy.schema.CreateSequence(sequence,
                                         bind=migrate_engine).execute()
        res_id = sqlalchemy.Column('tmp_id', sqlalchemy.Integer,
                                   server_default=sqlalchemy.text(
                                       "nextval('res')"))
    else:
        res_id_column_kwargs = {}
        if migrate_engine.name == 'ibm_db_sa':
            # NOTE(mriedem): This is turned into a primary key constraint
            # later so it must be non-nullable.
            res_id_column_kwargs['nullable'] = False
        res_id = sqlalchemy.Column('tmp_id', sqlalchemy.Integer,
                                   **res_id_column_kwargs)
    res_table.create_column(res_id)

    # Number the existing rows 1..n in creation order and copy the old
    # string id into the new uuid column.
    fake_autoincrement = itertools.count(1)
    res_list = res_table.select().order_by(
        sqlalchemy.sql.expression.asc(
            res_table.c.created_at)).execute().fetchall()
    for res in res_list:
        # next(it) works on both python 2 and 3; the previous
        # fake_autoincrement.next() method call is python 2 only.
        values = {'tmp_id': next(fake_autoincrement), 'uuid': res.id}
        update = res_table.update().where(
            res_table.c.id == res.id).values(values)
        migrate_engine.execute(update)

    constraint_kwargs = {'table': res_table}
    if migrate_engine.name == 'ibm_db_sa':
        # NOTE(mriedem): DB2 gives a random name to the unique constraint
        # if one is not provided so let's set the standard name ourselves.
        constraint_kwargs['name'] = 'uniq_resource0uuid0'
    cons = constraint.UniqueConstraint('uuid', **constraint_kwargs)
    cons.create()

    if migrate_engine.name == 'postgresql':
        # resource_id_seq will be dropped in the case of removing `id` column
        # set owner to none for saving this sequence (it is needed in the
        # earlier migration)
        migrate_engine.execute('alter sequence resource_id_seq owned by none')
    res_table.c.id.drop()

    alter_kwargs = {}
    if migrate_engine.name == 'ibm_db_sa':
        alter_kwargs['nullable'] = False
    res_table.c.tmp_id.alter('id', sqlalchemy.Integer, **alter_kwargs)

    cons = constraint.PrimaryKeyConstraint('tmp_id', table=res_table)
    cons.create()

    if migrate_engine.name == 'ibm_db_sa':
        # NOTE(chenxiao): For DB2, setting "ID" column "autoincrement=True"
        # can't make sense after above "tmp_id=>id" transformation,
        # so should work around it.
        sql = ("ALTER TABLE RESOURCE ALTER COLUMN ID SET GENERATED BY "
               "DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1)")
        migrate_engine.execute(sql)
    else:
        res_table.c.tmp_id.alter(sqlalchemy.Integer, autoincrement=True)
def upgrade_resource_data_post(migrate_engine):
    """Finish the resource_data migration after resource.id is an int.

    Fills in the integer resource_id from the matching resource row
    (matched via tmp_res_uuid == resource.uuid), restores the foreign
    key and drops the temporary uuid column.

    :param migrate_engine: SQLAlchemy engine bound to the heat database
    """
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    res_table = sqlalchemy.Table('resource', meta, autoload=True)
    rd_table = sqlalchemy.Table('resource_data', meta, autoload=True)

    # set: resource_data.resource_id = resource.id
    res_list = res_table.select().order_by(
        sqlalchemy.sql.expression.asc(
            res_table.c.created_at)).execute().fetchall()
    for res in res_list:
        values = {'resource_id': res.id}
        update = rd_table.update().where(
            rd_table.c.tmp_res_uuid == res.uuid).values(values)
        migrate_engine.execute(update)

    # set foreignkey on resource_id
    if migrate_engine.name == 'mysql':
        # mysql keeps an index left over from the dropped foreign key,
        # which blocks re-creating the constraint; drop it first.
        # NOTE(review): assumes the first index on resource_data is that
        # leftover index -- confirm.
        inspector = sqlalchemy.inspect(migrate_engine)
        name = inspector.get_indexes('resource_data')[0]['name']
        sqlalchemy.Index(name, rd_table.c.resource_id).drop()
    cons = migrate.ForeignKeyConstraint(columns=[rd_table.c.resource_id],
                                        refcolumns=[res_table.c.id])
    cons.create()

    rd_table.c.resource_id.alter(nullable=False)
    rd_table.c.tmp_res_uuid.drop()
def upgrade(migrate_engine):
    """Migrate resource.id from a uuid string to an autoincrement int.

    sqlite cannot alter columns in place, so it gets dedicated
    table-rebuild steps; all other backends alter the schema directly.
    The resource_data post step is common to both paths.
    """
    if migrate_engine.name == 'sqlite':
        pre_steps = (upgrade_sqlite_resource_data_pre,
                     upgrade_sqlite_resource)
    else:
        pre_steps = (upgrade_resource_data_pre,
                     upgrade_resource)
    for step in pre_steps:
        step(migrate_engine)
    upgrade_resource_data_post(migrate_engine)
def upgrade_sqlite_resource(migrate_engine):
    """sqlite variant of upgrade_resource: rebuild the table wholesale.

    Creates a new resource table with an integer autoincrement id and a
    uuid column, copies every row (old id -> uuid), then swaps it in.

    :param migrate_engine: SQLAlchemy engine bound to the heat database
    """
    meta = sqlalchemy.MetaData(bind=migrate_engine)

    # (pafuent) Here it isn't recommended to import the table from the models,
    # because in future migrations the model could change and this migration
    # could fail.
    # I know it is ugly but it's the only way that I found to 'freeze'
    # the model state for this migration.
    stack_table = sqlalchemy.Table('stack', meta, autoload=True)
    res_table = sqlalchemy.Table(
        'new_resource', meta,
        # integer autoincrement primary key replaces the old uuid id
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column('stack_id', sqlalchemy.String(36),
                          sqlalchemy.ForeignKey(stack_table.c.id),
                          nullable=False),
        sqlalchemy.Column('uuid', sqlalchemy.String(36),
                          default=lambda: str(uuid.uuid4()),
                          unique=True),
        sqlalchemy.Column('name', sqlalchemy.String(255)),
        sqlalchemy.Column('nova_instance', sqlalchemy.String(255)),
        sqlalchemy.Column('action', sqlalchemy.String(255)),
        sqlalchemy.Column('status', sqlalchemy.String(255)),
        sqlalchemy.Column('status_reason', sqlalchemy.String(255)),
        sqlalchemy.Column('rsrc_metadata', sqlalchemy.Text),
        sqlalchemy.Column('properties_data', sqlalchemy.Text),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime,
                          default=timeutils.utcnow),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime,
                          onupdate=timeutils.utcnow))
    res_table.create()

    prev_res_table = sqlalchemy.Table('resource', meta, autoload=True)
    # copy in creation order so the autoincrement ids follow created_at
    res_list = list(prev_res_table.select().order_by(
        sqlalchemy.sql.expression.asc(prev_res_table.c.created_at))
        .execute())
    for res in res_list:
        # the old string id becomes the uuid; the new integer id is
        # assigned by sqlite's autoincrement on insert
        values = {
            'stack_id': res.stack_id,
            'uuid': res.id,
            'name': res.name,
            'nova_instance': res.nova_instance,
            'action': res.action,
            'status': res.status,
            'status_reason': res.status_reason,
            'rsrc_metadata': res.rsrc_metadata,
            'properties_data': res.properties_data,
            'created_at': res.created_at,
            'updated_at': res.updated_at}
        migrate_engine.execute(res_table.insert(values))

    prev_res_table.drop()
    res_table.rename('resource')
def downgrade(migrate_engine):
    """Revert resource.id back to a uuid string primary key.

    Mirrors upgrade(): sqlite gets table-rebuild steps, every other
    backend alters the schema in place, and the resource_data post step
    is shared.
    """
    if migrate_engine.name == 'sqlite':
        pre_steps = (downgrade_sqlite_resource_data_pre,
                     downgrade_sqlite_resource)
    else:
        pre_steps = (downgrade_resource_data_pre,
                     downgrade_resource)
    for step in pre_steps:
        step(migrate_engine)
    downgrade_resource_data_post(migrate_engine)
def downgrade_resource_data_pre(migrate_engine):
    """Prepare resource_data for the downgrade back to string ids.

    Drops the foreign key, renames the integer resource_id column to
    tmp_res_id, adds a String(36) resource_id column and fills it with
    the owning resource's uuid.

    :param migrate_engine: SQLAlchemy engine bound to the heat database
    """
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    rd_table = sqlalchemy.Table('resource_data', meta, autoload=True)
    res_table = sqlalchemy.Table('resource', meta, autoload=True)

    # remove foreignkey on resource_id
    inspector = sqlalchemy.inspect(migrate_engine)
    # NOTE(review): assumes the only foreign key on resource_data is the
    # one pointing at resource.id -- confirm for all backends.
    fkc_name = inspector.get_foreign_keys('resource_data')[0]['name']
    fkc = ForeignKeyConstraint([rd_table.c.resource_id], [res_table.c.id],
                               fkc_name)
    migrate_engine.execute(DropConstraint(fkc))

    # rename resource_id -> tmp_res_id (still holds the integer id)
    rd_table.c.resource_id.alter(name='tmp_res_id')

    # create the new resource_id column (no foreignkey yet)
    res_id_column_kwargs = {}
    if migrate_engine.name == 'ibm_db_sa':
        # NOTE(mriedem): This is turned into a foreignkey key constraint
        # later so it must be non-nullable.
        res_id_column_kwargs['nullable'] = False
    res_id = sqlalchemy.Column('resource_id', sqlalchemy.String(36),
                               **res_id_column_kwargs)
    rd_table.create_column(res_id)

    # reload metadata due to some strange behaviour of sqlalchemy
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    rd_table = sqlalchemy.Table('resource_data', meta, autoload=True)

    # populate the new resource_id column with the resource's uuid,
    # matching rows through the preserved integer id in tmp_res_id
    res_list = res_table.select().order_by(
        sqlalchemy.sql.expression.asc(
            res_table.c.created_at)).execute().fetchall()
    for res in res_list:
        values = {'resource_id': res.uuid}
        update = rd_table.update().where(
            rd_table.c.tmp_res_id == res.id).values(values)
        migrate_engine.execute(update)
def downgrade_sqlite_resource_data_pre(migrate_engine):
    """sqlite variant of downgrade_resource_data_pre.

    Rebuilds resource_data with a string resource_id column (to hold
    the resource uuid again) and a tmp_res_id column preserving the
    integer id, so downgrade_resource_data_post can match rows back up.

    :param migrate_engine: SQLAlchemy engine bound to the heat database
    """
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    # reflect 'resource' so the metadata knows about it
    sqlalchemy.Table('resource', meta, autoload=True)
    rd_table = sqlalchemy.Table(
        'new_resource_data', meta,
        sqlalchemy.Column('id',
                          sqlalchemy.Integer,
                          primary_key=True,
                          nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('key', sqlalchemy.String(255)),
        sqlalchemy.Column('value', sqlalchemy.Text),
        sqlalchemy.Column('redact', sqlalchemy.Boolean),
        sqlalchemy.Column('decrypt_method', sqlalchemy.String(64)),
        # BUG FIX: String(36), not Integer -- after the downgrade this
        # column holds the resource uuid string again, matching the
        # non-sqlite downgrade path which re-creates resource_id as
        # String(36).  sqlite's dynamic typing masked the mismatch.
        sqlalchemy.Column('resource_id', sqlalchemy.String(36),
                          nullable=False),
        sqlalchemy.Column('tmp_res_id', sqlalchemy.Integer,
                          nullable=False))
    rd_table.create()

    prev_rd_table = sqlalchemy.Table('resource_data', meta, autoload=True)
    rd_list = list(prev_rd_table.select().order_by(
        sqlalchemy.sql.expression.asc(prev_rd_table.c.created_at))
        .execute())
    for rd in rd_list:
        # "foo" is a placeholder; downgrade_resource_data_post replaces
        # it with the real resource uuid.
        values = {'key': rd.key,
                  'value': rd.value,
                  'redact': rd.redact,
                  'decrypt_method': rd.decrypt_method,
                  'resource_id': "foo",
                  'tmp_res_id': rd.resource_id}
        migrate_engine.execute(rd_table.insert(values))

    prev_rd_table.drop()
    rd_table.rename('resource_data')
def downgrade_resource(migrate_engine):
    """Restore the string uuid primary key on the resource table.

    Builds a tmp_id String(36) column, copies each row's uuid into it,
    drops the integer id and the uuid column, then renames
    tmp_id -> id as the primary key.  On postgresql the sequence
    created by the upgrade is dropped.

    :param migrate_engine: SQLAlchemy engine bound to the heat database
    """
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    res_table = sqlalchemy.Table('resource', meta, autoload=True)

    res_id_column_kwargs = {}
    if migrate_engine.name == 'ibm_db_sa':
        res_id_column_kwargs['nullable'] = False
    # BUG FIX: str(uuid.uuid4()) -- the previous lambda did
    # str(uuid.uuid4), stringifying the function object instead of
    # generating a uuid.
    res_id = sqlalchemy.Column('tmp_id', sqlalchemy.String(length=36),
                               default=lambda: str(uuid.uuid4()),
                               **res_id_column_kwargs)
    res_id.create(res_table)

    # copy each row's uuid into the future id column
    res_list = res_table.select().execute()
    for res in res_list:
        values1 = {'tmp_id': res.uuid}
        update = res_table.update().where(
            res_table.c.uuid == res.uuid).values(values1)
        migrate_engine.execute(update)

    res_table.c.id.drop()
    res_table.c.uuid.drop()

    cons = constraint.PrimaryKeyConstraint('tmp_id', table=res_table)
    cons.create()

    alter_kwargs = {}
    # NOTE(mriedem): DB2 won't allow a primary key on a nullable column so
    # we have to make it non-nullable.
    if migrate_engine.name == 'ibm_db_sa':
        alter_kwargs['nullable'] = False
    # BUG FIX: same str(uuid.uuid4()) correction as above.
    res_table.c.tmp_id.alter('id', default=lambda: str(uuid.uuid4()),
                             **alter_kwargs)

    if migrate_engine.name == 'postgresql':
        # drop the 'res' sequence that upgrade_resource created
        sequence = sqlalchemy.Sequence('res')
        sqlalchemy.schema.DropSequence(sequence, bind=migrate_engine).execute()
def downgrade_sqlite_resource(migrate_engine):
    """sqlite variant of downgrade_resource: rebuild the table wholesale.

    Creates a new resource table with the old String(36) id, copies
    every row (uuid -> id), then swaps it in.

    :param migrate_engine: SQLAlchemy engine bound to the heat database
    """
    meta = sqlalchemy.MetaData(bind=migrate_engine)

    # (pafuent) Here it isn't recommended to import the table from the models,
    # because in future migrations the model could change and this migration
    # could fail.
    # I know it is ugly but it's the only way that I found to 'freeze'
    # the model state for this migration.
    stack_table = sqlalchemy.Table('stack', meta, autoload=True)
    res_table = sqlalchemy.Table(
        'new_resource', meta,
        sqlalchemy.Column('id', sqlalchemy.String(36),
                          default=lambda: str(uuid.uuid4())),
        sqlalchemy.Column('stack_id', sqlalchemy.String(36),
                          sqlalchemy.ForeignKey(stack_table.c.id),
                          nullable=False),
        sqlalchemy.Column('name', sqlalchemy.String(255)),
        sqlalchemy.Column('nova_instance', sqlalchemy.String(255)),
        sqlalchemy.Column('action', sqlalchemy.String(255)),
        sqlalchemy.Column('status', sqlalchemy.String(255)),
        sqlalchemy.Column('status_reason', sqlalchemy.String(255)),
        sqlalchemy.Column('rsrc_metadata', sqlalchemy.Text),
        sqlalchemy.Column('properties_data', sqlalchemy.Text),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime,
                          default=timeutils.utcnow),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime,
                          onupdate=timeutils.utcnow))
    res_table.create()

    prev_res_table = sqlalchemy.Table('resource', meta, autoload=True)
    res_list = prev_res_table.select().execute().fetchall()
    for res in res_list:
        # BUG FIX: 'action' was previously missing from this dict, so a
        # sqlite downgrade silently dropped every resource's action
        # value even though the rebuilt table defines the column.
        values = {
            'id': res.uuid,
            'stack_id': res.stack_id,
            'name': res.name,
            'nova_instance': res.nova_instance,
            'action': res.action,
            'status': res.status,
            'status_reason': res.status_reason,
            'rsrc_metadata': res.rsrc_metadata,
            'properties_data': res.properties_data,
            'created_at': res.created_at,
            'updated_at': res.updated_at}
        migrate_engine.execute(res_table.insert(values))

    prev_res_table.drop()
    res_table.rename('resource')
def downgrade_resource_data_post(migrate_engine):
    """Finish the resource_data downgrade after resource.id is a uuid.

    On sqlite the resource_id column still holds placeholders and must
    be populated here (other backends already populated it in
    downgrade_resource_data_pre).  Then the foreign key is restored and
    the temporary integer column dropped.

    :param migrate_engine: SQLAlchemy engine bound to the heat database
    """
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    res_table = sqlalchemy.Table('resource', meta, autoload=True)
    rd_table = sqlalchemy.Table('resource_data', meta, autoload=True)

    # set: resource_data.resource_id = resource.id
    if migrate_engine.name == 'sqlite':
        res_list = res_table.select().order_by(
            sqlalchemy.sql.expression.asc(
                res_table.c.created_at)).execute().fetchall()
        for res in res_list:
            values = {'resource_id': res.id}
            update = rd_table.update().where(
                rd_table.c.tmp_res_id == res.id).values(values)
            migrate_engine.execute(update)

    # set foreignkey on resource_id
    if migrate_engine.name == 'mysql':
        # NOTE(review): hardcoded index name, unlike the inspector-based
        # lookup in upgrade_resource_data_post -- presumably the name
        # mysql assigns to the leftover FK index; verify.
        sqlalchemy.Index('resource_data_resource_id_fkey',
                         rd_table.c.resource_id).drop()
    cons = migrate.ForeignKeyConstraint(columns=[rd_table.c.resource_id],
                                        refcolumns=[res_table.c.id])
    cons.create()

    rd_table.c.resource_id.alter(nullable=False)
    rd_table.c.tmp_res_id.drop()

View File

@ -255,7 +255,7 @@ class ResourceData(BASE, HeatBase):
redact = sqlalchemy.Column('redact', sqlalchemy.Boolean)
decrypt_method = sqlalchemy.Column(sqlalchemy.String(64))
resource_id = sqlalchemy.Column('resource_id',
sqlalchemy.String(36),
sqlalchemy.Integer,
sqlalchemy.ForeignKey('resource.id'),
nullable=False)
@ -265,9 +265,10 @@ class Resource(BASE, HeatBase, StateAware):
__tablename__ = 'resource'
id = sqlalchemy.Column(sqlalchemy.String(36),
primary_key=True,
default=lambda: str(uuid.uuid4()))
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
uuid = sqlalchemy.Column(sqlalchemy.String(36),
default=lambda: str(uuid.uuid4()),
unique=True)
name = sqlalchemy.Column('name', sqlalchemy.String(255), nullable=True)
nova_instance = sqlalchemy.Column('nova_instance', sqlalchemy.String(255))
# odd name as "metadata" is reserved

View File

@ -163,6 +163,7 @@ class Resource(object):
self.status = self.COMPLETE
self.status_reason = ''
self.id = None
self.uuid = None
self._data = {}
self._rsrc_metadata = None
self._stored_properties_data = None
@ -187,6 +188,7 @@ class Resource(object):
self.status = resource.status
self.status_reason = resource.status_reason
self.id = resource.id
self.uuid = resource.uuid
try:
self._data = db_api.resource_data_get_all(self, resource.data)
except exception.NotFound:
@ -781,7 +783,7 @@ class Resource(object):
name = '%s-%s-%s' % (self.stack.name,
self.name,
short_id.get_id(self.id))
short_id.get_id(self.uuid))
if self.physical_resource_name_limit:
name = self.reduce_physical_resource_name(
@ -906,6 +908,7 @@ class Resource(object):
new_rs = db_api.resource_create(self.context, rs)
self.id = new_rs.id
self.uuid = new_rs.uuid
self.created_time = new_rs.created_at
self._rsrc_metadata = metadata
except Exception as ex:

View File

@ -54,10 +54,10 @@ class LaunchConfigurationTest(common.HeatTestCase):
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
# use physical_resource_name when rsrc.id is not None
self.assertIsNotNone(rsrc.id)
self.assertIsNotNone(rsrc.uuid)
expected = '%s-%s-%s' % (rsrc.stack.name,
rsrc.name,
short_id.get_id(rsrc.id))
short_id.get_id(rsrc.uuid))
self.assertEqual(expected, rsrc.FnGetRefId())
# otherwise use parent method

View File

@ -418,6 +418,93 @@ class HeatMigrationsCheckers(test_migrations.WalkVersionsMixin,
self.assertColumnExists(engine, 'stack', 'current_traversal')
self.assertColumnExists(engine, 'stack', 'current_deps')
def _pre_upgrade_057(self, engine):
    """Seed pre-057 data: one stack with two resources and their data.

    Returns the inserted resource and resource_data rows so _check_057
    can verify them after the migration runs.
    """
    # template
    raw_template = utils.get_table(engine, 'raw_template')
    templ = [dict(id=11, template='{}', files='{}')]
    engine.execute(raw_template.insert(), templ)

    # credentials
    user_creds = utils.get_table(engine, 'user_creds')
    user = [dict(id=11, username='steve', password='notthis',
                 tenant='mine', auth_url='bla',
                 tenant_id=str(uuid.uuid4()),
                 trust_id='',
                 trustor_user_id='')]
    engine.execute(user_creds.insert(), user)

    # stack
    stack = utils.get_table(engine, 'stack')
    stack_data = [dict(id='867aaefb-152e-505d-b13a-35d4c816390c',
                       name='s1',
                       raw_template_id=templ[0]['id'],
                       user_creds_id=user[0]['id'],
                       username='steve', disable_rollback=True)]
    engine.execute(stack.insert(), stack_data)

    # resource
    resource = utils.get_table(engine, 'resource')
    res_data = [dict(id='167aaefb-152e-505d-b13a-35d4c816390c',
                     name='res-4',
                     stack_id=stack_data[0]['id'],
                     user_creds_id=user[0]['id']),
                dict(id='177aaefb-152e-505d-b13a-35d4c816390c',
                     name='res-5',
                     stack_id=stack_data[0]['id'],
                     user_creds_id=user[0]['id'])]
    engine.execute(resource.insert(), res_data)

    # resource_data
    resource_data = utils.get_table(engine, 'resource_data')
    # BUG FIX: the column is named 'redact'; the previous key 'reduct'
    # did not match any resource_data column.
    rd_data = [dict(key='fruit',
                    value='blueberries',
                    redact=False,
                    resource_id=res_data[0]['id']),
               dict(key='fruit',
                    value='apples',
                    redact=False,
                    resource_id=res_data[1]['id'])]
    engine.execute(resource_data.insert(), rd_data)
    return {'resource': res_data, 'resource_data': rd_data}
def _check_057(self, engine, data):
    """Verify migration 057: resource.id is an int, uuid keeps old id.

    'data' is the dict of pre-upgrade rows returned by _pre_upgrade_057.
    """
    def uuid_in_res_data(res_uuid):
        # True if res_uuid was one of the pre-upgrade resource ids
        for rd in data['resource']:
            if rd['id'] == res_uuid:
                return True
        return False

    def rd_matches_old_data(key, value, res_uuid):
        # True if a pre-upgrade resource_data row with this key/value
        # belonged to the resource whose old id was res_uuid
        for rd in data['resource_data']:
            if (rd['resource_id'] == res_uuid and rd['key'] == key
                    and rd['value'] == value):
                return True
        return False

    self.assertColumnIsNotNullable(engine, 'resource', 'id')
    res_table = utils.get_table(engine, 'resource')
    res_in_db = list(res_table.select().execute())
    # confirm the resource.id is an int and the uuid field has been
    # copied from the old id.
    for r in res_in_db:
        # now sqlalchemy returns `long` for mysql
        # need more convenient way for type check
        if engine.name == 'mysql':
            self.assertEqual(long, type(r.id))
        else:
            self.assertEqual(int, type(r.id))
        self.assertTrue(uuid_in_res_data(r.uuid))

    # confirm that the new resource_id points to the correct resource.
    rd_table = utils.get_table(engine, 'resource_data')
    rd_in_db = list(rd_table.select().execute())
    for rd in rd_in_db:
        for r in res_in_db:
            if rd.resource_id == r.id:
                self.assertTrue(rd_matches_old_data(rd.key, rd.value,
                                                    r.uuid))
class TestHeatMigrationsMySQL(HeatMigrationsCheckers,
test_base.MySQLOpportunisticTestCase):

View File

@ -54,7 +54,8 @@ class CloudConfigTest(common.HeatTestCase):
config_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
value = {'id': config_id}
self.rpc_client.create_software_config.return_value = value
self.config.id = uuid.uuid4().hex
self.config.id = 5
self.config.uuid = uuid.uuid4().hex
self.config.handle_create()
self.assertEqual(config_id, self.config.resource_id)
kwargs = self.rpc_client.create_software_config.call_args[1]

View File

@ -58,7 +58,8 @@ class MultipartMimeTest(common.HeatTestCase):
config_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
sc = {'id': config_id}
self.rpc_client.create_software_config.return_value = sc
self.config.id = uuid.uuid4().hex
self.config.id = 55
self.config.uuid = uuid.uuid4().hex
self.config.handle_create()
self.assertEqual(config_id, self.config.resource_id)
kwargs = self.rpc_client.create_software_config.call_args[1]

View File

@ -170,7 +170,7 @@ class ResourceTest(common.HeatTestCase):
self.assertIsNotNone(res.id)
expected = '%s-%s-%s' % (self.stack.name,
res.name,
short_id.get_id(res.id))
short_id.get_id(res.uuid))
self.assertEqual(expected, res.physical_resource_name_or_FnGetRefId())
# otherwise use parent method

View File

@ -866,7 +866,8 @@ class SoftwareDeploymentTest(common.HeatTestCase):
self.deployment.data = mock.Mock(
return_value=dep_data)
self.deployment.id = str(uuid.uuid4())
self.deployment.id = 23
self.deployment.uuid = str(uuid.uuid4())
container = self.deployment.physical_resource_name()
temp_url = self.deployment._get_temp_url()
@ -906,7 +907,8 @@ class SoftwareDeploymentTest(common.HeatTestCase):
'heat.engine.clients.os.swift.SwiftClientPlugin._create')
scc.return_value = sc
self.deployment.id = str(uuid.uuid4())
self.deployment.id = 23
self.deployment.uuid = str(uuid.uuid4())
container = self.deployment.physical_resource_name()
self.deployment._delete_temp_url()
sc.delete_object.assert_called_once_with(container, object_name)

View File

@ -44,9 +44,6 @@ class StackUserTest(common.HeatTestCase):
generic_resource.StackUserResource)
self.fc = fakes.FakeKeystoneClient()
def tearDown(self):
super(StackUserTest, self).tearDown()
def _user_create(self, stack_name, project_id, user_id,
resource_name='user', create_project=True,
password=None):
@ -67,7 +64,7 @@ class StackUserTest(common.HeatTestCase):
rsrc._store()
self.m.StubOutWithMock(short_id, 'get_id')
short_id.get_id(rsrc.id).AndReturn('aabbcc')
short_id.get_id(rsrc.uuid).AndReturn('aabbcc')
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'create_stack_domain_user')

View File

@ -127,7 +127,7 @@ class UserTest(common.HeatTestCase):
rsrc._store()
self.m.StubOutWithMock(short_id, 'get_id')
short_id.get_id(rsrc.id).MultipleTimes().AndReturn('aabbcc')
short_id.get_id(rsrc.uuid).MultipleTimes().AndReturn('aabbcc')
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'create_stack_domain_user')