db: Remove use of 'bind' arguments

Resolve the following RemovedIn20Warning warnings:

  The MetaData.bind argument is deprecated and will be removed in
  SQLAlchemy 2.0.

  The ``bind`` argument for schema methods that invoke SQL against an
  engine or connection will be required in SQLAlchemy 2.0.
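
Concretely, this means constructing MetaData and schema objects
unbound and passing the engine or connection explicitly to any method
that emits SQL, as one of the migration hunks below does for an Enum
(the 'meta' and 'bind' names come from those hunks):

  # before: the Enum picks up its bind from the metadata
  enum = Enum('private', 'public', name='image_visibility',
              metadata=meta)
  enum.create()

  # after: the bind is passed explicitly to the schema method
  enum = Enum('private', 'public', name='image_visibility')
  enum.create(bind=bind)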

We are also able to resolve the following RemovedIn20Warning warning,
since migrating away from the bind parameter requires updating the
same calls:

  The autoload parameter is deprecated and will be removed in version
  2.0.  Please use the autoload_with parameter, passing an engine or
  connection.
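
For example, reflecting an existing table now names the engine
explicitly, as the hunks below show:

  # before
  images = Table('images', meta, autoload=True)

  # after
  images = Table('images', meta, autoload_with=engine)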

Similarly, we can resolve the following RemovedIn20Warning warning,
since the calls updated here are the only ones affected by this
deprecation:

  The insert.values parameter will be removed in SQLAlchemy 2.0.  Please
  refer to the Insert.values() method.
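
That is, values are no longer passed as a parameter to insert() but
applied through the generative Insert.values() method and executed on
a connection, as in the metadata helper below:

  # before
  table.insert(values=values).execute()

  # after
  with engine.connect() as conn:
      conn.execute(table.insert().values(values))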

Change-Id: Ic956980a03e1a0e1b6f90c492d8a03a64ea275cc
Stephen Finucane 2023-07-18 17:33:16 +01:00
parent 6c150194c6
commit 1f5bbf0bd1
9 changed files with 290 additions and 261 deletions

View File

@@ -54,8 +54,9 @@ def get_current_alembic_heads():
     :param:old: Actual alembic head
     :param:new: Expected alembic head to be updated
     """
-    meta = MetaData(engine)
-    alembic_version = Table('alembic_version', meta, autoload=True)
+    meta = MetaData()
+    alembic_version = Table(
+        'alembic_version', meta, autoload_with=engine)
     alembic_version.update().values(
         version_num=new).where(
             alembic_version.c.version_num == old).execute()

View File

@@ -28,67 +28,86 @@ def has_migrations(engine):
     Note: This method can return a false positive if data migrations
     are running in the background as it's being called.
     """
-    meta = MetaData(engine)
-    images = Table('images', meta, autoload=True)
+    meta = MetaData()
+    images = Table('images', meta, autoload_with=engine)

-    rows_with_null_visibility = (select(images.c.id)
-                                 .where(images.c.visibility == None)
-                                 .limit(1)
-                                 .execute())
+    with engine.connect() as conn:
+        rows_with_null_visibility = conn.execute(
+            select(images.c.id)
+            .where(images.c.visibility == None)
+            .limit(1)
+        )

     if rows_with_null_visibility.rowcount == 1:
         return True

-    image_members = Table('image_members', meta, autoload=True)
-    rows_with_pending_shared = (select(images.c.id)
-                                .where(and_(
-                                    images.c.visibility == 'private',
-                                    images.c.id.in_(
-                                        select(image_members.c.image_id)
-                                        .distinct()
-                                        .where(not_(image_members.c.deleted))))
-                                )
-                                .limit(1)
-                                .execute())
+    image_members = Table('image_members', meta, autoload_with=engine)
+    with engine.connect() as conn:
+        rows_with_pending_shared = conn.execute(
+            select(images.c.id).where(
+                and_(
+                    images.c.visibility == 'private',
+                    images.c.id.in_(
+                        select(
+                            image_members.c.image_id
+                        ).distinct().where(not_(image_members.c.deleted))
+                    )
+                )
+            ).limit(1)
+        )

     if rows_with_pending_shared.rowcount == 1:
         return True

     return False


-def _mark_all_public_images_with_public_visibility(images):
-    migrated_rows = (images
-                     .update().values(visibility='public')
-                     .where(images.c.is_public)
-                     .execute())
+def _mark_all_public_images_with_public_visibility(engine, images):
+    with engine.connect() as conn:
+        migrated_rows = conn.execute(
+            images.update().values(
+                visibility='public'
+            ).where(images.c.is_public)
+        )
     return migrated_rows.rowcount


-def _mark_all_non_public_images_with_private_visibility(images):
-    migrated_rows = (images
-                     .update().values(visibility='private')
-                     .where(not_(images.c.is_public))
-                     .execute())
+def _mark_all_non_public_images_with_private_visibility(engine, images):
+    with engine.connect() as conn:
+        migrated_rows = conn.execute(
+            images
+            .update().values(visibility='private')
+            .where(not_(images.c.is_public))
+        )
     return migrated_rows.rowcount


-def _mark_all_private_images_with_members_as_shared_visibility(images,
-                                                               image_members):
-    migrated_rows = (images
-                     .update().values(visibility='shared')
-                     .where(and_(images.c.visibility == 'private',
-                                 images.c.id.in_(
-                                     select(image_members.c.image_id)
-                                     .distinct()
-                                     .where(not_(image_members.c.deleted)))))
-                     .execute())
+def _mark_all_private_images_with_members_as_shared_visibility(
+    engine, images, image_members,
+):
+    with engine.connect() as conn:
+        migrated_rows = conn.execute(
+            images.update().values(
+                visibility='shared'
+            )
+            .where(
+                and_(
+                    images.c.visibility == 'private',
+                    images.c.id.in_(
+                        select(image_members.c.image_id).distinct().where(
+                            not_(image_members.c.deleted)
+                        )
+                    )
+                )
+            )
+        )
     return migrated_rows.rowcount


 def _migrate_all(engine):
-    meta = MetaData(engine)
-    images = Table('images', meta, autoload=True)
-    image_members = Table('image_members', meta, autoload=True)
+    meta = MetaData()
+    images = Table('images', meta, autoload_with=engine)
+    image_members = Table('image_members', meta, autoload_with=engine)

-    num_rows = _mark_all_public_images_with_public_visibility(images)
-    num_rows += _mark_all_non_public_images_with_private_visibility(images)

View File

@@ -22,8 +22,6 @@ Create Date: 2016-08-03 17:19:35.306161
 """
 from alembic import op
-from sqlalchemy import MetaData, Table, Index

 # revision identifiers, used by Alembic.
 revision = 'mitaka01'
@@ -31,17 +29,14 @@ down_revision = 'liberty'
 branch_labels = None
 depends_on = None

-CREATED_AT_INDEX = 'created_at_image_idx'
-UPDATED_AT_INDEX = 'updated_at_image_idx'

 def upgrade():
-    migrate_engine = op.get_bind()
-    meta = MetaData(bind=migrate_engine)
-    images = Table('images', meta, autoload=True)
-
-    created_index = Index(CREATED_AT_INDEX, images.c.created_at)
-    created_index.create(migrate_engine)
-    updated_index = Index(UPDATED_AT_INDEX, images.c.updated_at)
-    updated_index.create(migrate_engine)
+    with op.batch_alter_table('images') as batch_op:
+        batch_op.create_index(
+            'created_at_image_idx',
+            ['created_at'],
+        )
+        batch_op.create_index(
+            'updated_at_image_idx',
+            ['updated_at'],
+        )

View File

@@ -33,10 +33,16 @@ depends_on = None

 def upgrade():
-    migrate_engine = op.get_bind()
-    meta = MetaData(bind=migrate_engine)
+    bind = op.get_bind()
+    meta = MetaData()

-    resource_types_table = Table('metadef_resource_types', meta, autoload=True)
+    resource_types_table = Table(
+        'metadef_resource_types', meta, autoload_with=bind)

-    resource_types_table.update(values={'name': 'OS::Nova::Server'}).where(
-        resource_types_table.c.name == 'OS::Nova::Instance').execute()
+    op.execute(
+        resource_types_table.update(
+            values={'name': 'OS::Nova::Server'},
+        ).where(
+            resource_types_table.c.name == 'OS::Nova::Instance'
+        )
+    )

View File

@@ -19,7 +19,7 @@ Create Date: 2017-01-27 12:58:16.647499
 """
 from alembic import op
-from sqlalchemy import MetaData, Enum
+from sqlalchemy import Enum

 from glance.cmd import manage
 from glance.db import migration
@@ -46,19 +46,18 @@ def _drop_column():
         batch_op.drop_column('is_public')


-def _drop_triggers(engine):
-    engine_name = engine.engine.name
+def _drop_triggers(connection):
+    engine_name = connection.engine.name
     if engine_name == "mysql":
         op.execute(MYSQL_DROP_INSERT_TRIGGER)
         op.execute(MYSQL_DROP_UPDATE_TRIGGER)


-def _set_nullability_and_default_on_visibility(meta):
+def _set_nullability_and_default_on_visibility():
     # NOTE(hemanthm): setting the default on 'visibility' column
     # to 'shared'. Also, marking it as non-nullable.
-    # images = Table('images', meta, autoload=True)
     existing_type = Enum('private', 'public', 'shared', 'community',
-                         metadata=meta, name='image_visibility')
+                         name='image_visibility')
     with op.batch_alter_table('images') as batch_op:
         batch_op.alter_column('visibility',
                               nullable=False,
@@ -67,10 +66,9 @@ def _set_nullability_and_default_on_visibility(meta):
 def upgrade():
-    migrate_engine = op.get_bind()
-    meta = MetaData(bind=migrate_engine)
+    bind = op.get_bind()

     _drop_column()
     if manage.USE_TRIGGERS:
-        _drop_triggers(migrate_engine)
-    _set_nullability_and_default_on_visibility(meta)
+        _drop_triggers(bind)
+    _set_nullability_and_default_on_visibility()

View File

@@ -19,7 +19,7 @@ Create Date: 2017-01-27 12:58:16.647499
 """
 from alembic import op
-from sqlalchemy import Column, Enum, MetaData
+from sqlalchemy import Column, Enum

 from glance.cmd import manage
 from glance.db import migration
@@ -120,24 +120,24 @@ END;
 """


-def _add_visibility_column(meta):
-    enum = Enum('private', 'public', 'shared', 'community', metadata=meta,
+def _add_visibility_column(bind):
+    enum = Enum('private', 'public', 'shared', 'community',
                 name='image_visibility')
-    enum.create()
+    enum.create(bind=bind)
     v_col = Column('visibility', enum, nullable=True, server_default=None)
     op.add_column('images', v_col)
     op.create_index('visibility_image_idx', 'images', ['visibility'])


-def _add_triggers(engine):
-    if engine.engine.name == 'mysql':
+def _add_triggers(connection):
+    if connection.engine.name == 'mysql':
         op.execute(MYSQL_INSERT_TRIGGER % (ERROR_MESSAGE, ERROR_MESSAGE,
                                            ERROR_MESSAGE))
         op.execute(MYSQL_UPDATE_TRIGGER % (ERROR_MESSAGE, ERROR_MESSAGE,
                                            ERROR_MESSAGE, ERROR_MESSAGE))


-def _change_nullability_and_default_on_is_public(meta):
+def _change_nullability_and_default_on_is_public():
     # NOTE(hemanthm): we mark is_public as nullable so that when new versions
     # add data only to be visibility column, is_public can be null.
     with op.batch_alter_table('images') as batch_op:
@@ -147,10 +147,9 @@ def _change_nullability_and_default_on_is_public(meta):
 def upgrade():
-    migrate_engine = op.get_bind()
-    meta = MetaData(bind=migrate_engine)
+    bind = op.get_bind()

-    _add_visibility_column(meta)
-    _change_nullability_and_default_on_is_public(meta)
+    _add_visibility_column(bind)
+    _change_nullability_and_default_on_is_public()
     if manage.USE_TRIGGERS:
-        _add_triggers(migrate_engine)
+        _add_triggers(bind)

View File

@@ -1494,7 +1494,8 @@ def purge_deleted_rows(context, age_in_days, max_rows, session=None):
     _validate_db_int(max_rows=max_rows)

     session = session or get_session()
-    metadata = MetaData(get_engine())
+    metadata = MetaData()
+    engine = get_engine()
     deleted_age = timeutils.utcnow() - datetime.timedelta(days=age_in_days)

     tables = []
@@ -1508,8 +1509,8 @@
     # are referencing soft deleted tasks/images records (e.g. task_info
     # records). Then purge all soft deleted records in glance tables in the
     # right order to avoid FK constraint violation.
-    t = Table("tasks", metadata, autoload=True)
-    ti = Table("task_info", metadata, autoload=True)
+    t = Table("tasks", metadata, autoload_with=engine)
+    ti = Table("task_info", metadata, autoload_with=engine)
     joined_rec = ti.join(t, t.c.id == ti.c.task_id)
     deleted_task_info = sql.\
         select(ti.c.task_id).where(t.c.deleted_at < deleted_age).\
@@ -1551,7 +1552,7 @@
            tables.append(tbl)

     for tbl in tables:
-        tab = Table(tbl, metadata, autoload=True)
+        tab = Table(tbl, metadata, autoload_with=engine)
         LOG.info(
             _LI('Purging deleted rows older than %(age_in_days)d day(s) '
                 'from table %(tbl)s'),
@@ -1593,11 +1594,12 @@ def purge_deleted_rows_from_images(context, age_in_days, max_rows,
     _validate_db_int(max_rows=max_rows)

     session = session or get_session()
-    metadata = MetaData(get_engine())
+    metadata = MetaData()
+    engine = get_engine()
     deleted_age = timeutils.utcnow() - datetime.timedelta(days=age_in_days)

     tbl = 'images'
-    tab = Table(tbl, metadata, autoload=True)
+    tab = Table(tbl, metadata, autoload_with=engine)
     LOG.info(
         _LI('Purging deleted rows older than %(age_in_days)d day(s) '
             'from table %(tbl)s'),

View File

@@ -65,134 +65,154 @@ CONF = cfg.CONF
 CONF.register_opts(metadata_opts)


-def get_metadef_namespaces_table(meta):
-    return sqlalchemy.Table('metadef_namespaces', meta, autoload=True)
+def get_metadef_namespaces_table(meta, engine):
+    return sqlalchemy.Table('metadef_namespaces', meta, autoload_with=engine)


-def get_metadef_resource_types_table(meta):
-    return sqlalchemy.Table('metadef_resource_types', meta, autoload=True)
+def get_metadef_resource_types_table(meta, engine):
+    return sqlalchemy.Table('metadef_resource_types', meta,
+                            autoload_with=engine)


-def get_metadef_namespace_resource_types_table(meta):
+def get_metadef_namespace_resource_types_table(meta, engine):
     return sqlalchemy.Table('metadef_namespace_resource_types', meta,
-                            autoload=True)
+                            autoload_with=engine)


-def get_metadef_properties_table(meta):
-    return sqlalchemy.Table('metadef_properties', meta, autoload=True)
+def get_metadef_properties_table(meta, engine):
+    return sqlalchemy.Table('metadef_properties', meta, autoload_with=engine)


-def get_metadef_objects_table(meta):
-    return sqlalchemy.Table('metadef_objects', meta, autoload=True)
+def get_metadef_objects_table(meta, engine):
+    return sqlalchemy.Table('metadef_objects', meta, autoload_with=engine)


-def get_metadef_tags_table(meta):
-    return sqlalchemy.Table('metadef_tags', meta, autoload=True)
+def get_metadef_tags_table(meta, engine):
+    return sqlalchemy.Table('metadef_tags', meta, autoload_with=engine)

-def _get_resource_type_id(meta, name):
-    rt_table = get_metadef_resource_types_table(meta)
-    resource_type = (
-        select(rt_table.c.id).
-        where(rt_table.c.name == name).
-        select_from(rt_table).
-        execute().fetchone())
-    if resource_type:
-        return resource_type[0]
+def _get_resource_type_id(meta, engine, name):
+    rt_table = get_metadef_resource_types_table(meta, engine)
+    with engine.connect() as conn:
+        resource_type = conn.execute(
+            select(rt_table.c.id).where(
+                rt_table.c.name == name
+            ).select_from(rt_table)
+        ).fetchone()
+    if resource_type:
+        return resource_type[0]
     return None


-def _get_resource_type(meta, resource_type_id):
-    rt_table = get_metadef_resource_types_table(meta)
-    return (
-        rt_table.select().
-        where(rt_table.c.id == resource_type_id).
-        execute().fetchone())
+def _get_resource_type(meta, engine, resource_type_id):
+    rt_table = get_metadef_resource_types_table(meta, engine)
+    with engine.connect() as conn:
+        return conn.execute(
+            rt_table.select().where(
+                rt_table.c.id == resource_type_id
+            )
+        ).fetchone()

-def _get_namespace_resource_types(meta, namespace_id):
+def _get_namespace_resource_types(meta, engine, namespace_id):
     namespace_resource_types_table = (
-        get_metadef_namespace_resource_types_table(meta))
-    return (
-        namespace_resource_types_table.select().
-        where(namespace_resource_types_table.c.namespace_id == namespace_id).
-        execute().fetchall())
+        get_metadef_namespace_resource_types_table(meta, engine))
+    with engine.connect() as conn:
+        return conn.execute(
+            namespace_resource_types_table.select().where(
+                namespace_resource_types_table.c.namespace_id == namespace_id
+            )
+        ).fetchall()


-def _get_namespace_resource_type_by_ids(meta, namespace_id, rt_id):
+def _get_namespace_resource_type_by_ids(meta, engine, namespace_id, rt_id):
     namespace_resource_types_table = (
-        get_metadef_namespace_resource_types_table(meta))
-    return (
-        namespace_resource_types_table.select().
-        where(and_(
-            namespace_resource_types_table.c.namespace_id == namespace_id,
-            namespace_resource_types_table.c.resource_type_id == rt_id)).
-        execute().fetchone())
+        get_metadef_namespace_resource_types_table(meta, engine))
+    with engine.connect() as conn:
+        return conn.execute(
+            namespace_resource_types_table.select().where(and_(
+                namespace_resource_types_table.c.namespace_id == namespace_id,
+                namespace_resource_types_table.c.resource_type_id == rt_id)
+            )
+        ).fetchone()

-def _get_properties(meta, namespace_id):
-    properties_table = get_metadef_properties_table(meta)
-    return (
-        properties_table.select().
-        where(properties_table.c.namespace_id == namespace_id).
-        execute().fetchall())
+def _get_properties(meta, engine, namespace_id):
+    properties_table = get_metadef_properties_table(meta, engine)
+    with engine.connect() as conn:
+        return conn.execute(
+            properties_table.select().where(
+                properties_table.c.namespace_id == namespace_id
+            )
+        ).fetchall()


-def _get_objects(meta, namespace_id):
-    objects_table = get_metadef_objects_table(meta)
-    return (
-        objects_table.select().
-        where(objects_table.c.namespace_id == namespace_id).
-        execute().fetchall())
+def _get_objects(meta, engine, namespace_id):
+    objects_table = get_metadef_objects_table(meta, engine)
+    with engine.connect() as conn:
+        return conn.execute(
+            objects_table.select().where(
+                objects_table.c.namespace_id == namespace_id)
+        ).fetchall()


-def _get_tags(meta, namespace_id):
-    tags_table = get_metadef_tags_table(meta)
-    return (
-        tags_table.select().
-        where(tags_table.c.namespace_id == namespace_id).
-        execute().fetchall())
+def _get_tags(meta, engine, namespace_id):
+    tags_table = get_metadef_tags_table(meta, engine)
+    with engine.connect() as conn:
+        return conn.execute(
+            tags_table.select().where(
+                tags_table.c.namespace_id == namespace_id
+            )
+        ).fetchall()


-def _get_resource_id(table, namespace_id, resource_name):
-    resource = (
-        select(table.c.id).
-        where(and_(table.c.namespace_id == namespace_id,
-                   table.c.name == resource_name)).
-        select_from(table).
-        execute().fetchone())
-    if resource:
-        return resource[0]
+def _get_resource_id(table, engine, namespace_id, resource_name):
+    with engine.connect() as conn:
+        resource = conn.execute(
+            select(table.c.id).where(
+                and_(
+                    table.c.namespace_id == namespace_id,
+                    table.c.name == resource_name,
+                )
+            ).select_from(table)
+        ).fetchone()
+    if resource:
+        return resource[0]
     return None

-def _clear_metadata(meta):
-    metadef_tables = [get_metadef_properties_table(meta),
-                      get_metadef_objects_table(meta),
-                      get_metadef_tags_table(meta),
-                      get_metadef_namespace_resource_types_table(meta),
-                      get_metadef_namespaces_table(meta),
-                      get_metadef_resource_types_table(meta)]
+def _clear_metadata(meta, engine):
+    metadef_tables = [get_metadef_properties_table(meta, engine),
+                      get_metadef_objects_table(meta, engine),
+                      get_metadef_tags_table(meta, engine),
+                      get_metadef_namespace_resource_types_table(meta, engine),
+                      get_metadef_namespaces_table(meta, engine),
+                      get_metadef_resource_types_table(meta, engine)]

-    for table in metadef_tables:
-        table.delete().execute()
-        LOG.info(_LI("Table %s has been cleared"), table)
+    with engine.connect() as conn:
+        for table in metadef_tables:
+            conn.execute(table.delete())
+            LOG.info(_LI("Table %s has been cleared"), table)


-def _clear_namespace_metadata(meta, namespace_id):
-    metadef_tables = [get_metadef_properties_table(meta),
-                      get_metadef_objects_table(meta),
-                      get_metadef_tags_table(meta),
-                      get_metadef_namespace_resource_types_table(meta)]
-    namespaces_table = get_metadef_namespaces_table(meta)
+def _clear_namespace_metadata(meta, engine, namespace_id):
+    metadef_tables = [get_metadef_properties_table(meta, engine),
+                      get_metadef_objects_table(meta, engine),
+                      get_metadef_tags_table(meta, engine),
+                      get_metadef_namespace_resource_types_table(meta, engine)]
+    namespaces_table = get_metadef_namespaces_table(meta, engine)

-    for table in metadef_tables:
-        table.delete().where(table.c.namespace_id == namespace_id).execute()
-    namespaces_table.delete().where(
-        namespaces_table.c.id == namespace_id).execute()
+    with engine.connect() as conn:
+        for table in metadef_tables:
+            conn.execute(
+                table.delete().where(table.c.namespace_id == namespace_id))
+        conn.execute(
+            namespaces_table.delete().where(
+                namespaces_table.c.id == namespace_id))


-def _populate_metadata(meta, metadata_path=None, merge=False,
+def _populate_metadata(meta, engine, metadata_path=None, merge=False,
                        prefer_new=False, overwrite=False):
     if not metadata_path:
         metadata_path = CONF.metadata_source_path
@@ -213,12 +233,13 @@ def _populate_metadata(meta, metadata_path=None, merge=False,
                   metadata_path)
         return

-    namespaces_table = get_metadef_namespaces_table(meta)
-    namespace_rt_table = get_metadef_namespace_resource_types_table(meta)
-    objects_table = get_metadef_objects_table(meta)
-    tags_table = get_metadef_tags_table(meta)
-    properties_table = get_metadef_properties_table(meta)
-    resource_types_table = get_metadef_resource_types_table(meta)
+    namespaces_table = get_metadef_namespaces_table(meta, engine)
+    namespace_rt_table = get_metadef_namespace_resource_types_table(
+        meta, engine)
+    objects_table = get_metadef_objects_table(meta, engine)
+    tags_table = get_metadef_tags_table(meta, engine)
+    properties_table = get_metadef_properties_table(meta, engine)
+    resource_types_table = get_metadef_resource_types_table(meta, engine)

     for json_schema_file in json_schema_files:
         try:
@@ -241,13 +262,16 @@ def _populate_metadata(meta, metadata_path=None, merge=False,
                 'owner': metadata.get('owner', 'admin')
             }

-            db_namespace = select(
-                namespaces_table.c.id
-            ).where(
-                namespaces_table.c.namespace == values['namespace']
-            ).select_from(
-                namespaces_table
-            ).execute().fetchone()
+            with engine.connect() as conn:
+                db_namespace = conn.execute(
+                    select(
+                        namespaces_table.c.id
+                    ).where(
+                        namespaces_table.c.namespace == values['namespace']
+                    ).select_from(
+                        namespaces_table
+                    )
+                ).fetchone()

             if db_namespace and overwrite:
                 LOG.info(_LI("Overwriting namespace %s"), values['namespace'])
@@ -256,15 +280,18 @@
             if not db_namespace:
                 values.update({'created_at': timeutils.utcnow()})
-                _insert_data_to_db(namespaces_table, values)
+                _insert_data_to_db(engine, namespaces_table, values)

-                db_namespace = select(
-                    namespaces_table.c.id
-                ).where(
-                    namespaces_table.c.namespace == values['namespace']
-                ).select_from(
-                    namespaces_table
-                ).execute().fetchone()
+                with engine.connect() as conn:
+                    db_namespace = conn.execute(
+                        select(
+                            namespaces_table.c.id
+                        ).where(
+                            namespaces_table.c.namespace == values['namespace']
+                        ).select_from(
+                            namespaces_table
+                        )
+                    ).fetchone()
             elif not merge:
                 LOG.info(_LI("Skipping namespace %s. It already exists in the "
                              "database."), values['namespace'])
@@ -277,15 +304,16 @@
         namespace_id = db_namespace[0]

         for resource_type in metadata.get('resource_type_associations', []):
-            rt_id = _get_resource_type_id(meta, resource_type['name'])
+            rt_id = _get_resource_type_id(meta, engine, resource_type['name'])
             if not rt_id:
                 val = {
                     'name': resource_type['name'],
                     'created_at': timeutils.utcnow(),
                     'protected': True
                 }
-                _insert_data_to_db(resource_types_table, val)
-                rt_id = _get_resource_type_id(meta, resource_type['name'])
+                _insert_data_to_db(engine, resource_types_table, val)
+                rt_id = _get_resource_type_id(
+                    meta, engine, resource_type['name'])
             elif prefer_new:
                 val = {'updated_at': timeutils.utcnow()}
                 _update_data_in_db(resource_types_table, val,
@@ -299,10 +327,10 @@
                 'prefix': resource_type.get('prefix')
             }
             namespace_resource_type = _get_namespace_resource_type_by_ids(
-                meta, namespace_id, rt_id)
+                meta, engine, namespace_id, rt_id)
             if not namespace_resource_type:
                 values.update({'created_at': timeutils.utcnow()})
-                _insert_data_to_db(namespace_rt_table, values)
+                _insert_data_to_db(engine, namespace_rt_table, values)
             elif prefer_new:
                 values.update({'updated_at': timeutils.utcnow()})
                 _update_rt_association(namespace_rt_table, values,
@@ -315,11 +343,11 @@
                 'json_schema': json.dumps(schema)
             }
             property_id = _get_resource_id(
-                properties_table, namespace_id, name,
+                properties_table, engine, namespace_id, name,
             )
             if not property_id:
                 values.update({'created_at': timeutils.utcnow()})
-                _insert_data_to_db(properties_table, values)
+                _insert_data_to_db(engine, properties_table, values)
             elif prefer_new:
                 values.update({'updated_at': timeutils.utcnow()})
                 _update_data_in_db(properties_table, values,
@@ -333,11 +361,11 @@
                 'json_schema': json.dumps(
                     object.get('properties'))
             }
-            object_id = _get_resource_id(objects_table, namespace_id,
+            object_id = _get_resource_id(objects_table, engine, namespace_id,
                                          object['name'])
             if not object_id:
                 values.update({'created_at': timeutils.utcnow()})
-                _insert_data_to_db(objects_table, values)
+                _insert_data_to_db(engine, objects_table, values)
             elif prefer_new:
                 values.update({'updated_at': timeutils.utcnow()})
                 _update_data_in_db(objects_table, values,
@@ -348,10 +376,11 @@
                 'name': tag.get('name'),
                 'namespace_id': namespace_id,
             }
-            tag_id = _get_resource_id(tags_table, namespace_id, tag['name'])
+            tag_id = _get_resource_id(
+                tags_table, engine, namespace_id, tag['name'])
             if not tag_id:
                 values.update({'created_at': timeutils.utcnow()})
-                _insert_data_to_db(tags_table, values)
+                _insert_data_to_db(engine, tags_table, values)
             elif prefer_new:
                 values.update({'updated_at': timeutils.utcnow()})
                 _update_data_in_db(tags_table, values,
@@ -362,37 +391,47 @@
     LOG.info(_LI("Metadata loading finished"))


-def _insert_data_to_db(table, values, log_exception=True):
+def _insert_data_to_db(engine, table, values, log_exception=True):
     try:
-        table.insert(values=values).execute()
+        with engine.connect() as conn:
+            conn.execute(table.insert().values(values))
     except sqlalchemy.exc.IntegrityError:
         if log_exception:
             LOG.warning(_LW("Duplicate entry for values: %s"), values)


-def _update_data_in_db(table, values, column, value):
+def _update_data_in_db(engine, table, values, column, value):
     try:
-        (table.update(values=values).
-         where(column == value).execute())
+        with engine.connect() as conn:
+            conn.execute(
+                table.update().values(values).where(column == value)
+            )
     except sqlalchemy.exc.IntegrityError:
         LOG.warning(_LW("Duplicate entry for values: %s"), values)


-def _update_rt_association(table, values, rt_id, namespace_id):
+def _update_rt_association(engine, table, values, rt_id, namespace_id):
     try:
-        (table.update(values=values).
-         where(and_(table.c.resource_type_id == rt_id,
-                    table.c.namespace_id == namespace_id)).execute())
+        with engine.connect() as conn:
+            conn.execute(
+                table.update().values(values).where(
+                    and_(
+                        table.c.resource_type_id == rt_id,
+                        table.c.namespace_id == namespace_id,
+                    )
+                )
+            )
     except sqlalchemy.exc.IntegrityError:
         LOG.warning(_LW("Duplicate entry for values: %s"), values)


-def _export_data_to_file(meta, path):
+def _export_data_to_file(meta, engine, path):
     if not path:
         path = CONF.metadata_source_path

     namespace_table = get_metadef_namespaces_table(meta)
-    namespaces = namespace_table.select().execute().fetchall()
+    with engine.connect() as conn:
+        namespaces = conn.execute(namespace_table.select()).fetchall()

     pattern = re.compile(r'[\W_]+', re.UNICODE)
@@ -412,16 +451,16 @@ def _export_data_to_file(meta, path):
             'tags': []
         }

-        namespace_resource_types = _get_namespace_resource_types(meta,
-                                                                 namespace_id)
-        db_objects = _get_objects(meta, namespace_id)
-        db_properties = _get_properties(meta, namespace_id)
-        db_tags = _get_tags(meta, namespace_id)
+        namespace_resource_types = _get_namespace_resource_types(
+            meta, engine, namespace_id)
+        db_objects = _get_objects(meta, engine, namespace_id)
+        db_properties = _get_properties(meta, engine, namespace_id)
+        db_tags = _get_tags(meta, engine, namespace_id)

         resource_types = []
         for namespace_resource_type in namespace_resource_types:
             resource_type = _get_resource_type(
-                meta, namespace_resource_type['resource_type_id'])
+                meta, engine, namespace_resource_type['resource_type_id'])
             resource_types.append({
                 'name': resource_type['name'],
                 'prefix': namespace_resource_type['prefix'],
@@ -476,7 +515,6 @@ def _export_data_to_file(meta, path):
 def db_load_metadefs(engine, metadata_path=None, merge=False,
                      prefer_new=False, overwrite=False):
     meta = MetaData()
-    meta.bind = engine

     if not merge and (prefer_new or overwrite):
         LOG.error(_LE("To use --prefer_new or --overwrite you need to combine "
@@ -488,18 +526,17 @@
                       "--prefer_new, --overwrite"))
         return

-    _populate_metadata(meta, metadata_path, merge, prefer_new, overwrite)
+    _populate_metadata(
+        meta, engine, metadata_path, merge, prefer_new, overwrite)


 def db_unload_metadefs(engine):
     meta = MetaData()
-    meta.bind = engine

-    _clear_metadata(meta)
+    _clear_metadata(meta, engine)


 def db_export_metadefs(engine, metadata_path=None):
     meta = MetaData()
-    meta.bind = engine

-    _export_data_to_file(meta, metadata_path)
+    _export_data_to_file(meta, engine, metadata_path)

View File

@@ -213,13 +213,6 @@ class WarningsFixture(pyfixtures.Fixture):
             message='Using non-integer/slice indices on Row is deprecated ',
         )
         warnings.filterwarnings(
-            'ignore',
-            module='glance',
-            category=sqla_exc.SADeprecationWarning,
-            message='The MetaData.bind argument is deprecated ',
-        )
-        warnings.filterwarnings(
             'ignore',
             module='glance',
@@ -234,13 +227,6 @@ class WarningsFixture(pyfixtures.Fixture):
             message=r'The Engine.execute\(\) method is considered legacy ',
         )
         warnings.filterwarnings(
-            'ignore',
-            module='glance',
-            category=sqla_exc.SADeprecationWarning,
-            message=r'The autoload parameter is deprecated ',
-        )
-        warnings.filterwarnings(
             'ignore',
             module='glance',
@@ -248,20 +234,6 @@ class WarningsFixture(pyfixtures.Fixture):
             message=r'Query.values\(\) is deprecated ',
         )
         warnings.filterwarnings(
-            'ignore',
-            module='glance',
-            category=sqla_exc.SADeprecationWarning,
-            message='The insert.values parameter will be removed ',
-        )
-        warnings.filterwarnings(
-            'ignore',
-            module='glance',
-            category=sqla_exc.SADeprecationWarning,
-            message=r'The ``bind`` argument for schema methods ',
-        )
-        warnings.filterwarnings(
             'ignore',
             module='glance',