db: Use module-level imports for sqlalchemy (for real)

Change If90d9295b231166a28c2cc350d324691821a696b kicked off this effort
but only changed the migrations. This change completes the job.

Change-Id: Ic0f2c326ebce8d7c89b0debf5225cbe471daca03
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
This commit is contained in:
Stephen Finucane 2021-06-15 15:05:17 +01:00
parent e7a7fd51d1
commit 43de2421b3
15 changed files with 1128 additions and 1098 deletions

View File

@ -25,8 +25,8 @@ from oslo_config import cfg
from oslo_upgradecheck import common_checks
from oslo_upgradecheck import upgradecheck
import pkg_resources
import sqlalchemy as sa
from sqlalchemy import func as sqlfunc
from sqlalchemy import MetaData, Table, select
from nova.cmd import common as cmd_common
import nova.conf
@ -86,10 +86,10 @@ class UpgradeCommands(upgradecheck.UpgradeCommands):
# table, or by only counting compute nodes with a service version of at
# least 15 which was the highest service version when Newton was
# released.
meta = MetaData(bind=db_session.get_engine(context=context))
compute_nodes = Table('compute_nodes', meta, autoload=True)
return select([sqlfunc.count()]).select_from(compute_nodes).where(
compute_nodes.c.deleted == 0).scalar()
meta = sa.MetaData(bind=db_session.get_engine(context=context))
compute_nodes = sa.Table('compute_nodes', meta, autoload=True)
return sa.select([sqlfunc.count()]).select_from(compute_nodes).where(
compute_nodes.c.deleted == 0).scalar()
def _check_cellsv2(self):
"""Checks to see if cells v2 has been setup.
@ -102,7 +102,7 @@ class UpgradeCommands(upgradecheck.UpgradeCommands):
this on an initial install. This also has to be careful about checking
for compute nodes if there are no host mappings on a fresh install.
"""
meta = MetaData()
meta = sa.MetaData()
meta.bind = db_session.get_api_engine()
cell_mappings = self._get_cell_mappings()
@ -122,8 +122,9 @@ class UpgradeCommands(upgradecheck.UpgradeCommands):
'retry.')
return upgradecheck.Result(upgradecheck.Code.FAILURE, msg)
host_mappings = Table('host_mappings', meta, autoload=True)
count = select([sqlfunc.count()]).select_from(host_mappings).scalar()
host_mappings = sa.Table('host_mappings', meta, autoload=True)
count = sa.select([sqlfunc.count()]).select_from(host_mappings)\
.scalar()
if count == 0:
# This may be a fresh install in which case there may not be any
# compute_nodes in the cell database if the nova-compute service

View File

@ -35,26 +35,12 @@ from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy import Boolean
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import aliased
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import noload
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm import undefer
from sqlalchemy.schema import Table
from sqlalchemy import exc as sqla_exc
from sqlalchemy import orm
from sqlalchemy import schema
from sqlalchemy import sql
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import cast
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql import false
from sqlalchemy.sql import expression
from sqlalchemy.sql import func
from sqlalchemy.sql import null
from sqlalchemy.sql import true
from nova import block_device
from nova.compute import task_states
@ -94,7 +80,7 @@ def _context_manager_from_context(context):
def _joinedload_all(column):
elements = column.split('.')
joined = joinedload(elements.pop(0))
joined = orm.joinedload(elements.pop(0))
for element in elements:
joined = joined.joinedload(element)
@ -293,9 +279,10 @@ def model_query(
# us to return both our projects and unowned projects.
if nova.context.is_user_context(context) and project_only:
if project_only == 'allow_none':
query = query.\
filter(or_(model.project_id == context.project_id,
model.project_id == null()))
query = query.filter(sql.or_(
model.project_id == context.project_id,
model.project_id == sql.null()
))
else:
query = query.filter_by(project_id=context.project_id)
@ -367,7 +354,7 @@ class EqualityCondition(object):
def clauses(self, field):
# method signature requires us to return an iterable even if for OR
# operator this will actually be a single clause
return [or_(*[field == value for value in self.values])]
return [sql.or_(*[field == value for value in self.values])]
class InequalityCondition(object):
@ -395,9 +382,10 @@ def service_destroy(context, service_id):
# TODO(sbauza): Remove the service_id filter in a later release
# once we are sure that all compute nodes report the host field
model_query(context, models.ComputeNode).\
filter(or_(models.ComputeNode.service_id == service_id,
models.ComputeNode.host == service['host'])).\
soft_delete(synchronize_session=False)
filter(sql.or_(
models.ComputeNode.service_id == service_id,
models.ComputeNode.host == service['host'])).\
soft_delete(synchronize_session=False)
@pick_context_manager_reader
@ -434,7 +422,7 @@ def service_get_minimum_version(context, binaries):
func.min(models.Service.version)).\
filter(models.Service.binary.in_(binaries)).\
filter(models.Service.deleted == 0).\
filter(models.Service.forced_down == false()).\
filter(models.Service.forced_down == sql.false()).\
group_by(models.Service.binary)
return dict(min_versions)
@ -615,7 +603,7 @@ def _compute_node_select(context, filters=None, limit=None, marker=None):
select = select.limit(limit)
# Explicitly order by id, so we're not dependent on the native sort
# order of the underlying DB.
select = select.order_by(asc("id"))
select = select.order_by(expression.asc("id"))
return select
@ -921,7 +909,7 @@ def compute_node_statistics(context):
inner_sel.c.host == services_tbl.c.host,
inner_sel.c.service_id == services_tbl.c.id
),
services_tbl.c.disabled == false(),
services_tbl.c.disabled == sql.false(),
services_tbl.c.binary == 'nova-compute',
services_tbl.c.deleted == 0
)
@ -1106,9 +1094,9 @@ def virtual_interface_get_by_instance(context, instance_uuid):
:param instance_uuid: UUID of the instance to filter on.
"""
vif_refs = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
order_by(asc("created_at"), asc("id")).\
all()
filter_by(instance_uuid=instance_uuid).\
order_by(expression.asc("created_at"), expression.asc("id")).\
all()
return vif_refs
@ -1414,7 +1402,7 @@ def instance_get(context, instance_id, columns_to_join=None):
def _build_instance_get(context, columns_to_join=None):
query = model_query(context, models.Instance, project_only=True).\
options(_joinedload_all('security_groups.rules')).\
options(joinedload('info_cache'))
options(orm.joinedload('info_cache'))
if columns_to_join is None:
columns_to_join = ['metadata', 'system_metadata']
for column in columns_to_join:
@ -1422,7 +1410,7 @@ def _build_instance_get(context, columns_to_join=None):
# Already always joined above
continue
if 'extra.' in column:
query = query.options(undefer(column))
query = query.options(orm.undefer(column))
elif column in ['metadata', 'system_metadata']:
# NOTE(melwitt): We use subqueryload() instead of joinedload() for
# metadata and system_metadata because of the one-to-many
@ -1432,13 +1420,13 @@ def _build_instance_get(context, columns_to_join=None):
# in a large data transfer. Instead, the subqueryload() will
# perform additional queries to obtain metadata and system_metadata
# for the instance.
query = query.options(subqueryload(column))
query = query.options(orm.subqueryload(column))
else:
query = query.options(joinedload(column))
query = query.options(orm.joinedload(column))
# NOTE(alaski) Stop lazy loading of columns not needed.
for col in ['metadata', 'system_metadata']:
if col not in columns_to_join:
query = query.options(noload(col))
query = query.options(orm.noload(col))
# NOTE(melwitt): We need to use order_by(<unique column>) so that the
# additional queries emitted by subqueryload() include the same ordering as
# used by the parent query.
@ -1531,7 +1519,7 @@ def instance_get_all(context, columns_to_join=None):
_manual_join_columns(columns_to_join))
query = model_query(context, models.Instance)
for column in columns_to_join_new:
query = query.options(joinedload(column))
query = query.options(orm.joinedload(column))
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
@ -1673,9 +1661,9 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
query_prefix = context.session.query(models.Instance)
for column in columns_to_join_new:
if 'extra.' in column:
query_prefix = query_prefix.options(undefer(column))
query_prefix = query_prefix.options(orm.undefer(column))
else:
query_prefix = query_prefix.options(joinedload(column))
query_prefix = query_prefix.options(orm.joinedload(column))
# Note: order_by is done in the sqlalchemy.utils.py paginate_query(),
# no need to do it here as well
@ -1695,7 +1683,7 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
deleted = filters.pop('deleted')
if deleted:
if filters.pop('soft_deleted', True):
delete = or_(
delete = sql.or_(
models.Instance.deleted == models.Instance.id,
models.Instance.vm_state == vm_states.SOFT_DELETED
)
@ -1710,10 +1698,10 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
if not filters.pop('soft_deleted', False):
# It would be better to have vm_state not be nullable
# but until then we test it explicitly as a workaround.
not_soft_deleted = or_(
not_soft_deleted = sql.or_(
models.Instance.vm_state != vm_states.SOFT_DELETED,
models.Instance.vm_state == null()
)
models.Instance.vm_state == sql.null()
)
query_prefix = query_prefix.filter(not_soft_deleted)
if 'cleaned' in filters:
@ -1730,14 +1718,14 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
query_prefix = query_prefix.filter(models.Tag.tag == first_tag)
for tag in tags:
tag_alias = aliased(models.Tag)
tag_alias = orm.aliased(models.Tag)
query_prefix = query_prefix.join(tag_alias,
models.Instance.tags)
query_prefix = query_prefix.filter(tag_alias.tag == tag)
if 'tags-any' in filters:
tags = filters.pop('tags-any')
tag_alias = aliased(models.Tag)
tag_alias = orm.aliased(models.Tag)
query_prefix = query_prefix.join(tag_alias, models.Instance.tags)
query_prefix = query_prefix.filter(tag_alias.tag.in_(tags))
@ -1749,7 +1737,7 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
subq = subq.filter(models.Tag.tag == first_tag)
for tag in tags:
tag_alias = aliased(models.Tag)
tag_alias = orm.aliased(models.Tag)
subq = subq.join(tag_alias, models.Instance.tags)
subq = subq.filter(tag_alias.tag == tag)
@ -1768,14 +1756,15 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
filters['user_id'] = context.user_id
if filters.pop('hidden', False):
query_prefix = query_prefix.filter(models.Instance.hidden == true())
query_prefix = query_prefix.filter(
models.Instance.hidden == sql.true())
else:
# If the query should not include hidden instances, then
# filter instances with hidden=False or hidden=NULL because
# older records may have no value set.
query_prefix = query_prefix.filter(or_(
models.Instance.hidden == false(),
models.Instance.hidden == null()))
query_prefix = query_prefix.filter(sql.or_(
models.Instance.hidden == sql.false(),
models.Instance.hidden == sql.null()))
# Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching
@ -1856,9 +1845,9 @@ def _model_get_uuid_by_sort_filters(context, model, sort_keys, sort_dirs,
for skey, sdir, val in zip(sort_keys, sort_dirs, values):
# Apply ordering to our query for the key, direction we're processing
if sdir == 'desc':
query = query.order_by(desc(getattr(model, skey)))
query = query.order_by(expression.desc(getattr(model, skey)))
else:
query = query.order_by(asc(getattr(model, skey)))
query = query.order_by(expression.asc(getattr(model, skey)))
# Build a list of equivalence requirements on keys we've already
# processed through the loop. In other words, if we're adding
@ -1869,8 +1858,8 @@ def _model_get_uuid_by_sort_filters(context, model, sort_keys, sort_dirs,
(getattr(model, sort_keys[equal_attr]) == values[equal_attr]))
model_attr = getattr(model, skey)
if isinstance(model_attr.type, Boolean):
model_attr = cast(model_attr, Integer)
if isinstance(model_attr.type, sa.Boolean):
model_attr = expression.cast(model_attr, sa.Integer)
val = int(val)
if skey == sort_keys[-1]:
@ -1890,11 +1879,11 @@ def _model_get_uuid_by_sort_filters(context, model, sort_keys, sort_dirs,
# AND together all the above
crit_attrs.append(crit)
criteria.append(and_(*crit_attrs))
criteria.append(sql.and_(*crit_attrs))
key_index += 1
# OR together all the ANDs
query = query.filter(or_(*criteria))
query = query.filter(sql.or_(*criteria))
# We can't raise InstanceNotFound because we don't have a uuid to
# be looking for, so just return nothing if no match.
@ -2130,12 +2119,13 @@ def instance_get_active_by_window_joined(context, begin, end=None,
for column in columns_to_join_new:
if 'extra.' in column:
query = query.options(undefer(column))
query = query.options(orm.undefer(column))
else:
query = query.options(joinedload(column))
query = query.options(orm.joinedload(column))
query = query.filter(or_(models.Instance.terminated_at == null(),
models.Instance.terminated_at > begin))
query = query.filter(sql.or_(
models.Instance.terminated_at == sql.null(),
models.Instance.terminated_at > begin))
if end:
query = query.filter(models.Instance.launched_at < end)
if project_id:
@ -2165,9 +2155,9 @@ def _instance_get_all_query(context, project_only=False, joins=None):
project_only=project_only)
for column in joins:
if 'extra.' in column:
query = query.options(undefer(column))
query = query.options(orm.undefer(column))
else:
query = query.options(joinedload(column))
query = query.options(orm.joinedload(column))
return query
@ -2581,7 +2571,7 @@ def instance_extra_get_by_instance_uuid(
columns = ['numa_topology', 'pci_requests', 'flavor', 'vcpu_model',
'trusted_certs', 'resources', 'migration_context']
for column in columns:
query = query.options(undefer(column))
query = query.options(orm.undefer(column))
instance_extra = query.first()
return instance_extra
@ -2875,7 +2865,7 @@ def _block_device_mapping_get_query(context, columns_to_join=None):
query = model_query(context, models.BlockDeviceMapping)
for column in columns_to_join:
query = query.options(joinedload(column))
query = query.options(orm.joinedload(column))
return query
@ -3408,10 +3398,13 @@ def migration_get_in_progress_by_host_and_node(context, host, node):
# and the instance is in VERIFY_RESIZE state, so the end state
# for a resize is actually 'confirmed' or 'reverted'.
return model_query(context, models.Migration).\
filter(or_(and_(models.Migration.source_compute == host,
models.Migration.source_node == node),
and_(models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(sql.or_(
sql.and_(
models.Migration.source_compute == host,
models.Migration.source_node == node),
sql.and_(
models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(~models.Migration.status.in_(['confirmed', 'reverted',
'error', 'failed',
'completed', 'cancelled',
@ -3465,15 +3458,17 @@ def migration_get_all_by_filters(context, filters,
query = query.filter(models.Migration.status.in_(status))
if "host" in filters:
host = filters["host"]
query = query.filter(or_(models.Migration.source_compute == host,
models.Migration.dest_compute == host))
query = query.filter(sql.or_(
models.Migration.source_compute == host,
models.Migration.dest_compute == host))
elif "source_compute" in filters:
host = filters['source_compute']
query = query.filter(models.Migration.source_compute == host)
if "node" in filters:
node = filters['node']
query = query.filter(or_(models.Migration.source_node == node,
models.Migration.dest_node == node))
query = query.filter(sql.or_(
models.Migration.source_node == node,
models.Migration.dest_node == node))
if "migration_type" in filters:
migtype = filters["migration_type"]
query = query.filter(models.Migration.migration_type == migtype)
@ -3551,10 +3546,13 @@ def migration_get_in_progress_and_error_by_host_and_node(context, host, node):
host and node.
"""
return model_query(context, models.Migration).\
filter(or_(and_(models.Migration.source_compute == host,
models.Migration.source_node == node),
and_(models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(sql.or_(
sql.and_(
models.Migration.source_compute == host,
models.Migration.source_node == node),
sql.and_(
models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(~models.Migration.status.in_(['confirmed', 'reverted',
'failed', 'completed',
'cancelled', 'done'])).\
@ -3738,11 +3736,11 @@ def bw_usage_update(
# same record is updated every time. It can be removed after adding
# unique constraint to this model.
bw_usage = model_query(context, models.BandwidthUsage,
read_deleted='yes').\
filter_by(start_period=ts_values['start_period']).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
order_by(asc(models.BandwidthUsage.id)).first()
read_deleted='yes').\
filter_by(start_period=ts_values['start_period']).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
order_by(expression.asc(models.BandwidthUsage.id)).first()
if bw_usage:
bw_usage.update(values)
@ -3770,11 +3768,12 @@ def bw_usage_update(
def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time."""
return model_query(context, models.VolumeUsage, read_deleted="yes").\
filter(or_(models.VolumeUsage.tot_last_refreshed == null(),
models.VolumeUsage.tot_last_refreshed > begin,
models.VolumeUsage.curr_last_refreshed == null(),
models.VolumeUsage.curr_last_refreshed > begin,
)).all()
filter(sql.or_(
models.VolumeUsage.tot_last_refreshed == sql.null(),
models.VolumeUsage.tot_last_refreshed > begin,
models.VolumeUsage.curr_last_refreshed == sql.null(),
models.VolumeUsage.curr_last_refreshed > begin,
)).all()
@require_context
@ -3992,8 +3991,9 @@ def instance_fault_get_by_instance_uuids(
query = query.join(latest_faults,
faults_tbl.c.id == latest_faults.c.max_id)
else:
query = query.filter(models.InstanceFault.instance_uuid.in_(
instance_uuids)).order_by(desc("id"))
query = query.filter(
models.InstanceFault.instance_uuid.in_(instance_uuids)
).order_by(expression.desc("id"))
output = {}
for instance_uuid in instance_uuids:
@ -4071,18 +4071,18 @@ def action_get_by_request_id(context, instance_uuid, request_id):
def _action_get_by_request_id(context, instance_uuid, request_id):
result = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid).\
filter_by(request_id=request_id).\
order_by(desc("created_at"), desc("id")).\
first()
filter_by(instance_uuid=instance_uuid).\
filter_by(request_id=request_id).\
order_by(expression.desc("created_at"), expression.desc("id")).\
first()
return result
def _action_get_last_created_by_instance_uuid(context, instance_uuid):
result = (model_query(context, models.InstanceAction).
filter_by(instance_uuid=instance_uuid).
order_by(desc("created_at"), desc("id")).
first())
result = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid).\
order_by(expression.desc("created_at"), expression.desc("id")).\
first()
return result
@ -4180,9 +4180,9 @@ def action_event_finish(context, values):
def action_events_get(context, action_id):
"""Get the events by action id."""
events = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
order_by(desc("created_at"), desc("id")).\
all()
filter_by(action_id=action_id).\
order_by(expression.desc("created_at"), expression.desc("id")).\
all()
return events
@ -4376,9 +4376,9 @@ def _get_fk_stmts(metadata, conn, table, column, records):
# Create the shadow table for the referencing table.
fk_shadow_tablename = _SHADOW_TABLE_PREFIX + fk_table.name
try:
fk_shadow_table = Table(fk_shadow_tablename, metadata,
autoload=True)
except NoSuchTableError:
fk_shadow_table = schema.Table(
fk_shadow_tablename, metadata, autoload=True)
except sqla_exc.NoSuchTableError:
# No corresponding shadow table; skip it.
continue
@ -4472,8 +4472,8 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before,
rows_archived = 0
deleted_instance_uuids = []
try:
shadow_table = Table(shadow_tablename, metadata, autoload=True)
except NoSuchTableError:
shadow_table = schema.Table(shadow_tablename, metadata, autoload=True)
except sqla_exc.NoSuchTableError:
# No corresponding shadow table; skip it.
return rows_archived, deleted_instance_uuids, {}
@ -4589,7 +4589,7 @@ def archive_deleted_rows(context=None, max_rows=None, before=None,
table_to_rows_archived = collections.defaultdict(int)
deleted_instance_uuids = []
total_rows_archived = 0
meta = MetaData(get_engine(use_slave=True, context=context))
meta = sa.MetaData(get_engine(use_slave=True, context=context))
meta.reflect()
# Get the sorted list of tables in order of foreign key dependency.
# Process the parent tables and find their dependent records in order to
@ -4634,7 +4634,7 @@ def _purgeable_tables(metadata):
def purge_shadow_tables(context, before_date, status_fn=None):
engine = get_engine(context=context)
conn = engine.connect()
metadata = MetaData()
metadata = sa.MetaData()
metadata.bind = engine
metadata.reflect()
total_deleted = 0

View File

@ -13,28 +13,18 @@
from oslo_db.sqlalchemy import models
from oslo_log import log as logging
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy import Enum
import sqlalchemy as sa
import sqlalchemy.dialects.mysql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import orm
from sqlalchemy.orm import backref
from sqlalchemy import schema
from sqlalchemy import String
from sqlalchemy import Text
from sqlalchemy import Unicode
LOG = logging.getLogger(__name__)
def MediumText():
return Text().with_variant(MEDIUMTEXT(), 'mysql')
return sa.Text().with_variant(
sqlalchemy.dialects.mysql.MEDIUMTEXT(), 'mysql')
class _NovaAPIBase(models.ModelBase, models.TimestampMixin):
@ -52,9 +42,10 @@ class AggregateHost(API_BASE):
name="uniq_aggregate_hosts0host0aggregate_id"
),
)
id = Column(Integer, primary_key=True, autoincrement=True)
host = Column(String(255))
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
host = sa.Column(sa.String(255))
aggregate_id = sa.Column(
sa.Integer, sa.ForeignKey('aggregates.id'), nullable=False)
class AggregateMetadata(API_BASE):
@ -64,30 +55,33 @@ class AggregateMetadata(API_BASE):
schema.UniqueConstraint("aggregate_id", "key",
name="uniq_aggregate_metadata0aggregate_id0key"
),
Index('aggregate_metadata_key_idx', 'key'),
sa.Index('aggregate_metadata_key_idx', 'key'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255), nullable=False)
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
id = sa.Column(sa.Integer, primary_key=True)
key = sa.Column(sa.String(255), nullable=False)
value = sa.Column(sa.String(255), nullable=False)
aggregate_id = sa.Column(
sa.Integer, sa.ForeignKey('aggregates.id'), nullable=False)
class Aggregate(API_BASE):
"""Represents a cluster of hosts that exists in this zone."""
__tablename__ = 'aggregates'
__table_args__ = (Index('aggregate_uuid_idx', 'uuid'),
schema.UniqueConstraint(
"name", name="uniq_aggregate0name")
)
id = Column(Integer, primary_key=True, autoincrement=True)
uuid = Column(String(36))
name = Column(String(255))
_hosts = orm.relationship(AggregateHost,
primaryjoin='Aggregate.id == AggregateHost.aggregate_id',
cascade='delete')
_metadata = orm.relationship(AggregateMetadata,
primaryjoin='Aggregate.id == AggregateMetadata.aggregate_id',
cascade='delete')
__table_args__ = (
sa.Index('aggregate_uuid_idx', 'uuid'),
schema.UniqueConstraint("name", name="uniq_aggregate0name")
)
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
uuid = sa.Column(sa.String(36))
name = sa.Column(sa.String(255))
_hosts = orm.relationship(
AggregateHost,
primaryjoin='Aggregate.id == AggregateHost.aggregate_id',
cascade='delete')
_metadata = orm.relationship(
AggregateMetadata,
primaryjoin='Aggregate.id == AggregateMetadata.aggregate_id',
cascade='delete')
@property
def _extra_keys(self):
@ -111,59 +105,67 @@ class Aggregate(API_BASE):
class CellMapping(API_BASE):
"""Contains information on communicating with a cell"""
__tablename__ = 'cell_mappings'
__table_args__ = (Index('uuid_idx', 'uuid'),
schema.UniqueConstraint('uuid',
name='uniq_cell_mappings0uuid'))
__table_args__ = (
sa.Index('uuid_idx', 'uuid'),
schema.UniqueConstraint('uuid', name='uniq_cell_mappings0uuid'),
)
id = Column(Integer, primary_key=True)
uuid = Column(String(36), nullable=False)
name = Column(String(255))
transport_url = Column(Text())
database_connection = Column(Text())
disabled = Column(Boolean, default=False)
host_mapping = orm.relationship('HostMapping',
backref=backref('cell_mapping', uselist=False),
foreign_keys=id,
primaryjoin=(
'CellMapping.id == HostMapping.cell_id'))
id = sa.Column(sa.Integer, primary_key=True)
uuid = sa.Column(sa.String(36), nullable=False)
name = sa.Column(sa.String(255))
transport_url = sa.Column(sa.Text())
database_connection = sa.Column(sa.Text())
disabled = sa.Column(sa.Boolean, default=False)
host_mapping = orm.relationship(
'HostMapping',
backref=orm.backref('cell_mapping', uselist=False),
foreign_keys=id,
primaryjoin='CellMapping.id == HostMapping.cell_id')
class InstanceMapping(API_BASE):
"""Contains the mapping of an instance to which cell it is in"""
__tablename__ = 'instance_mappings'
__table_args__ = (Index('project_id_idx', 'project_id'),
Index('instance_uuid_idx', 'instance_uuid'),
schema.UniqueConstraint('instance_uuid',
name='uniq_instance_mappings0instance_uuid'),
Index('instance_mappings_user_id_project_id_idx',
'user_id', 'project_id'))
__table_args__ = (
sa.Index('project_id_idx', 'project_id'),
sa.Index('instance_uuid_idx', 'instance_uuid'),
schema.UniqueConstraint(
'instance_uuid', name='uniq_instance_mappings0instance_uuid'),
sa.Index(
'instance_mappings_user_id_project_id_idx',
'user_id',
'project_id',
),
)
id = Column(Integer, primary_key=True)
instance_uuid = Column(String(36), nullable=False)
cell_id = Column(Integer, ForeignKey('cell_mappings.id'),
nullable=True)
project_id = Column(String(255), nullable=False)
id = sa.Column(sa.Integer, primary_key=True)
instance_uuid = sa.Column(sa.String(36), nullable=False)
cell_id = sa.Column(
sa.Integer, sa.ForeignKey('cell_mappings.id'), nullable=True)
project_id = sa.Column(sa.String(255), nullable=False)
# FIXME(melwitt): This should eventually be non-nullable, but we need a
# transition period first.
user_id = Column(String(255), nullable=True)
queued_for_delete = Column(Boolean)
cell_mapping = orm.relationship('CellMapping',
backref=backref('instance_mapping', uselist=False),
foreign_keys=cell_id,
primaryjoin=('InstanceMapping.cell_id == CellMapping.id'))
user_id = sa.Column(sa.String(255), nullable=True)
queued_for_delete = sa.Column(sa.Boolean)
cell_mapping = orm.relationship(
'CellMapping',
backref=orm.backref('instance_mapping', uselist=False),
foreign_keys=cell_id,
primaryjoin='InstanceMapping.cell_id == CellMapping.id')
class HostMapping(API_BASE):
"""Contains mapping of a compute host to which cell it is in"""
__tablename__ = "host_mappings"
__table_args__ = (Index('host_idx', 'host'),
schema.UniqueConstraint('host',
name='uniq_host_mappings0host'))
__table_args__ = (
sa.Index('host_idx', 'host'),
schema.UniqueConstraint('host', name='uniq_host_mappings0host'),
)
id = Column(Integer, primary_key=True)
cell_id = Column(Integer, ForeignKey('cell_mappings.id'),
nullable=False)
host = Column(String(255), nullable=False)
id = sa.Column(sa.Integer, primary_key=True)
cell_id = sa.Column(
sa.Integer, sa.ForeignKey('cell_mappings.id'), nullable=False)
host = sa.Column(sa.String(255), nullable=False)
class RequestSpec(API_BASE):
@ -171,14 +173,14 @@ class RequestSpec(API_BASE):
__tablename__ = 'request_specs'
__table_args__ = (
Index('request_spec_instance_uuid_idx', 'instance_uuid'),
schema.UniqueConstraint('instance_uuid',
name='uniq_request_specs0instance_uuid'),
)
sa.Index('request_spec_instance_uuid_idx', 'instance_uuid'),
schema.UniqueConstraint(
'instance_uuid', name='uniq_request_specs0instance_uuid'),
)
id = Column(Integer, primary_key=True)
instance_uuid = Column(String(36), nullable=False)
spec = Column(MediumText(), nullable=False)
id = sa.Column(sa.Integer, primary_key=True)
instance_uuid = sa.Column(sa.String(36), nullable=False)
spec = sa.Column(MediumText(), nullable=False)
class Flavors(API_BASE):
@ -188,39 +190,40 @@ class Flavors(API_BASE):
schema.UniqueConstraint("flavorid", name="uniq_flavors0flavorid"),
schema.UniqueConstraint("name", name="uniq_flavors0name"))
id = Column(Integer, primary_key=True)
name = Column(String(255), nullable=False)
memory_mb = Column(Integer, nullable=False)
vcpus = Column(Integer, nullable=False)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
flavorid = Column(String(255), nullable=False)
swap = Column(Integer, nullable=False, default=0)
rxtx_factor = Column(Float, default=1)
vcpu_weight = Column(Integer)
disabled = Column(Boolean, default=False)
is_public = Column(Boolean, default=True)
description = Column(Text)
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String(255), nullable=False)
memory_mb = sa.Column(sa.Integer, nullable=False)
vcpus = sa.Column(sa.Integer, nullable=False)
root_gb = sa.Column(sa.Integer)
ephemeral_gb = sa.Column(sa.Integer)
flavorid = sa.Column(sa.String(255), nullable=False)
swap = sa.Column(sa.Integer, nullable=False, default=0)
rxtx_factor = sa.Column(sa.Float, default=1)
vcpu_weight = sa.Column(sa.Integer)
disabled = sa.Column(sa.Boolean, default=False)
is_public = sa.Column(sa.Boolean, default=True)
description = sa.Column(sa.Text)
class FlavorExtraSpecs(API_BASE):
"""Represents additional specs as key/value pairs for a flavor"""
__tablename__ = 'flavor_extra_specs'
__table_args__ = (
Index('flavor_extra_specs_flavor_id_key_idx', 'flavor_id', 'key'),
sa.Index('flavor_extra_specs_flavor_id_key_idx', 'flavor_id', 'key'),
schema.UniqueConstraint('flavor_id', 'key',
name='uniq_flavor_extra_specs0flavor_id0key'),
{'mysql_collate': 'utf8_bin'},
)
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255))
flavor_id = Column(Integer, ForeignKey('flavors.id'), nullable=False)
flavor = orm.relationship(Flavors, backref='extra_specs',
foreign_keys=flavor_id,
primaryjoin=(
'FlavorExtraSpecs.flavor_id == Flavors.id'))
id = sa.Column(sa.Integer, primary_key=True)
key = sa.Column(sa.String(255), nullable=False)
value = sa.Column(sa.String(255))
flavor_id = sa.Column(
sa.Integer, sa.ForeignKey('flavors.id'), nullable=False)
flavor = orm.relationship(
Flavors, backref='extra_specs',
foreign_keys=flavor_id,
primaryjoin='FlavorExtraSpecs.flavor_id == Flavors.id')
class FlavorProjects(API_BASE):
@ -229,13 +232,14 @@ class FlavorProjects(API_BASE):
__table_args__ = (schema.UniqueConstraint('flavor_id', 'project_id',
name='uniq_flavor_projects0flavor_id0project_id'),)
id = Column(Integer, primary_key=True)
flavor_id = Column(Integer, ForeignKey('flavors.id'), nullable=False)
project_id = Column(String(255), nullable=False)
flavor = orm.relationship(Flavors, backref='projects',
foreign_keys=flavor_id,
primaryjoin=(
'FlavorProjects.flavor_id == Flavors.id'))
id = sa.Column(sa.Integer, primary_key=True)
flavor_id = sa.Column(
sa.Integer, sa.ForeignKey('flavors.id'), nullable=False)
project_id = sa.Column(sa.String(255), nullable=False)
flavor = orm.relationship(
Flavors, backref='projects',
foreign_keys=flavor_id,
primaryjoin='FlavorProjects.flavor_id == Flavors.id')
class BuildRequest(API_BASE):
@ -243,19 +247,19 @@ class BuildRequest(API_BASE):
__tablename__ = 'build_requests'
__table_args__ = (
Index('build_requests_instance_uuid_idx', 'instance_uuid'),
Index('build_requests_project_id_idx', 'project_id'),
schema.UniqueConstraint('instance_uuid',
name='uniq_build_requests0instance_uuid'),
)
sa.Index('build_requests_instance_uuid_idx', 'instance_uuid'),
sa.Index('build_requests_project_id_idx', 'project_id'),
schema.UniqueConstraint(
'instance_uuid', name='uniq_build_requests0instance_uuid'),
)
id = Column(Integer, primary_key=True)
id = sa.Column(sa.Integer, primary_key=True)
# TODO(mriedem): instance_uuid should be nullable=False
instance_uuid = Column(String(36))
project_id = Column(String(255), nullable=False)
instance = Column(MediumText())
block_device_mappings = Column(MediumText())
tags = Column(Text())
instance_uuid = sa.Column(sa.String(36))
project_id = sa.Column(sa.String(255), nullable=False)
instance = sa.Column(MediumText())
block_device_mappings = sa.Column(MediumText())
tags = sa.Column(sa.Text())
# TODO(alaski): Drop these from the db in Ocata
# columns_to_drop = ['request_spec_id', 'user_id', 'display_name',
# 'instance_metadata', 'progress', 'vm_state', 'task_state',
@ -269,19 +273,18 @@ class KeyPair(API_BASE):
"""Represents a public key pair for ssh / WinRM."""
__tablename__ = 'key_pairs'
__table_args__ = (
schema.UniqueConstraint("user_id", "name",
name="uniq_key_pairs0user_id0name"),
schema.UniqueConstraint(
"user_id", "name", name="uniq_key_pairs0user_id0name"),
)
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(255), nullable=False)
user_id = Column(String(255), nullable=False)
fingerprint = Column(String(255))
public_key = Column(Text())
type = Column(Enum('ssh', 'x509', name='keypair_types'),
nullable=False, server_default='ssh')
id = sa.Column(sa.Integer, primary_key=True, nullable=False)
name = sa.Column(sa.String(255), nullable=False)
user_id = sa.Column(sa.String(255), nullable=False)
fingerprint = sa.Column(sa.String(255))
public_key = sa.Column(sa.Text())
type = sa.Column(
sa.Enum('ssh', 'x509', name='keypair_types'),
nullable=False, server_default='ssh')
# TODO(stephenfin): Remove this as it's now unused post-placement split
@ -292,8 +295,8 @@ class ResourceClass(API_BASE):
schema.UniqueConstraint("name", name="uniq_resource_classes0name"),
)
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(255), nullable=False)
id = sa.Column(sa.Integer, primary_key=True, nullable=False)
name = sa.Column(sa.String(255), nullable=False)
# TODO(stephenfin): Remove this as it's now unused post-placement split
@ -302,29 +305,28 @@ class ResourceProvider(API_BASE):
__tablename__ = "resource_providers"
__table_args__ = (
Index('resource_providers_uuid_idx', 'uuid'),
schema.UniqueConstraint('uuid',
name='uniq_resource_providers0uuid'),
Index('resource_providers_name_idx', 'name'),
Index('resource_providers_root_provider_id_idx',
'root_provider_id'),
Index('resource_providers_parent_provider_id_idx',
'parent_provider_id'),
schema.UniqueConstraint('name',
name='uniq_resource_providers0name')
sa.Index('resource_providers_uuid_idx', 'uuid'),
schema.UniqueConstraint('uuid', name='uniq_resource_providers0uuid'),
sa.Index('resource_providers_name_idx', 'name'),
sa.Index(
'resource_providers_root_provider_id_idx', 'root_provider_id'),
sa.Index(
'resource_providers_parent_provider_id_idx', 'parent_provider_id'),
schema.UniqueConstraint(
'name', name='uniq_resource_providers0name')
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
name = Column(Unicode(200), nullable=True)
generation = Column(Integer, default=0)
id = sa.Column(sa.Integer, primary_key=True, nullable=False)
uuid = sa.Column(sa.String(36), nullable=False)
name = sa.Column(sa.Unicode(200), nullable=True)
generation = sa.Column(sa.Integer, default=0)
# Represents the root of the "tree" that the provider belongs to
root_provider_id = Column(Integer, ForeignKey('resource_providers.id'),
nullable=True)
root_provider_id = sa.Column(
sa.Integer, sa.ForeignKey('resource_providers.id'), nullable=True)
# The immediate parent provider of this provider, or NULL if there is no
# parent. If parent_provider_id == NULL then root_provider_id == id
parent_provider_id = Column(Integer, ForeignKey('resource_providers.id'),
nullable=True)
parent_provider_id = sa.Column(
sa.Integer, sa.ForeignKey('resource_providers.id'), nullable=True)
# TODO(stephenfin): Remove this as it's now unused post-placement split
@ -333,29 +335,34 @@ class Inventory(API_BASE):
__tablename__ = "inventories"
__table_args__ = (
Index('inventories_resource_provider_id_idx',
'resource_provider_id'),
Index('inventories_resource_class_id_idx',
'resource_class_id'),
Index('inventories_resource_provider_resource_class_idx',
'resource_provider_id', 'resource_class_id'),
schema.UniqueConstraint('resource_provider_id', 'resource_class_id',
name='uniq_inventories0resource_provider_resource_class')
sa.Index(
'inventories_resource_provider_id_idx', 'resource_provider_id'),
sa.Index(
'inventories_resource_class_id_idx', 'resource_class_id'),
sa.Index(
'inventories_resource_provider_resource_class_idx',
'resource_provider_id',
'resource_class_id',
),
schema.UniqueConstraint(
'resource_provider_id',
'resource_class_id',
name='uniq_inventories0resource_provider_resource_class'
),
)
id = Column(Integer, primary_key=True, nullable=False)
resource_provider_id = Column(Integer, nullable=False)
resource_class_id = Column(Integer, nullable=False)
total = Column(Integer, nullable=False)
reserved = Column(Integer, nullable=False)
min_unit = Column(Integer, nullable=False)
max_unit = Column(Integer, nullable=False)
step_size = Column(Integer, nullable=False)
allocation_ratio = Column(Float, nullable=False)
id = sa.Column(sa.Integer, primary_key=True, nullable=False)
resource_provider_id = sa.Column(sa.Integer, nullable=False)
resource_class_id = sa.Column(sa.Integer, nullable=False)
total = sa.Column(sa.Integer, nullable=False)
reserved = sa.Column(sa.Integer, nullable=False)
min_unit = sa.Column(sa.Integer, nullable=False)
max_unit = sa.Column(sa.Integer, nullable=False)
step_size = sa.Column(sa.Integer, nullable=False)
allocation_ratio = sa.Column(sa.Float, nullable=False)
resource_provider = orm.relationship(
"ResourceProvider",
primaryjoin=('Inventory.resource_provider_id == '
'ResourceProvider.id'),
primaryjoin='Inventory.resource_provider_id == ResourceProvider.id',
foreign_keys=resource_provider_id)
@ -365,23 +372,24 @@ class Allocation(API_BASE):
__tablename__ = "allocations"
__table_args__ = (
Index('allocations_resource_provider_class_used_idx',
'resource_provider_id', 'resource_class_id',
'used'),
Index('allocations_resource_class_id_idx',
'resource_class_id'),
Index('allocations_consumer_id_idx', 'consumer_id')
sa.Index(
'allocations_resource_provider_class_used_idx',
'resource_provider_id',
'resource_class_id',
'used',
),
sa.Index('allocations_resource_class_id_idx', 'resource_class_id'),
sa.Index('allocations_consumer_id_idx', 'consumer_id')
)
id = Column(Integer, primary_key=True, nullable=False)
resource_provider_id = Column(Integer, nullable=False)
consumer_id = Column(String(36), nullable=False)
resource_class_id = Column(Integer, nullable=False)
used = Column(Integer, nullable=False)
id = sa.Column(sa.Integer, primary_key=True, nullable=False)
resource_provider_id = sa.Column(sa.Integer, nullable=False)
consumer_id = sa.Column(sa.String(36), nullable=False)
resource_class_id = sa.Column(sa.Integer, nullable=False)
used = sa.Column(sa.Integer, nullable=False)
resource_provider = orm.relationship(
"ResourceProvider",
primaryjoin=('Allocation.resource_provider_id == '
'ResourceProvider.id'),
primaryjoin='Allocation.resource_provider_id == ResourceProvider.id',
foreign_keys=resource_provider_id)
@ -391,12 +399,13 @@ class ResourceProviderAggregate(API_BASE):
__tablename__ = 'resource_provider_aggregates'
__table_args__ = (
Index('resource_provider_aggregates_aggregate_id_idx',
'aggregate_id'),
sa.Index(
'resource_provider_aggregates_aggregate_id_idx', 'aggregate_id'),
)
resource_provider_id = Column(Integer, primary_key=True, nullable=False)
aggregate_id = Column(Integer, primary_key=True, nullable=False)
resource_provider_id = sa.Column(
sa.Integer, primary_key=True, nullable=False)
aggregate_id = sa.Column(sa.Integer, primary_key=True, nullable=False)
# TODO(stephenfin): Remove this as it's now unused post-placement split
@ -407,33 +416,34 @@ class PlacementAggregate(API_BASE):
schema.UniqueConstraint("uuid", name="uniq_placement_aggregates0uuid"),
)
id = Column(Integer, primary_key=True, autoincrement=True)
uuid = Column(String(36), index=True)
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
uuid = sa.Column(sa.String(36), index=True)
class InstanceGroupMember(API_BASE):
"""Represents the members for an instance group."""
__tablename__ = 'instance_group_member'
__table_args__ = (
Index('instance_group_member_instance_idx', 'instance_uuid'),
sa.Index('instance_group_member_instance_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True, nullable=False)
instance_uuid = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
id = sa.Column(sa.Integer, primary_key=True, nullable=False)
instance_uuid = sa.Column(sa.String(255))
group_id = sa.Column(
sa.Integer, sa.ForeignKey('instance_groups.id'), nullable=False)
class InstanceGroupPolicy(API_BASE):
"""Represents the policy type for an instance group."""
__tablename__ = 'instance_group_policy'
__table_args__ = (
Index('instance_group_policy_policy_idx', 'policy'),
sa.Index('instance_group_policy_policy_idx', 'policy'),
)
id = Column(Integer, primary_key=True, nullable=False)
policy = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
rules = Column(Text)
id = sa.Column(sa.Integer, primary_key=True, nullable=False)
policy = sa.Column(sa.String(255))
group_id = sa.Column(
sa.Integer, sa.ForeignKey('instance_groups.id'), nullable=False)
rules = sa.Column(sa.Text)
class InstanceGroup(API_BASE):
@ -448,15 +458,17 @@ class InstanceGroup(API_BASE):
schema.UniqueConstraint('uuid', name='uniq_instance_groups0uuid'),
)
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(String(255))
project_id = Column(String(255))
uuid = Column(String(36), nullable=False)
name = Column(String(255))
_policies = orm.relationship(InstanceGroupPolicy,
primaryjoin='InstanceGroup.id == InstanceGroupPolicy.group_id')
_members = orm.relationship(InstanceGroupMember,
primaryjoin='InstanceGroup.id == InstanceGroupMember.group_id')
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
user_id = sa.Column(sa.String(255))
project_id = sa.Column(sa.String(255))
uuid = sa.Column(sa.String(36), nullable=False)
name = sa.Column(sa.String(255))
_policies = orm.relationship(
InstanceGroupPolicy,
primaryjoin='InstanceGroup.id == InstanceGroupPolicy.group_id')
_members = orm.relationship(
InstanceGroupMember,
primaryjoin='InstanceGroup.id == InstanceGroupMember.group_id')
@property
def policy(self):
@ -482,41 +494,43 @@ class Quota(API_BASE):
deployment is used. If the row is present but the hard limit is
Null, then the resource is unlimited.
"""
__tablename__ = 'quotas'
__table_args__ = (
schema.UniqueConstraint("project_id", "resource",
name="uniq_quotas0project_id0resource"
schema.UniqueConstraint(
"project_id",
"resource",
name="uniq_quotas0project_id0resource"
),
)
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
resource = Column(String(255), nullable=False)
hard_limit = Column(Integer)
id = sa.Column(sa.Integer, primary_key=True)
project_id = sa.Column(sa.String(255))
resource = sa.Column(sa.String(255), nullable=False)
hard_limit = sa.Column(sa.Integer)
class ProjectUserQuota(API_BASE):
"""Represents a single quota override for a user with in a project."""
__tablename__ = 'project_user_quotas'
uniq_name = "uniq_project_user_quotas0user_id0project_id0resource"
__table_args__ = (
schema.UniqueConstraint("user_id", "project_id", "resource",
name=uniq_name),
Index('project_user_quotas_project_id_idx',
'project_id'),
Index('project_user_quotas_user_id_idx',
'user_id',)
schema.UniqueConstraint(
"user_id",
"project_id",
"resource",
name="uniq_project_user_quotas0user_id0project_id0resource",
),
sa.Index(
'project_user_quotas_project_id_idx', 'project_id'),
sa.Index(
'project_user_quotas_user_id_idx', 'user_id',)
)
id = Column(Integer, primary_key=True, nullable=False)
project_id = Column(String(255), nullable=False)
user_id = Column(String(255), nullable=False)
resource = Column(String(255), nullable=False)
hard_limit = Column(Integer)
id = sa.Column(sa.Integer, primary_key=True, nullable=False)
project_id = sa.Column(sa.String(255), nullable=False)
user_id = sa.Column(sa.String(255), nullable=False)
resource = sa.Column(sa.String(255), nullable=False)
hard_limit = sa.Column(sa.Integer)
class QuotaClass(API_BASE):
@ -529,14 +543,14 @@ class QuotaClass(API_BASE):
__tablename__ = 'quota_classes'
__table_args__ = (
Index('quota_classes_class_name_idx', 'class_name'),
sa.Index('quota_classes_class_name_idx', 'class_name'),
)
id = Column(Integer, primary_key=True)
id = sa.Column(sa.Integer, primary_key=True)
class_name = Column(String(255))
class_name = sa.Column(sa.String(255))
resource = Column(String(255))
hard_limit = Column(Integer)
resource = sa.Column(sa.String(255))
hard_limit = sa.Column(sa.Integer)
class QuotaUsage(API_BASE):
@ -544,23 +558,22 @@ class QuotaUsage(API_BASE):
__tablename__ = 'quota_usages'
__table_args__ = (
Index('quota_usages_project_id_idx', 'project_id'),
Index('quota_usages_user_id_idx', 'user_id'),
sa.Index('quota_usages_project_id_idx', 'project_id'),
sa.Index('quota_usages_user_id_idx', 'user_id'),
)
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255), nullable=False)
in_use = Column(Integer, nullable=False)
reserved = Column(Integer, nullable=False)
id = sa.Column(sa.Integer, primary_key=True)
project_id = sa.Column(sa.String(255))
user_id = sa.Column(sa.String(255))
resource = sa.Column(sa.String(255), nullable=False)
in_use = sa.Column(sa.Integer, nullable=False)
reserved = sa.Column(sa.Integer, nullable=False)
@property
def total(self):
return self.in_use + self.reserved
until_refresh = Column(Integer)
until_refresh = sa.Column(sa.Integer)
class Reservation(API_BASE):
@ -568,23 +581,21 @@ class Reservation(API_BASE):
__tablename__ = 'reservations'
__table_args__ = (
Index('reservations_project_id_idx', 'project_id'),
Index('reservations_uuid_idx', 'uuid'),
Index('reservations_expire_idx', 'expire'),
Index('reservations_user_id_idx', 'user_id'),
sa.Index('reservations_project_id_idx', 'project_id'),
sa.Index('reservations_uuid_idx', 'uuid'),
sa.Index('reservations_expire_idx', 'expire'),
sa.Index('reservations_user_id_idx', 'user_id'),
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255))
delta = Column(Integer, nullable=False)
expire = Column(DateTime)
id = sa.Column(sa.Integer, primary_key=True, nullable=False)
uuid = sa.Column(sa.String(36), nullable=False)
usage_id = sa.Column(
sa.Integer, sa.ForeignKey('quota_usages.id'), nullable=False)
project_id = sa.Column(sa.String(255))
user_id = sa.Column(sa.String(255))
resource = sa.Column(sa.String(255))
delta = sa.Column(sa.Integer, nullable=False)
expire = sa.Column(sa.DateTime)
usage = orm.relationship(
"QuotaUsage",
foreign_keys=usage_id,
@ -599,8 +610,9 @@ class Trait(API_BASE):
schema.UniqueConstraint('name', name='uniq_traits0name'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
name = Column(Unicode(255), nullable=False)
id = sa.Column(
sa.Integer, primary_key=True, nullable=False, autoincrement=True)
name = sa.Column(sa.Unicode(255), nullable=False)
# TODO(stephenfin): Remove this as it's now unused post-placement split
@ -609,16 +621,18 @@ class ResourceProviderTrait(API_BASE):
__tablename__ = "resource_provider_traits"
__table_args__ = (
Index('resource_provider_traits_resource_provider_trait_idx',
sa.Index('resource_provider_traits_resource_provider_trait_idx',
'resource_provider_id', 'trait_id'),
)
trait_id = Column(Integer, ForeignKey('traits.id'), primary_key=True,
nullable=False)
resource_provider_id = Column(Integer,
ForeignKey('resource_providers.id'),
primary_key=True,
nullable=False)
trait_id = sa.Column(
sa.Integer, sa.ForeignKey('traits.id'), primary_key=True,
nullable=False)
resource_provider_id = sa.Column(
sa.Integer,
sa.ForeignKey('resource_providers.id'),
primary_key=True,
nullable=False)
# TODO(stephenfin): Remove this as it's unused
@ -633,8 +647,9 @@ class Project(API_BASE):
),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
external_id = Column(String(255), nullable=False)
id = sa.Column(
sa.Integer, primary_key=True, nullable=False, autoincrement=True)
external_id = sa.Column(sa.String(255), nullable=False)
# TODO(stephenfin): Remove this as it's unused
@ -643,14 +658,12 @@ class User(API_BASE):
__tablename__ = 'users'
__table_args__ = (
schema.UniqueConstraint(
'external_id',
name='uniq_users0external_id',
),
schema.UniqueConstraint('external_id', name='uniq_users0external_id'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
external_id = Column(String(255), nullable=False)
id = sa.Column(
sa.Integer, primary_key=True, nullable=False, autoincrement=True)
external_id = sa.Column(sa.String(255), nullable=False)
# TODO(stephenfin): Remove this as it's unused
@ -659,16 +672,22 @@ class Consumer(API_BASE):
__tablename__ = 'consumers'
__table_args__ = (
Index('consumers_project_id_uuid_idx', 'project_id', 'uuid'),
Index('consumers_project_id_user_id_uuid_idx', 'project_id', 'user_id',
'uuid'),
sa.Index('consumers_project_id_uuid_idx', 'project_id', 'uuid'),
sa.Index(
'consumers_project_id_user_id_uuid_idx',
'project_id',
'user_id',
'uuid',
),
schema.UniqueConstraint('uuid', name='uniq_consumers0uuid'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
project_id = Column(Integer, nullable=False)
user_id = Column(Integer, nullable=False)
id = sa.Column(
sa.Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = sa.Column(sa.String(36), nullable=False)
project_id = sa.Column(sa.Integer, nullable=False)
user_id = sa.Column(sa.Integer, nullable=False)
# FIXME(mriedem): Change this to server_default=text("0") to match the
# 059_add_consumer_generation script once bug 1776527 is fixed.
generation = Column(Integer, nullable=False, server_default="0", default=0)
generation = sa.Column(
sa.Integer, nullable=False, server_default="0", default=0)

File diff suppressed because it is too large Load Diff

View File

@ -16,8 +16,7 @@ from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import uuidutils
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy import orm
from nova.compute import utils as compute_utils
from nova.db.sqlalchemy import api as db_api
@ -36,8 +35,8 @@ DEPRECATED_FIELDS = ['deleted', 'deleted_at']
@db_api.api_context_manager.reader
def _aggregate_get_from_db(context, aggregate_id):
query = context.session.query(api_models.Aggregate).\
options(joinedload('_hosts')).\
options(joinedload('_metadata'))
options(orm.joinedload('_hosts')).\
options(orm.joinedload('_metadata'))
query = query.filter(api_models.Aggregate.id == aggregate_id)
aggregate = query.first()
@ -51,8 +50,8 @@ def _aggregate_get_from_db(context, aggregate_id):
@db_api.api_context_manager.reader
def _aggregate_get_from_db_by_uuid(context, aggregate_uuid):
query = context.session.query(api_models.Aggregate).\
options(joinedload('_hosts')).\
options(joinedload('_metadata'))
options(orm.joinedload('_hosts')).\
options(orm.joinedload('_metadata'))
query = query.filter(api_models.Aggregate.uuid == aggregate_uuid)
aggregate = query.first()
@ -415,8 +414,8 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
@db_api.api_context_manager.reader
def _get_all_from_db(context):
query = context.session.query(api_models.Aggregate).\
options(joinedload('_hosts')).\
options(joinedload('_metadata'))
options(orm.joinedload('_hosts')).\
options(orm.joinedload('_metadata'))
return query.all()
@ -424,8 +423,8 @@ def _get_all_from_db(context):
@db_api.api_context_manager.reader
def _get_by_host_from_db(context, host, key=None):
query = context.session.query(api_models.Aggregate).\
options(joinedload('_hosts')).\
options(joinedload('_metadata'))
options(orm.joinedload('_hosts')).\
options(orm.joinedload('_metadata'))
query = query.join('_hosts')
query = query.filter(api_models.AggregateHost.host == host)
@ -445,8 +444,8 @@ def _get_by_metadata_from_db(context, key=None, value=None):
query = query.filter(api_models.AggregateMetadata.key == key)
if value is not None:
query = query.filter(api_models.AggregateMetadata.value == value)
query = query.options(contains_eager("_metadata"))
query = query.options(joinedload("_hosts"))
query = query.options(orm.contains_eager("_metadata"))
query = query.options(orm.joinedload("_hosts"))
return query.all()
@ -477,8 +476,8 @@ def _get_non_matching_by_metadata_keys_from_db(context, ignored_keys,
query = query.filter(~api_models.AggregateMetadata.key.in_(
ignored_keys))
query = query.options(contains_eager("_metadata"))
query = query.options(joinedload("_hosts"))
query = query.options(orm.contains_eager("_metadata"))
query = query.options(orm.joinedload("_hosts"))
return query.all()

View File

@ -14,9 +14,8 @@ from urllib import parse as urlparse
from oslo_log import log as logging
from oslo_utils import versionutils
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql import false
from sqlalchemy.sql import true
from sqlalchemy import sql
from sqlalchemy.sql import expression
import nova.conf
from nova.db.sqlalchemy import api as db_api
@ -250,7 +249,7 @@ class CellMappingList(base.ObjectListBase, base.NovaObject):
@db_api.api_context_manager.reader
def _get_all_from_db(context):
return context.session.query(api_models.CellMapping).order_by(
asc(api_models.CellMapping.id)).all()
expression.asc(api_models.CellMapping.id)).all()
@base.remotable_classmethod
def get_all(cls, context):
@ -261,12 +260,13 @@ class CellMappingList(base.ObjectListBase, base.NovaObject):
@db_api.api_context_manager.reader
def _get_by_disabled_from_db(context, disabled):
if disabled:
return context.session.query(api_models.CellMapping).filter_by(
disabled=true()).order_by(asc(api_models.CellMapping.id)).all()
return context.session.query(api_models.CellMapping)\
.filter_by(disabled=sql.true())\
.order_by(expression.asc(api_models.CellMapping.id)).all()
else:
return context.session.query(api_models.CellMapping).filter_by(
disabled=false()).order_by(asc(
api_models.CellMapping.id)).all()
return context.session.query(api_models.CellMapping)\
.filter_by(disabled=sql.false())\
.order_by(expression.asc(api_models.CellMapping.id)).all()
@base.remotable_classmethod
def get_by_disabled(cls, context, disabled):

View File

@ -12,12 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from oslo_utils import versionutils
from sqlalchemy import or_
from sqlalchemy.sql import null
import sqlalchemy as sa
from sqlalchemy import sql
import nova.conf
from nova.db import api as db
@ -500,13 +499,13 @@ def _get_node_empty_ratio(context, max_count):
Results are limited by ``max_count``.
"""
return context.session.query(models.ComputeNode).filter(or_(
return context.session.query(models.ComputeNode).filter(sa.or_(
models.ComputeNode.ram_allocation_ratio == '0.0',
models.ComputeNode.cpu_allocation_ratio == '0.0',
models.ComputeNode.disk_allocation_ratio == '0.0',
models.ComputeNode.ram_allocation_ratio == null(),
models.ComputeNode.cpu_allocation_ratio == null(),
models.ComputeNode.disk_allocation_ratio == null()
models.ComputeNode.ram_allocation_ratio == sql.null(),
models.ComputeNode.cpu_allocation_ratio == sql.null(),
models.ComputeNode.disk_allocation_ratio == sql.null()
)).filter(models.ComputeNode.deleted == 0).limit(max_count).all()

View File

@ -15,10 +15,10 @@
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
from sqlalchemy import or_
from sqlalchemy.orm import joinedload
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql import true
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import sql
from sqlalchemy.sql import expression
import nova.conf
from nova.db.sqlalchemy import api as db_api
@ -55,9 +55,9 @@ def _dict_with_extra_specs(flavor_model):
@db_api.api_context_manager.reader
def _get_projects_from_db(context, flavorid):
db_flavor = context.session.query(api_models.Flavors).\
filter_by(flavorid=flavorid).\
options(joinedload('projects')).\
first()
filter_by(flavorid=flavorid).\
options(orm.joinedload('projects')).\
first()
if not db_flavor:
raise exception.FlavorNotFound(flavor_id=flavorid)
return [x['project_id'] for x in db_flavor['projects']]
@ -272,13 +272,13 @@ class Flavor(base.NovaPersistentObject, base.NovaObject,
@db_api.api_context_manager.reader
def _flavor_get_query_from_db(context):
query = context.session.query(api_models.Flavors).\
options(joinedload('extra_specs'))
options(orm.joinedload('extra_specs'))
if not context.is_admin:
the_filter = [api_models.Flavors.is_public == true()]
the_filter = [api_models.Flavors.is_public == sql.true()]
the_filter.extend([
api_models.Flavors.projects.any(project_id=context.project_id)
])
query = query.filter(or_(*the_filter))
query = query.filter(sa.or_(*the_filter))
return query
@staticmethod
@ -309,7 +309,7 @@ class Flavor(base.NovaPersistentObject, base.NovaObject,
"""Returns a dict describing specific flavor_id."""
result = Flavor._flavor_get_query_from_db(context).\
filter_by(flavorid=flavor_id).\
order_by(asc(api_models.Flavors.id)).\
order_by(expression.asc(api_models.Flavors.id)).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
@ -609,7 +609,7 @@ def _flavor_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
the_filter.extend([api_models.Flavors.projects.any(
project_id=context.project_id)])
if len(the_filter) > 1:
query = query.filter(or_(*the_filter))
query = query.filter(sa.or_(*the_filter))
else:
query = query.filter(the_filter[0])
marker_row = None

View File

@ -11,7 +11,7 @@
# under the License.
from oslo_db import exception as db_exc
from sqlalchemy.orm import joinedload
from sqlalchemy import orm
from nova import context
from nova.db.sqlalchemy import api as db_api
@ -89,9 +89,9 @@ class HostMapping(base.NovaTimestampObject, base.NovaObject):
@staticmethod
@db_api.api_context_manager.reader
def _get_by_host_from_db(context, host):
db_mapping = (context.session.query(api_models.HostMapping)
.options(joinedload('cell_mapping'))
.filter(api_models.HostMapping.host == host)).first()
db_mapping = context.session.query(api_models.HostMapping)\
.options(orm.joinedload('cell_mapping'))\
.filter(api_models.HostMapping.host == host).first()
if not db_mapping:
raise exception.HostMappingNotFound(name=host)
return db_mapping
@ -160,7 +160,7 @@ class HostMappingList(base.ObjectListBase, base.NovaObject):
@db_api.api_context_manager.reader
def _get_from_db(context, cell_id=None):
query = (context.session.query(api_models.HostMapping)
.options(joinedload('cell_mapping')))
.options(orm.joinedload('cell_mapping')))
if cell_id:
query = query.filter(api_models.HostMapping.cell_id == cell_id)
return query.all()

View File

@ -20,10 +20,9 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import versionutils
from sqlalchemy import or_
from sqlalchemy.sql import false
import sqlalchemy as sa
from sqlalchemy import sql
from sqlalchemy.sql import func
from sqlalchemy.sql import null
from nova import availability_zones as avail_zone
from nova.compute import task_states
@ -1516,9 +1515,9 @@ class InstanceList(base.ObjectListBase, base.NovaObject):
# NOTE(melwitt): Copied from nova/db/sqlalchemy/api.py:
# It would be better to have vm_state not be nullable
# but until then we test it explicitly as a workaround.
not_soft_deleted = or_(
not_soft_deleted = sa.or_(
models.Instance.vm_state != vm_states.SOFT_DELETED,
models.Instance.vm_state == null()
models.Instance.vm_state == sql.null()
)
project_query = context.session.query(
func.count(models.Instance.id),
@ -1531,8 +1530,10 @@ class InstanceList(base.ObjectListBase, base.NovaObject):
# non-hidden version of the instance in another cell database and the
# API will only show one of them, so we don't count the hidden copy.
project_query = project_query.filter(
or_(models.Instance.hidden == false(),
models.Instance.hidden == null()))
sa.or_(
models.Instance.hidden == sql.false(),
models.Instance.hidden == sql.null(),
))
project_result = project_query.first()
fields = ('instances', 'cores', 'ram')

View File

@ -19,8 +19,7 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from oslo_utils import versionutils
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy import orm
from nova.compute import utils as compute_utils
from nova.db.sqlalchemy import api as db_api
@ -37,8 +36,8 @@ LOG = logging.getLogger(__name__)
def _instance_group_get_query(context, id_field=None, id=None):
query = context.session.query(api_models.InstanceGroup).\
options(joinedload('_policies')).\
options(joinedload('_members'))
options(orm.joinedload('_policies')).\
options(orm.joinedload('_members'))
if not context.is_admin:
query = query.filter_by(project_id=context.project_id)
if id and id_field:
@ -89,7 +88,7 @@ def _instance_group_members_add_by_uuid(context, group_uuid, members):
outerjoin(api_models.InstanceGroupMember,
api_models.InstanceGroupMember.instance_uuid.in_(set(members))).\
filter(api_models.InstanceGroup.uuid == group_uuid).\
options(contains_eager('_members')).first()
options(orm.contains_eager('_members')).first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return _instance_group_model_add(context, api_models.InstanceGroupMember,

View File

@ -14,11 +14,10 @@ import collections
from oslo_log import log as logging
from oslo_utils import versionutils
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import false
from sqlalchemy import sql
from sqlalchemy.sql import func
from sqlalchemy.sql import or_
from nova import context as nova_context
from nova.db.sqlalchemy import api as db_api
@ -99,11 +98,10 @@ class InstanceMapping(base.NovaTimestampObject, base.NovaObject):
@staticmethod
@db_api.api_context_manager.reader
def _get_by_instance_uuid_from_db(context, instance_uuid):
db_mapping = (context.session.query(api_models.InstanceMapping)
.options(joinedload('cell_mapping'))
.filter(
api_models.InstanceMapping.instance_uuid ==
instance_uuid)).first()
db_mapping = context.session.query(api_models.InstanceMapping)\
.options(orm.joinedload('cell_mapping'))\
.filter(api_models.InstanceMapping.instance_uuid == instance_uuid)\
.first()
if not db_mapping:
raise exception.InstanceMappingNotFound(uuid=instance_uuid)
@ -313,10 +311,9 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
@staticmethod
@db_api.api_context_manager.reader
def _get_by_project_id_from_db(context, project_id):
return (context.session.query(api_models.InstanceMapping)
.options(joinedload('cell_mapping'))
.filter(
api_models.InstanceMapping.project_id == project_id)).all()
return context.session.query(api_models.InstanceMapping)\
.options(orm.joinedload('cell_mapping'))\
.filter(api_models.InstanceMapping.project_id == project_id).all()
@base.remotable_classmethod
def get_by_project_id(cls, context, project_id):
@ -328,9 +325,9 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
@staticmethod
@db_api.api_context_manager.reader
def _get_by_cell_id_from_db(context, cell_id):
return (context.session.query(api_models.InstanceMapping)
.options(joinedload('cell_mapping'))
.filter(api_models.InstanceMapping.cell_id == cell_id)).all()
return context.session.query(api_models.InstanceMapping)\
.options(orm.joinedload('cell_mapping'))\
.filter(api_models.InstanceMapping.cell_id == cell_id).all()
@base.remotable_classmethod
def get_by_cell_id(cls, context, cell_id):
@ -341,10 +338,10 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
@staticmethod
@db_api.api_context_manager.reader
def _get_by_instance_uuids_from_db(context, uuids):
return (context.session.query(api_models.InstanceMapping)
.options(joinedload('cell_mapping'))
.filter(api_models.InstanceMapping.instance_uuid.in_(uuids))
.all())
return context.session.query(api_models.InstanceMapping)\
.options(orm.joinedload('cell_mapping'))\
.filter(api_models.InstanceMapping.instance_uuid.in_(uuids))\
.all()
@base.remotable_classmethod
def get_by_instance_uuids(cls, context, uuids):
@ -376,11 +373,11 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
# queued_for_delete was not run) and False (cases when the online
# data migration for queued_for_delete was run) are assumed to mean
# that the instance is not queued for deletion.
query = (query.filter(or_(
api_models.InstanceMapping.queued_for_delete == false(),
query = (query.filter(sql.or_(
api_models.InstanceMapping.queued_for_delete == sql.false(),
api_models.InstanceMapping.queued_for_delete.is_(None)))
.join('cell_mapping')
.options(joinedload('cell_mapping'))
.options(orm.joinedload('cell_mapping'))
.filter(api_models.CellMapping.uuid == cell_uuid))
if limit is not None:
query = query.limit(limit)

View File

@ -20,10 +20,7 @@ import copy
from oslo_log import log as logging
from oslo_utils import importutils
from sqlalchemy.sql import and_
from sqlalchemy.sql import false
from sqlalchemy.sql import null
from sqlalchemy.sql import or_
from sqlalchemy import sql
import nova.conf
from nova import context as nova_context
@ -1077,13 +1074,13 @@ def _user_id_queued_for_delete_populated(context, project_id=None):
:returns: True if user_id is set for all non-deleted instances and
queued_for_delete is set for all instances, else False
"""
user_id_not_populated = and_(
api_models.InstanceMapping.user_id == null(),
api_models.InstanceMapping.queued_for_delete == false())
user_id_not_populated = sql.and_(
api_models.InstanceMapping.user_id == sql.null(),
api_models.InstanceMapping.queued_for_delete == sql.false())
# If either queued_for_delete or user_id are unmigrated, we will return
# False.
unmigrated_filter = or_(
api_models.InstanceMapping.queued_for_delete == null(),
unmigrated_filter = sql.or_(
api_models.InstanceMapping.queued_for_delete == sql.null(),
user_id_not_populated)
query = context.session.query(api_models.InstanceMapping).filter(
unmigrated_filter)

View File

@ -18,9 +18,8 @@ import re
from dateutil import parser as dateutil_parser
from oslo_utils import fixture as osloutils_fixture
from oslo_utils import timeutils
import sqlalchemy as sa
from sqlalchemy import func
from sqlalchemy import MetaData
from sqlalchemy import select
from nova import context
from nova.db import api as db
@ -179,13 +178,13 @@ class TestDatabaseArchive(integrated_helpers._IntegratedTestBase):
def _get_table_counts(self):
engine = sqlalchemy_api.get_engine()
conn = engine.connect()
meta = MetaData(engine)
meta = sa.MetaData(engine)
meta.reflect()
shadow_tables = sqlalchemy_api._purgeable_tables(meta)
results = {}
for table in shadow_tables:
r = conn.execute(
select([func.count()]).select_from(table)).fetchone()
sa.select([func.count()]).select_from(table)).fetchone()
results[table.name] = r[0]
return results

View File

@ -36,16 +36,12 @@ from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
from oslo_utils import uuidutils
from sqlalchemy import Column
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import SQLAlchemyError
import sqlalchemy as sa
from sqlalchemy import exc as sqla_exc
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.orm import query
from sqlalchemy.orm import session as sqla_session
from sqlalchemy import sql
from sqlalchemy import Table
from nova import block_device
from nova.compute import rpcapi as compute_rpcapi
@ -173,7 +169,7 @@ class DbTestCase(test.TestCase):
class HelperTestCase(test.TestCase):
@mock.patch.object(sqlalchemy_api, 'joinedload')
@mock.patch('sqlalchemy.orm.joinedload')
def test_joinedload_helper(self, mock_jl):
query = sqlalchemy_api._joinedload_all('foo.bar.baz')
@ -190,7 +186,7 @@ class HelperTestCase(test.TestCase):
self.assertEqual(column3.joinedload.return_value, query)
@mock.patch.object(sqlalchemy_api, 'joinedload')
@mock.patch('sqlalchemy.orm.joinedload')
def test_joinedload_helper_single(self, mock_jl):
query = sqlalchemy_api._joinedload_all('foo')
@ -1757,8 +1753,8 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
instances = db.instance_get_all_by_filters_sort(self.ctxt, filters)
self.assertEqual([], instances)
@mock.patch('nova.db.sqlalchemy.api.undefer')
@mock.patch('nova.db.sqlalchemy.api.joinedload')
@mock.patch('sqlalchemy.orm.undefer')
@mock.patch('sqlalchemy.orm.joinedload')
def test_instance_get_all_by_filters_extra_columns(self,
mock_joinedload,
mock_undefer):
@ -1768,8 +1764,8 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
mock_joinedload.assert_called_once_with('info_cache')
mock_undefer.assert_called_once_with('extra.pci_requests')
@mock.patch('nova.db.sqlalchemy.api.undefer')
@mock.patch('nova.db.sqlalchemy.api.joinedload')
@mock.patch('sqlalchemy.orm.undefer')
@mock.patch('sqlalchemy.orm.joinedload')
def test_instance_get_active_by_window_extra_columns(self,
mock_joinedload,
mock_undefer):
@ -2942,8 +2938,9 @@ class InstanceExtraTestCase(test.TestCase):
self.ctxt, self.instance['uuid'],
columns=['numa_topology', 'vcpu_model', 'trusted_certs',
'resources'])
self.assertRaises(SQLAlchemyError,
extra.__getitem__, 'pci_requests')
self.assertRaises(
sqla_exc.SQLAlchemyError,
extra.__getitem__, 'pci_requests')
self.assertIn('numa_topology', extra)
self.assertIn('vcpu_model', extra)
self.assertIn('trusted_certs', extra)
@ -5899,7 +5896,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(ArchiveTestCase, self).setUp()
self.engine = get_engine()
self.metadata = MetaData(self.engine)
self.metadata = sa.MetaData(self.engine)
self.conn = self.engine.connect()
self.instance_id_mappings = models.InstanceIdMapping.__table__
self.shadow_instance_id_mappings = sqlalchemyutils.get_table(
@ -5931,7 +5928,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
except for specificially named exceptions, are empty. This
makes sure that archiving isn't moving unexpected content.
"""
metadata = MetaData(bind=self.engine)
metadata = sa.MetaData(bind=self.engine)
metadata.reflect()
for table in metadata.tables:
if table.startswith("shadow_") and table not in exceptions:
@ -5943,7 +5940,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
Shadow tables should have an identical schema to the main table.
"""
metadata = MetaData(bind=self.engine)
metadata = sa.MetaData(bind=self.engine)
metadata.reflect()
for table_name in metadata.tables:
# some tables don't have shadow tables so skip these
@ -5961,8 +5958,8 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
shadow_table_name = f'shadow_{table_name}'
table = Table(table_name, metadata, autoload=True)
shadow_table = Table(shadow_table_name, metadata, autoload=True)
table = sa.Table(table_name, metadata, autoload=True)
shadow_table = sa.Table(shadow_table_name, metadata, autoload=True)
columns = {c.name: c for c in table.columns}
shadow_columns = {c.name: c for c in shadow_table.columns}
@ -6159,7 +6156,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
ins_stmt = main_table.insert().values(uuid=uuidstr)
try:
self.conn.execute(ins_stmt)
except (db_exc.DBError, OperationalError):
except (db_exc.DBError, sqla_exc.OperationalError):
# This table has constraints that require a table-specific
# insert, so skip it.
return 2
@ -6640,12 +6637,12 @@ class TestSqlalchemyTypesRepr(
super(TestSqlalchemyTypesRepr, self).setUp()
self.engine = enginefacade.writer.get_engine()
meta = MetaData(bind=self.engine)
self.table = Table(
meta = sa.MetaData(bind=self.engine)
self.table = sa.Table(
'cidr_tbl',
meta,
Column('id', Integer, primary_key=True),
Column('addr', col_types.CIDR())
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('addr', col_types.CIDR())
)
self.table.create()
self.addCleanup(meta.drop_all)