Remove pep8 whitespace ignores

We had carried over the full list of pep8 whitespace ignores from nova.
Trying to address them all in the entire nova repository would have been
too big a task; but it's tenable here in placement.

Do it now rather than letting these whitespace issues compound.

This change removes the E* whitespace ignores and fixes the pep8 issues
they were masking.

Change-Id: Icbabdb9b56fd5a3e9fd14ab537abf3d0d6456fee
This commit is contained in:
Eric Fried 2019-03-01 17:12:20 -06:00
parent d158cbc8a4
commit 849c89d0e5
35 changed files with 615 additions and 566 deletions

View File

@ -15,14 +15,16 @@
from oslo_config import cfg
api_group = cfg.OptGroup('api',
api_group = cfg.OptGroup(
'api',
title='API options',
help="""
Options under this group are used to define Placement API.
""")
api_opts = [
cfg.StrOpt("auth_strategy",
cfg.StrOpt(
"auth_strategy",
default="keystone",
choices=("keystone", "noauth2"),
deprecated_group="DEFAULT",

View File

@ -21,9 +21,10 @@ from oslo_config import cfg
ALL_OPTS = [
cfg.StrOpt('pybasedir',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../')),
cfg.StrOpt(
'pybasedir',
default=os.path.abspath(
os.path.join(os.path.dirname(__file__), '../../')),
sample_default='<Path>',
help="""
The directory where the Placement python modules are installed.
@ -40,7 +41,8 @@ Related options:
* ``state_path``
"""),
cfg.StrOpt('state_path',
cfg.StrOpt(
'state_path',
default='$pybasedir',
help="""
The top-level directory for maintaining state used in Placement.

View File

@ -30,7 +30,8 @@ depends_on = None
def upgrade():
op.create_table('allocations',
op.create_table(
'allocations',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
@ -38,17 +39,20 @@ def upgrade():
sa.Column('consumer_id', sa.String(length=36), nullable=False),
sa.Column('resource_class_id', sa.Integer(), nullable=False),
sa.Column('used', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.PrimaryKeyConstraint('id')
)
op.create_index('allocations_resource_provider_class_used_idx',
'allocations', ['resource_provider_id', 'resource_class_id',
'used'], unique=False)
op.create_index('allocations_resource_class_id_idx', 'allocations',
op.create_index(
'allocations_resource_provider_class_used_idx', 'allocations',
['resource_provider_id', 'resource_class_id', 'used'], unique=False)
op.create_index(
'allocations_resource_class_id_idx', 'allocations',
['resource_class_id'], unique=False)
op.create_index('allocations_consumer_id_idx', 'allocations',
['consumer_id'], unique=False)
op.create_index(
'allocations_consumer_id_idx', 'allocations', ['consumer_id'],
unique=False)
op.create_table('consumers',
op.create_table(
'consumers',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
@ -60,12 +64,15 @@ def upgrade():
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('uuid', name='uniq_consumers0uuid'),
)
op.create_index('consumers_project_id_user_id_uuid_idx', 'consumers',
op.create_index(
'consumers_project_id_user_id_uuid_idx', 'consumers',
['project_id', 'user_id', 'uuid'], unique=False)
op.create_index('consumers_project_id_uuid_idx', 'consumers',
op.create_index(
'consumers_project_id_uuid_idx', 'consumers',
['project_id', 'uuid'], unique=False)
op.create_table('inventories',
op.create_table(
'inventories',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
@ -78,18 +85,23 @@ def upgrade():
sa.Column('step_size', sa.Integer(), nullable=False),
sa.Column('allocation_ratio', sa.Float(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('resource_provider_id', 'resource_class_id',
sa.UniqueConstraint(
'resource_provider_id', 'resource_class_id',
name='uniq_inventories0resource_provider_resource_class'),
)
op.create_index('inventories_resource_class_id_idx', 'inventories',
op.create_index(
'inventories_resource_class_id_idx', 'inventories',
['resource_class_id'], unique=False)
op.create_index('inventories_resource_provider_id_idx', 'inventories',
op.create_index(
'inventories_resource_provider_id_idx', 'inventories',
['resource_provider_id'], unique=False)
op.create_index('inventories_resource_provider_resource_class_idx',
op.create_index(
'inventories_resource_provider_resource_class_idx',
'inventories', ['resource_provider_id', 'resource_class_id'],
unique=False)
op.create_table('placement_aggregates',
op.create_table(
'placement_aggregates',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
@ -100,17 +112,18 @@ def upgrade():
op.create_index(op.f('ix_placement_aggregates_uuid'),
'placement_aggregates', ['uuid'], unique=False)
op.create_table('projects',
op.create_table(
'projects',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('external_id', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('external_id',
name='uniq_projects0external_id'),
sa.UniqueConstraint('external_id', name='uniq_projects0external_id'),
)
op.create_table('resource_classes',
op.create_table(
'resource_classes',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
@ -119,17 +132,20 @@ def upgrade():
sa.UniqueConstraint('name', name='uniq_resource_classes0name'),
)
op.create_table('resource_provider_aggregates',
op.create_table(
'resource_provider_aggregates',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('resource_provider_id', sa.Integer(), nullable=False),
sa.Column('aggregate_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('resource_provider_id', 'aggregate_id'),
)
op.create_index('resource_provider_aggregates_aggregate_id_idx',
op.create_index(
'resource_provider_aggregates_aggregate_id_idx',
'resource_provider_aggregates', ['aggregate_id'], unique=False)
op.create_table('resource_providers',
op.create_table(
'resource_providers',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
@ -146,16 +162,21 @@ def upgrade():
sa.UniqueConstraint('name', name='uniq_resource_providers0name'),
sa.UniqueConstraint('uuid', name='uniq_resource_providers0uuid'),
)
op.create_index('resource_providers_name_idx', 'resource_providers',
['name'], unique=False)
op.create_index('resource_providers_parent_provider_id_idx',
'resource_providers', ['parent_provider_id'], unique=False)
op.create_index('resource_providers_root_provider_id_idx',
op.create_index(
'resource_providers_name_idx', 'resource_providers', ['name'],
unique=False)
op.create_index(
'resource_providers_parent_provider_id_idx', 'resource_providers',
['parent_provider_id'], unique=False)
op.create_index(
'resource_providers_root_provider_id_idx',
'resource_providers', ['root_provider_id'], unique=False)
op.create_index('resource_providers_uuid_idx', 'resource_providers',
['uuid'], unique=False)
op.create_index(
'resource_providers_uuid_idx', 'resource_providers', ['uuid'],
unique=False)
op.create_table('traits',
op.create_table(
'traits',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
@ -164,7 +185,8 @@ def upgrade():
sa.UniqueConstraint('name', name='uniq_traits0name'),
)
op.create_table('users',
op.create_table(
'users',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
@ -173,7 +195,8 @@ def upgrade():
sa.UniqueConstraint('external_id', name='uniq_users0external_id'),
)
op.create_table('resource_provider_traits',
op.create_table(
'resource_provider_traits',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('trait_id', sa.Integer(), nullable=False),
@ -183,6 +206,7 @@ def upgrade():
sa.ForeignKeyConstraint(['trait_id'], ['traits.id'], ),
sa.PrimaryKeyConstraint('trait_id', 'resource_provider_id'),
)
op.create_index('resource_provider_traits_resource_provider_trait_idx',
op.create_index(
'resource_provider_traits_resource_provider_trait_idx',
'resource_provider_traits', ['resource_provider_id', 'trait_id'],
unique=False)

View File

@ -51,15 +51,13 @@ class ResourceProvider(BASE):
__tablename__ = "resource_providers"
__table_args__ = (
Index('resource_providers_uuid_idx', 'uuid'),
schema.UniqueConstraint('uuid',
name='uniq_resource_providers0uuid'),
schema.UniqueConstraint('uuid', name='uniq_resource_providers0uuid'),
Index('resource_providers_name_idx', 'name'),
Index('resource_providers_root_provider_id_idx',
'root_provider_id'),
Index('resource_providers_parent_provider_id_idx',
'parent_provider_id'),
schema.UniqueConstraint('name',
name='uniq_resource_providers0name')
schema.UniqueConstraint('name', name='uniq_resource_providers0name')
)
id = Column(Integer, primary_key=True, nullable=False)
@ -67,12 +65,12 @@ class ResourceProvider(BASE):
name = Column(Unicode(200), nullable=True)
generation = Column(Integer, default=0)
# Represents the root of the "tree" that the provider belongs to
root_provider_id = Column(Integer, ForeignKey('resource_providers.id'),
nullable=True)
root_provider_id = Column(
Integer, ForeignKey('resource_providers.id'), nullable=True)
# The immediate parent provider of this provider, or NULL if there is no
# parent. If parent_provider_id == NULL then root_provider_id == id
parent_provider_id = Column(Integer, ForeignKey('resource_providers.id'),
nullable=True)
parent_provider_id = Column(
Integer, ForeignKey('resource_providers.id'), nullable=True)
class Inventory(BASE):
@ -86,7 +84,8 @@ class Inventory(BASE):
'resource_class_id'),
Index('inventories_resource_provider_resource_class_idx',
'resource_provider_id', 'resource_class_id'),
schema.UniqueConstraint('resource_provider_id', 'resource_class_id',
schema.UniqueConstraint(
'resource_provider_id', 'resource_class_id',
name='uniq_inventories0resource_provider_resource_class')
)

View File

@ -409,9 +409,9 @@ def _set_allocations_for_consumer(req, schema):
# NOTE(jaypipes): This will only occur 1.28+. The JSONSchema will
# prevent an empty allocations object from being passed when there is
# no consumer generation, so this is safe to do.
data_util.ensure_consumer(context, consumer_uuid,
data.get('project_id'), data.get('user_id'),
data.get('consumer_generation'), want_version)
data_util.ensure_consumer(
context, consumer_uuid, data.get('project_id'),
data.get('user_id'), data.get('consumer_generation'), want_version)
allocations = alloc_obj.get_all_by_consumer_id(context, consumer_uuid)
for allocation in allocations:
allocation.used = 0
@ -448,9 +448,8 @@ def _set_allocations_for_consumer(req, schema):
# capacity limits have been exceeded.
except exception.NotFound as exc:
raise webob.exc.HTTPBadRequest(
_("Unable to allocate inventory for consumer "
"%(consumer_uuid)s: %(error)s") %
{'consumer_uuid': consumer_uuid, 'error': exc})
_("Unable to allocate inventory for consumer %(consumer_uuid)s: "
"%(error)s") % {'consumer_uuid': consumer_uuid, 'error': exc})
except exception.InvalidInventory as exc:
raise webob.exc.HTTPConflict(
_('Unable to allocate inventory: %(error)s') % {'error': exc})
@ -558,8 +557,8 @@ def delete_allocations(req):
# activity. In that case, delete_all() will throw a NotFound exception.
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(
_("Allocation for consumer with id %(id)s not found."
"error: %(error)s") %
_("Allocation for consumer with id %(id)s not found. error: "
"%(error)s") %
{'id': consumer_uuid, 'error': exc})
else:
raise webob.exc.HTTPNotFound(

View File

@ -398,8 +398,8 @@ def delete_inventories(req):
raise webob.exc.HTTPConflict(
_('Unable to delete inventory for resource provider '
'%(rp_uuid)s because the inventory was updated by '
'another process. Please retry your request.')
% {'rp_uuid': resource_provider.uuid},
'another process. Please retry your request.') %
{'rp_uuid': resource_provider.uuid},
comment=errors.CONCURRENT_UPDATE)
except exception.InventoryInUse as ex:
raise webob.exc.HTTPConflict(ex.format_message(),

View File

@ -70,7 +70,7 @@ def _serialize_providers(environ, resource_providers, want_version):
provider_data = _serialize_provider(environ, provider, want_version)
output.append(provider_data)
last_modified = last_modified or timeutils.utcnow(with_timezone=True)
return ({"resource_providers": output}, last_modified)
return {"resource_providers": output}, last_modified
@wsgi_wrapper.PlacementWsgify
@ -106,8 +106,8 @@ def create_resource_provider(req):
# Whether exc.columns has one or two entries (in the event
# of both fields being duplicates) appears to be database
# dependent, so going with the complete solution here.
duplicate = ', '.join(['%s: %s' % (column, data[column])
for column in exc.columns])
duplicate = ', '.join(
['%s: %s' % (column, data[column]) for column in exc.columns])
raise webob.exc.HTTPConflict(
_('Conflicting resource provider %(duplicate)s already exists.') %
{'duplicate': duplicate},
@ -152,10 +152,10 @@ def delete_resource_provider(req):
_('Unable to delete resource provider %(rp_uuid)s: %(error)s') %
{'rp_uuid': uuid, 'error': exc},
comment=errors.PROVIDER_IN_USE)
except exception.NotFound as exc:
except exception.NotFound:
raise webob.exc.HTTPNotFound(
_("No resource provider with uuid %s found for delete") % uuid)
except exception.CannotDeleteParentResourceProvider as exc:
except exception.CannotDeleteParentResourceProvider:
raise webob.exc.HTTPConflict(
_("Unable to delete parent resource provider %(rp_uuid)s: "
"It has child resource providers.") % {'rp_uuid': uuid},
@ -287,7 +287,7 @@ def update_resource_provider(req):
try:
resource_provider.save()
except db_exc.DBDuplicateEntry as exc:
except db_exc.DBDuplicateEntry:
raise webob.exc.HTTPConflict(
_('Conflicting resource provider %(name)s already exists.') %
{'name': data['name']},

View File

@ -144,8 +144,7 @@ class RequestGroup(object):
for suff, group in by_suffix.items():
forbidden = [trait for trait in group.required_traits
if trait.startswith('!')]
group.required_traits = (
group.required_traits - set(forbidden))
group.required_traits = group.required_traits - set(forbidden)
group.forbidden_traits = set([trait.lstrip('!') for trait in
forbidden])
conflicts = group.forbidden_traits & group.required_traits

View File

@ -134,10 +134,12 @@ def _check_capacity_exceeded(ctx, allocs):
_ALLOC_TBL.c.resource_class_id)
usage = sa.alias(usage, name='usage')
inv_join = sql.join(_RP_TBL, _INV_TBL,
inv_join = sql.join(
_RP_TBL, _INV_TBL,
sql.and_(_RP_TBL.c.id == _INV_TBL.c.resource_provider_id,
_INV_TBL.c.resource_class_id.in_(rc_ids)))
primary_join = sql.outerjoin(inv_join, usage,
primary_join = sql.outerjoin(
inv_join, usage,
sql.and_(
_INV_TBL.c.resource_provider_id == usage.c.resource_provider_id,
_INV_TBL.c.resource_class_id == usage.c.resource_class_id)
@ -176,8 +178,8 @@ def _check_capacity_exceeded(ctx, allocs):
class_str = ', '.join([rc_cache.RC_CACHE.string_from_id(rc_id)
for rc_id in rc_ids])
provider_str = ', '.join(missing_provs)
raise exception.InvalidInventory(resource_class=class_str,
resource_provider=provider_str)
raise exception.InvalidInventory(
resource_class=class_str, resource_provider=provider_str)
res_providers = {}
rp_resource_class_sum = collections.defaultdict(

View File

@ -216,7 +216,8 @@ def _delete_inventory_from_provider(ctx, rp, to_delete):
delete.
"""
allocation_query = sa.select(
[_ALLOC_TBL.c.resource_class_id.label('resource_class')]).where(
[_ALLOC_TBL.c.resource_class_id.label('resource_class')]
).where(
sa.and_(_ALLOC_TBL.c.resource_provider_id == rp.id,
_ALLOC_TBL.c.resource_class_id.in_(to_delete))
).group_by(_ALLOC_TBL.c.resource_class_id)
@ -228,7 +229,8 @@ def _delete_inventory_from_provider(ctx, rp, to_delete):
raise exception.InventoryInUse(resource_classes=resource_classes,
resource_provider=rp.uuid)
del_stmt = _INV_TBL.delete().where(sa.and_(
del_stmt = _INV_TBL.delete().where(
sa.and_(
_INV_TBL.c.resource_provider_id == rp.id,
_INV_TBL.c.resource_class_id.in_(to_delete)))
res = ctx.session.execute(del_stmt)
@ -282,13 +284,15 @@ def _update_inventory_for_provider(ctx, rp, inv_list, to_update):
_ALLOC_TBL.c.resource_provider_id == rp.id,
_ALLOC_TBL.c.resource_class_id == rc_id))
allocations = ctx.session.execute(allocation_query).first()
if (allocations
and allocations['usage'] is not None
and allocations['usage'] > inv_record.capacity):
if (allocations and
allocations['usage'] is not None and
allocations['usage'] > inv_record.capacity):
exceeded.append((rp.uuid, rc_str))
upd_stmt = _INV_TBL.update().where(sa.and_(
upd_stmt = _INV_TBL.update().where(
sa.and_(
_INV_TBL.c.resource_provider_id == rp.id,
_INV_TBL.c.resource_class_id == rc_id)).values(
_INV_TBL.c.resource_class_id == rc_id)
).values(
total=inv_record.total,
reserved=inv_record.reserved,
min_unit=inv_record.min_unit,
@ -413,7 +417,8 @@ def _get_provider_by_uuid(context, uuid):
# TODO(jaypipes): Change this to an inner join when we are sure all
# root_provider_id values are NOT NULL
rp_to_root = sa.outerjoin(rpt, root, rpt.c.root_provider_id == root.c.id)
rp_to_parent = sa.outerjoin(rp_to_root, parent,
rp_to_parent = sa.outerjoin(
rp_to_root, parent,
rpt.c.parent_provider_id == parent.c.id)
cols = [
rpt.c.id,
@ -597,7 +602,8 @@ def _set_aggregates(context, resource_provider, provided_aggregates,
pass
for agg_id, agg_uuid in aggs_to_disassociate.items():
del_stmt = _RP_AGG_TBL.delete().where(sa.and_(
del_stmt = _RP_AGG_TBL.delete().where(
sa.and_(
_RP_AGG_TBL.c.resource_provider_id == rp_id,
_RP_AGG_TBL.c.aggregate_id == agg_id))
context.session.execute(del_stmt)
@ -786,7 +792,8 @@ def _provider_ids_from_rp_ids(context, rp_ids):
# TODO(jaypipes): Change this to an inner join when we are sure all
# root_provider_id values are NOT NULL
me_to_root = sa.outerjoin(me, root, me.c.root_provider_id == root.c.id)
me_to_parent = sa.outerjoin(me_to_root, parent,
me_to_parent = sa.outerjoin(
me_to_root, parent,
me.c.parent_provider_id == parent.c.id)
sel = sa.select(cols).select_from(me_to_parent)
sel = sel.where(me.c.id.in_(rp_ids))
@ -837,7 +844,8 @@ def _provider_ids_from_uuid(context, uuid):
# TODO(jaypipes): Change this to an inner join when we are sure all
# root_provider_id values are NOT NULL
me_to_root = sa.outerjoin(me, root, me.c.root_provider_id == root.c.id)
me_to_parent = sa.outerjoin(me_to_root, parent,
me_to_parent = sa.outerjoin(
me_to_root, parent,
me.c.parent_provider_id == parent.c.id)
sel = sa.select(cols).select_from(me_to_parent)
sel = sel.where(me.c.uuid == uuid)
@ -1095,9 +1103,9 @@ class ResourceProvider(object):
if parent_uuid == self.uuid:
raise exception.ObjectActionError(
action='create',
reason=_('parent provider UUID cannot be same as '
'UUID. Please set parent provider UUID to '
'None if there is no parent.'))
reason=_('parent provider UUID cannot be same as UUID. '
'Please set parent provider UUID to None if '
'there is no parent.'))
parent_ids = _provider_ids_from_uuid(context, parent_uuid)
if parent_ids is None:
@ -1140,9 +1148,8 @@ class ResourceProvider(object):
raise exception.CannotDeleteParentResourceProvider()
# Don't delete the resource provider if it has allocations.
rp_allocations = context.session.query(models.Allocation).\
filter(models.Allocation.resource_provider_id == _id).\
count()
rp_allocations = context.session.query(models.Allocation).filter(
models.Allocation.resource_provider_id == _id).count()
if rp_allocations:
raise exception.ResourceProviderInUse()
# Delete any inventory associated with the resource provider
@ -1152,12 +1159,12 @@ class ResourceProvider(object):
# Delete any aggregate associations for the resource provider
# The name substitution on the next line is needed to satisfy pep8
RPA_model = models.ResourceProviderAggregate
context.session.query(RPA_model).\
filter(RPA_model.resource_provider_id == _id).delete()
context.session.query(RPA_model).filter(
RPA_model.resource_provider_id == _id).delete()
# delete any trait associations for the resource provider
RPT_model = models.ResourceProviderTrait
context.session.query(RPT_model).\
filter(RPT_model.resource_provider_id == _id).delete()
context.session.query(RPT_model).filter(
RPT_model.resource_provider_id == _id).delete()
# set root_provider_id to null to make deletion possible
context.session.query(models.ResourceProvider).\
filter(models.ResourceProvider.id == _id,
@ -1203,8 +1210,8 @@ class ResourceProvider(object):
my_ids.parent_id != parent_ids.id):
raise exception.ObjectActionError(
action='update',
reason=_('re-parenting a provider is not '
'currently allowed.'))
reason=_('re-parenting a provider is not currently '
'allowed.'))
if my_ids.parent_uuid is None:
# So the user specifies a parent for an RP that doesn't
# have one. We have to check that by this new parent we
@ -1230,8 +1237,8 @@ class ResourceProvider(object):
if my_ids.parent_id is not None:
raise exception.ObjectActionError(
action='update',
reason=_('un-parenting a provider is not '
'currently allowed.'))
reason=_('un-parenting a provider is not currently '
'allowed.'))
db_rp = context.session.query(models.ResourceProvider).filter_by(
id=id).first()
@ -1462,9 +1469,11 @@ class ResourceProviderList(common_obj.ObjectList):
# TODO(jaypipes): Convert this to an inner join once all
# root_provider_id values are NOT NULL
rp_to_root = sa.outerjoin(rp, root_rp,
rp_to_root = sa.outerjoin(
rp, root_rp,
rp.c.root_provider_id == root_rp.c.id)
rp_to_parent = sa.outerjoin(rp_to_root, parent_rp,
rp_to_parent = sa.outerjoin(
rp_to_root, parent_rp,
rp.c.parent_provider_id == parent_rp.c.id)
query = sa.select(cols).select_from(rp_to_parent)
@ -1490,7 +1499,8 @@ class ResourceProviderList(common_obj.ObjectList):
# TODO(jaypipes): Remove this OR condition when root_provider_id
# is not nullable in the database and all resource provider records
# have populated the root provider ID.
where_cond = sa.or_(rp.c.id == root_id,
where_cond = sa.or_(
rp.c.id == root_id,
rp.c.root_provider_id == root_id)
query = query.where(where_cond)
@ -1549,13 +1559,15 @@ class ResourceProviderList(common_obj.ObjectList):
# comes from the above filters
# First JOIN between inventories and RPs is here
inv_join = sa.join(rp_to_parent, _INV_TBL,
inv_join = sa.join(
rp_to_parent,
_INV_TBL,
rp.c.id == _INV_TBL.c.resource_provider_id)
# Now, below is the LEFT JOIN for getting the allocations usage
usage = _usage_select(list(resources))
usage_join = sa.outerjoin(inv_join, usage,
sa.and_(
usage_join = sa.outerjoin(
inv_join, usage, sa.and_(
usage.c.resource_provider_id == (
_INV_TBL.c.resource_provider_id),
usage.c.resource_class_id == _INV_TBL.c.resource_class_id))
@ -1986,11 +1998,13 @@ class TraitList(common_obj.ObjectList):
models.Trait.name.like(six.text_type(filters['prefix'] + '%')))
if 'associated' in filters:
if filters['associated']:
query = query.join(models.ResourceProviderTrait,
query = query.join(
models.ResourceProviderTrait,
models.Trait.id == models.ResourceProviderTrait.trait_id
).distinct()
else:
query = query.outerjoin(models.ResourceProviderTrait,
query = query.outerjoin(
models.ResourceProviderTrait,
models.Trait.id == models.ResourceProviderTrait.trait_id
).filter(models.ResourceProviderTrait.trait_id == null())
@ -2019,9 +2033,9 @@ class AllocationRequestResource(object):
self.amount = amount
def __eq__(self, other):
return (self.resource_provider.id == other.resource_provider.id) and (
self.resource_class == other.resource_class) and (
self.amount == other.amount)
return ((self.resource_provider.id == other.resource_provider.id) and
(self.resource_class == other.resource_class) and
(self.amount == other.amount))
def __hash__(self):
return hash((self.resource_provider.id,
@ -2639,7 +2653,8 @@ def _get_trees_matching_all(ctx, resources, required_traits, forbidden_traits,
rc_provs_with_inv = _anchors_for_sharing_providers(
ctx, sharing_providers, get_id=True)
provs_with_inv_rc.add_rps(rc_provs_with_inv, rc_id)
LOG.debug("considering %d sharing providers with %d %s, "
LOG.debug(
"considering %d sharing providers with %d %s, "
"now we've got %d provider trees",
len(sharing_providers), amount, rc_name,
len(provs_with_inv_rc.trees))
@ -2649,7 +2664,8 @@ def _get_trees_matching_all(ctx, resources, required_traits, forbidden_traits,
# for this resource class. Here "tree" includes sharing providers
# in its terminology
provs_with_inv.merge_common_trees(provs_with_inv_rc)
LOG.debug("found %d providers under %d trees after filtering by "
LOG.debug(
"found %d providers under %d trees after filtering by "
"previous result",
len(provs_with_inv.rps), len(provs_with_inv_rc.trees))
if not provs_with_inv:
@ -2915,8 +2931,9 @@ def _alloc_candidates_single_provider(ctx, requested_resources, rp_tuples):
return alloc_requests, list(summaries.values())
def _alloc_candidates_multiple_providers(ctx, requested_resources,
required_traits, forbidden_traits, rp_candidates):
def _alloc_candidates_multiple_providers(
ctx, requested_resources, required_traits, forbidden_traits,
rp_candidates):
"""Returns a tuple of (allocation requests, provider summaries) for a
supplied set of requested resource amounts and tuples of
(rp_id, root_id, rc_id). The supplied resource provider trees have
@ -3004,8 +3021,9 @@ def _alloc_candidates_multiple_providers(ctx, requested_resources,
# (ARR(rc1, ss2), ARR(rc2, ss1), ARR(rc3, ss1)),
# (ARR(rc1, ss2), ARR(rc2, ss2), ARR(rc3, ss1))]
for res_requests in itertools.product(*request_groups):
if not _check_traits_for_alloc_request(res_requests,
summaries, prov_traits, required_traits, forbidden_traits):
if not _check_traits_for_alloc_request(
res_requests, summaries, prov_traits, required_traits,
forbidden_traits):
# This combination doesn't satisfy trait constraints
continue
root_alloc_reqs.add(
@ -3432,21 +3450,21 @@ class AllocationCandidates(object):
context, required_trait_map)
if not trait_rps:
return [], []
rp_candidates = _get_trees_matching_all(context, resources,
required_trait_map, forbidden_trait_map,
rp_candidates = _get_trees_matching_all(
context, resources, required_trait_map, forbidden_trait_map,
sharing_providers, member_of, tree_root_id)
return _alloc_candidates_multiple_providers(context, resources,
required_trait_map, forbidden_trait_map, rp_candidates)
return _alloc_candidates_multiple_providers(
context, resources, required_trait_map, forbidden_trait_map,
rp_candidates)
# Either we are processing a single-RP request group, or there are no
# sharing providers that (help) satisfy the request. Get a list of
# tuples of (internal provider ID, root provider ID) that have ALL
# the requested resources and more efficiently construct the
# allocation requests.
rp_tuples = _get_provider_ids_matching(context, resources,
required_trait_map,
forbidden_trait_map, member_of,
tree_root_id)
rp_tuples = _get_provider_ids_matching(
context, resources, required_trait_map, forbidden_trait_map,
member_of, tree_root_id)
return _alloc_candidates_single_provider(context, resources, rp_tuples)
@classmethod

View File

@ -48,8 +48,8 @@ class RequestLog(object):
@staticmethod
def _get_uri(environ):
req_uri = (environ.get('SCRIPT_NAME', '')
+ environ.get('PATH_INFO', ''))
req_uri = (environ.get('SCRIPT_NAME', '') +
environ.get('PATH_INFO', ''))
if environ.get('QUERY_STRING'):
req_uri += '?' + environ['QUERY_STRING']
return req_uri

View File

@ -149,9 +149,9 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
empty_root_id = None
# Run it!
res = rp_obj._get_provider_ids_matching(self.ctx, resources,
empty_req_traits, empty_forbidden_traits, empty_agg,
empty_root_id)
res = rp_obj._get_provider_ids_matching(
self.ctx, resources, empty_req_traits, empty_forbidden_traits,
empty_agg, empty_root_id)
# We should get all the incl_* RPs
expected = [incl_biginv_noalloc, incl_extra_full]
@ -165,39 +165,44 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
# _get_provider_ids_matching()'s required_traits and forbidden_traits
# arguments maps, keyed by trait name, of the trait internal ID
req_traits = {os_traits.HW_CPU_X86_AVX2: avx2_t.id}
res = rp_obj._get_provider_ids_matching(self.ctx, resources,
req_traits, empty_forbidden_traits, empty_agg, empty_root_id)
res = rp_obj._get_provider_ids_matching(
self.ctx, resources, req_traits, empty_forbidden_traits, empty_agg,
empty_root_id)
self.assertEqual([], res)
# Next let's set the required trait to an excl_* RPs.
# This should result in no results returned as well.
excl_big_md_noalloc.set_traits([avx2_t])
res = rp_obj._get_provider_ids_matching(self.ctx, resources,
req_traits, empty_forbidden_traits, empty_agg, empty_root_id)
res = rp_obj._get_provider_ids_matching(
self.ctx, resources, req_traits, empty_forbidden_traits, empty_agg,
empty_root_id)
self.assertEqual([], res)
# OK, now add the trait to one of the incl_* providers and verify that
# provider now shows up in our results
incl_biginv_noalloc.set_traits([avx2_t])
res = rp_obj._get_provider_ids_matching(self.ctx, resources,
req_traits, empty_forbidden_traits, empty_agg, empty_root_id)
res = rp_obj._get_provider_ids_matching(
self.ctx, resources, req_traits, empty_forbidden_traits, empty_agg,
empty_root_id)
rp_ids = [r[0] for r in res]
self.assertEqual([incl_biginv_noalloc.id], rp_ids)
# Let's see if the tree_root_id filter works
root_id = incl_biginv_noalloc.id
res = rp_obj._get_provider_ids_matching(self.ctx, resources,
empty_req_traits, empty_forbidden_traits, empty_agg, root_id)
res = rp_obj._get_provider_ids_matching(
self.ctx, resources, empty_req_traits, empty_forbidden_traits,
empty_agg, root_id)
rp_ids = [r[0] for r in res]
self.assertEqual([incl_biginv_noalloc.id], rp_ids)
# We don't get anything if the specified tree doesn't satisfy the
# requirements in the first place
root_id = excl_allused.id
res = rp_obj._get_provider_ids_matching(self.ctx, resources,
empty_req_traits, empty_forbidden_traits, empty_agg, root_id)
res = rp_obj._get_provider_ids_matching(
self.ctx, resources, empty_req_traits, empty_forbidden_traits,
empty_agg, root_id)
self.assertEqual([], res)
def test_get_provider_ids_matching_with_multiple_forbidden(self):
@ -219,8 +224,9 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
member_of = [[uuids.agg1]]
empty_root_id = None
res = rp_obj._get_provider_ids_matching(self.ctx, resources,
empty_req_traits, forbidden_traits, member_of, empty_root_id)
res = rp_obj._get_provider_ids_matching(
self.ctx, resources, empty_req_traits, forbidden_traits, member_of,
empty_root_id)
self.assertEqual({(rp1.id, rp1.id)}, set(res))
def test_get_provider_ids_having_all_traits(self):
@ -1089,8 +1095,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
for cn in (cn1, cn2, cn3):
tb.set_traits(cn, os_traits.HW_CPU_X86_AVX2)
alloc_cands = self._get_allocation_candidates(requests={'':
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
requests={'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
@ -1137,16 +1143,15 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
tb.set_traits(cn3, os_traits.HW_CPU_X86_AVX2,
os_traits.STORAGE_DISK_SSD)
alloc_cands = self._get_allocation_candidates(
{'':
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates({
'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([
os_traits.HW_CPU_X86_AVX2, os_traits.STORAGE_DISK_SSD
]),
)}
)
})
# There should be only cn3 in the returned allocation candidates
expected = [
@ -2213,8 +2218,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# function with the required trait no longer has any inventory.
self.allocate_from_provider(pf1, orc.SRIOV_NET_VF, 8)
alloc_cands = self._get_allocation_candidates(
{'':
alloc_cands = self._get_allocation_candidates({
'':
placement_lib.RequestGroup(
use_same_provider=False,
resources={
@ -2223,8 +2228,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
orc.SRIOV_NET_VF: 1,
},
required_traits=[os_traits.HW_NIC_OFFLOAD_GENEVE],
)}
)
})
self._validate_allocation_requests([], alloc_cands)
self._validate_provider_summary_resources({}, alloc_cands)
@ -2263,8 +2268,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# Before we even set up any providers, verify that the short-circuits
# work to return empty lists
trees = rp_obj._get_trees_matching_all(self.ctx, resources, req_traits,
forbidden_traits, sharing, member_of, tree_root_id)
trees = rp_obj._get_trees_matching_all(
self.ctx, resources, req_traits, forbidden_traits, sharing,
member_of, tree_root_id)
self.assertEqual([], trees)
# We are setting up 3 trees of providers that look like this:
@ -2307,8 +2313,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# has inventory we will use...
tb.set_traits(cn, os_traits.HW_NIC_OFFLOAD_GENEVE)
trees = rp_obj._get_trees_matching_all(self.ctx, resources, req_traits,
forbidden_traits, sharing, member_of, tree_root_id)
trees = rp_obj._get_trees_matching_all(
self.ctx, resources, req_traits, forbidden_traits, sharing,
member_of, tree_root_id)
# trees is an instance of `RPCandidateList`.
# extract root provider ids from here.
tree_root_ids = trees.trees
@ -2325,8 +2332,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# Let's see if the tree_root_id filter works
tree_root_id = self.get_provider_id_by_name('cn1')
trees = rp_obj._get_trees_matching_all(self.ctx, resources, req_traits,
forbidden_traits, sharing, member_of, tree_root_id)
trees = rp_obj._get_trees_matching_all(
self.ctx, resources, req_traits, forbidden_traits, sharing,
member_of, tree_root_id)
tree_root_ids = trees.trees
self.assertEqual(1, len(tree_root_ids))
@ -2349,8 +2357,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
uuids.cn2_numa1_pf1)
self.allocate_from_provider(cn2_pf1, orc.SRIOV_NET_VF, 8)
trees = rp_obj._get_trees_matching_all(self.ctx, resources, req_traits,
forbidden_traits, sharing, member_of, tree_root_id)
trees = rp_obj._get_trees_matching_all(
self.ctx, resources, req_traits, forbidden_traits, sharing,
member_of, tree_root_id)
tree_root_ids = trees.trees
self.assertEqual(2, len(tree_root_ids))
@ -2378,8 +2387,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
req_traits = {
geneve_t.name: geneve_t.id,
}
trees = rp_obj._get_trees_matching_all(self.ctx, resources, req_traits,
forbidden_traits, sharing, member_of, tree_root_id)
trees = rp_obj._get_trees_matching_all(
self.ctx, resources, req_traits, forbidden_traits, sharing,
member_of, tree_root_id)
tree_root_ids = trees.trees
self.assertEqual(1, len(tree_root_ids))
@ -2409,8 +2419,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
geneve_t.name: geneve_t.id,
avx2_t.name: avx2_t.id,
}
trees = rp_obj._get_trees_matching_all(self.ctx, resources, req_traits,
forbidden_traits, sharing, member_of, tree_root_id)
trees = rp_obj._get_trees_matching_all(
self.ctx, resources, req_traits, forbidden_traits, sharing,
member_of, tree_root_id)
tree_root_ids = trees.trees
self.assertEqual(0, len(tree_root_ids))
@ -2422,8 +2433,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
forbidden_traits = {
avx2_t.name: avx2_t.id,
}
trees = rp_obj._get_trees_matching_all(self.ctx, resources, req_traits,
forbidden_traits, sharing, member_of, tree_root_id)
trees = rp_obj._get_trees_matching_all(
self.ctx, resources, req_traits, forbidden_traits, sharing,
member_of, tree_root_id)
tree_root_ids = trees.trees
self.assertEqual(1, len(tree_root_ids))
@ -2461,8 +2473,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
uuids.cn3_numa1_pf1)
self.allocate_from_provider(cn3_pf1, orc.SRIOV_NET_VF, 8)
trees = rp_obj._get_trees_matching_all(self.ctx, resources, req_traits,
forbidden_traits, sharing, member_of, tree_root_id)
trees = rp_obj._get_trees_matching_all(
self.ctx, resources, req_traits, forbidden_traits, sharing,
member_of, tree_root_id)
self.assertEqual([], trees)
def test_simple_tree_with_shared_provider(self):
@ -2528,16 +2541,15 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
tb.set_traits(ss1, 'MISC_SHARES_VIA_AGGREGATE')
tb.set_traits(ss2, 'MISC_SHARES_VIA_AGGREGATE')
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates({
'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
orc.VCPU: 2,
orc.SRIOV_NET_VF: 1,
orc.DISK_GB: 1500,
})
}
)
})
# cn2 is not in the allocation candidates because it doesn't have
# enough DISK_GB resource with shared providers.
@ -2572,8 +2584,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# Now add required traits to the mix and verify we still get the
# inventory of SRIOV_NET_VF.
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates({
'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
orc.VCPU: 2,
@ -2581,8 +2593,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
orc.DISK_GB: 1500,
},
required_traits=[os_traits.HW_NIC_OFFLOAD_GENEVE])
}
)
})
# cn1_numa0_pf0 is not in the allocation candidates because it
# doesn't have the required trait.

View File

@ -146,8 +146,8 @@ class PlacementDbBaseTestCase(base.TestCase):
consumer_id = alloc_dict.pop('consumer_id')
consumer = ensure_consumer(
self.ctx, self.user_obj, self.project_obj, consumer_id)
alloc = alloc_obj.Allocation(resource_provider=rp,
consumer=consumer, **alloc_dict)
alloc = alloc_obj.Allocation(
resource_provider=rp, consumer=consumer, **alloc_dict)
alloc_obj.replace_all(self.ctx, [alloc])
return rp, alloc

View File

@ -33,7 +33,8 @@ ALLOC_TBL = rp_obj._ALLOC_TBL
class ConsumerTestCase(tb.PlacementDbBaseTestCase):
def test_non_existing_consumer(self):
self.assertRaises(exception.ConsumerNotFound,
self.assertRaises(
exception.ConsumerNotFound,
consumer_obj.Consumer.get_by_uuid, self.ctx,
uuids.non_existing_consumer)

View File

@ -339,9 +339,8 @@ class ReshapeTestCase(tb.PlacementDbBaseTestCase):
# Reduce the amount of storage to 2000, from 100000.
new_ss_inv = rp_obj.InventoryList(objects=[
rp_obj.Inventory(
resource_provider=ss_threadB,
resource_class='DISK_GB', total=2000, reserved=0,
max_unit=1000, min_unit=1, step_size=1,
resource_provider=ss_threadB, resource_class='DISK_GB',
total=2000, reserved=0, max_unit=1000, min_unit=1, step_size=1,
allocation_ratio=1.0)])
ss_threadB.set_inventory(new_ss_inv)
# Double check our storage provider's generation is now greater than

View File

@ -40,8 +40,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
self.ctx, [])
def test_create_resource_provider_requires_uuid(self):
resource_provider = rp_obj.ResourceProvider(
context = self.ctx)
resource_provider = rp_obj.ResourceProvider(context=self.ctx)
self.assertRaises(exception.ObjectActionError,
resource_provider.create)
@ -855,14 +854,12 @@ class ResourceProviderListTestCase(tb.PlacementDbBaseTestCase):
# given it has enough disk but we also need to make sure that the
# first RP is not acceptable because of the VCPU request
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, {'resources': {orc.VCPU: 2,
orc.DISK_GB: 1022}})
self.ctx, {'resources': {orc.VCPU: 2, orc.DISK_GB: 1022}})
self.assertEqual(1, len(resource_providers))
# Now, we are asking for both disk and VCPU resources that all the RPs
# can't accept (as the 2nd RP is having a reserved size)
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, {'resources': {orc.VCPU: 2,
orc.DISK_GB: 1024}})
self.ctx, {'resources': {orc.VCPU: 2, orc.DISK_GB: 1024}})
self.assertEqual(0, len(resource_providers))
# We also want to verify that asking for a specific RP can also be
@ -914,25 +911,25 @@ class ResourceProviderListTestCase(tb.PlacementDbBaseTestCase):
self.assertNotIn('rp_name_4', names)
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, filters={'member_of':
[[uuidsentinel.agg_a, uuidsentinel.agg_b]]})
self.ctx,
filters={'member_of': [[uuidsentinel.agg_a, uuidsentinel.agg_b]]})
self.assertEqual(2, len(resource_providers))
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, filters={'member_of':
[[uuidsentinel.agg_a, uuidsentinel.agg_b]],
self.ctx,
filters={'member_of': [[uuidsentinel.agg_a, uuidsentinel.agg_b]],
'name': u'rp_name_1'})
self.assertEqual(1, len(resource_providers))
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, filters={'member_of':
[[uuidsentinel.agg_a, uuidsentinel.agg_b]],
self.ctx,
filters={'member_of': [[uuidsentinel.agg_a, uuidsentinel.agg_b]],
'name': u'barnabas'})
self.assertEqual(0, len(resource_providers))
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, filters={'member_of':
[[uuidsentinel.agg_1, uuidsentinel.agg_2]]})
self.ctx,
filters={'member_of': [[uuidsentinel.agg_1, uuidsentinel.agg_2]]})
self.assertEqual(0, len(resource_providers))
def test_get_all_by_required(self):
@ -1132,8 +1129,9 @@ class TestResourceProviderAggregates(tb.PlacementDbBaseTestCase):
[(s5.id, rp.id) for rp in (r1, r2, s1, s5)]
)
self.assertItemsEqual(
expected, rp_obj._anchors_for_sharing_providers(self.ctx,
[s1.id, s2.id, s3.id, s4.id, s5.id], get_id=True))
expected,
rp_obj._anchors_for_sharing_providers(
self.ctx, [s1.id, s2.id, s3.id, s4.id, s5.id], get_id=True))
class TestAllocation(tb.PlacementDbBaseTestCase):
@ -1600,7 +1598,8 @@ class ResourceProviderTraitTestCase(tb.PlacementDbBaseTestCase):
self.assertEqual(t.name, 'CUSTOM_TRAIT_A')
def test_trait_get_non_existed_trait(self):
self.assertRaises(exception.TraitNotFound,
self.assertRaises(
exception.TraitNotFound,
rp_obj.Trait.get_by_name, self.ctx, 'CUSTOM_TRAIT_A')
def test_bug_1760322(self):
@ -1652,12 +1651,14 @@ class ResourceProviderTraitTestCase(tb.PlacementDbBaseTestCase):
t.name = name
t.create()
traits = rp_obj.TraitList.get_all(self.ctx,
traits = rp_obj.TraitList.get_all(
self.ctx,
filters={'name_in': ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B']})
self._assert_traits(['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B'], traits)
def test_traits_get_all_with_non_existed_name(self):
traits = rp_obj.TraitList.get_all(self.ctx,
traits = rp_obj.TraitList.get_all(
self.ctx,
filters={'name_in': ['CUSTOM_TRAIT_X', 'CUSTOM_TRAIT_Y']})
self.assertEqual(0, len(traits))
@ -1668,15 +1669,15 @@ class ResourceProviderTraitTestCase(tb.PlacementDbBaseTestCase):
t.name = name
t.create()
traits = rp_obj.TraitList.get_all(self.ctx,
filters={'prefix': 'CUSTOM'})
traits = rp_obj.TraitList.get_all(
self.ctx, filters={'prefix': 'CUSTOM'})
self._assert_traits(
['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B', 'CUSTOM_TRAIT_C'],
traits)
def test_traits_get_all_with_non_existed_prefix(self):
traits = rp_obj.TraitList.get_all(self.ctx,
filters={"prefix": "NOT_EXISTED"})
traits = rp_obj.TraitList.get_all(
self.ctx, filters={"prefix": "NOT_EXISTED"})
self.assertEqual(0, len(traits))
def test_set_traits_for_resource_provider(self):
@ -1693,8 +1694,8 @@ class ResourceProviderTraitTestCase(tb.PlacementDbBaseTestCase):
generation = rp.generation
trait_names.remove('CUSTOM_TRAIT_A')
updated_traits = rp_obj.TraitList.get_all(self.ctx,
filters={'name_in': trait_names})
updated_traits = rp_obj.TraitList.get_all(
self.ctx, filters={'name_in': trait_names})
self._assert_traits(trait_names, updated_traits)
tb.set_traits(rp, *trait_names)
rp_traits = rp_obj.TraitList.get_all_by_resource_provider(self.ctx, rp)
@ -1748,13 +1749,14 @@ class ResourceProviderTraitTestCase(tb.PlacementDbBaseTestCase):
t.name = name
t.create()
associated_traits = rp_obj.TraitList.get_all(self.ctx,
associated_traits = rp_obj.TraitList.get_all(
self.ctx,
filters={'name_in': ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B']})
rp1.set_traits(associated_traits)
rp2.set_traits(associated_traits)
self._assert_traits(['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B'],
rp_obj.TraitList.get_all(self.ctx,
filters={'associated': True}))
self._assert_traits(
['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B'],
rp_obj.TraitList.get_all(self.ctx, filters={'associated': True}))
def test_traits_get_all_with_associated_false(self):
rp1 = self._create_provider('fake_resource_provider1')
@ -1765,13 +1767,14 @@ class ResourceProviderTraitTestCase(tb.PlacementDbBaseTestCase):
t.name = name
t.create()
associated_traits = rp_obj.TraitList.get_all(self.ctx,
associated_traits = rp_obj.TraitList.get_all(
self.ctx,
filters={'name_in': ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B']})
rp1.set_traits(associated_traits)
rp2.set_traits(associated_traits)
self._assert_traits_in(['CUSTOM_TRAIT_C'],
rp_obj.TraitList.get_all(self.ctx,
filters={'associated': False}))
self._assert_traits_in(
['CUSTOM_TRAIT_C'],
rp_obj.TraitList.get_all(self.ctx, filters={'associated': False}))
class SharedProviderTestCase(tb.PlacementDbBaseTestCase):

View File

@ -52,10 +52,9 @@ class TestCommandParsers(testtools.TestCase):
('db_sync', ['db', 'sync']),
('db_stamp', ['db', 'stamp', 'b4ed3a175331']),
('db_online_data_migrations',
['db', 'online_data_migrations']),
]:
with mock.patch('placement.cmd.manage.DbCommands.'
+ command) as mock_command:
['db', 'online_data_migrations'])]:
with mock.patch('placement.cmd.manage.DbCommands.' +
command) as mock_command:
self.conf(args, default_config_files=[])
self.conf.command.func()
mock_command.assert_called_once_with()

View File

@ -72,16 +72,16 @@ class TestAllocationListNoDB(base.TestCase):
@mock.patch('placement.objects.allocation.'
'_get_allocations_by_provider_id',
return_value=[_ALLOCATION_DB])
def test_get_all_by_resource_provider(self, mock_get_allocations_from_db,
mock_create_consumers):
def test_get_all_by_resource_provider(
self, mock_get_allocations_from_db, mock_create_consumers):
rp = rp_obj.ResourceProvider(self.context,
id=_RESOURCE_PROVIDER_ID,
uuid=uuids.resource_provider)
allocations = alloc_obj.get_all_by_resource_provider(self.context, rp)
self.assertEqual(1, len(allocations))
mock_get_allocations_from_db.assert_called_once_with(self.context,
rp.id)
mock_get_allocations_from_db.assert_called_once_with(
self.context, rp.id)
self.assertEqual(_ALLOCATION_DB['used'], allocations[0].used)
self.assertEqual(_ALLOCATION_DB['created_at'],
allocations[0].created_at)

View File

@ -195,7 +195,8 @@ class TestInventoryList(base.TestCase):
def test_find(self):
rp = resource_provider.ResourceProvider(
self.context, uuid=uuids.rp_uuid)
inv_list = resource_provider.InventoryList(objects=[
inv_list = resource_provider.InventoryList(
objects=[
resource_provider.Inventory(
resource_provider=rp,
resource_class=orc.VCPU,

View File

@ -39,7 +39,8 @@ class DeployTest(testtools.TestCase):
auth_token_opts = auth_token.AUTH_TOKEN_OPTS[0][1]
conf_fixture.register_opts(auth_token_opts, group='keystone_authtoken')
www_authenticate_uri = 'http://example.com/identity'
conf_fixture.config(www_authenticate_uri=www_authenticate_uri,
conf_fixture.config(
www_authenticate_uri=www_authenticate_uri,
group='keystone_authtoken')
# ensure that the auth_token middleware is chosen
conf_fixture.config(auth_strategy='keystone', group='api')

View File

@ -62,5 +62,4 @@ class TestFaultWrapper(testtools.TestCase):
def test_fault_log(self, mocked_log):
self.fail_app(self.environ, self.start_response_mock)
mocked_log.exception.assert_called_once_with(
'Placement API unexpected error: %s',
mock.ANY)
'Placement API unexpected error: %s', mock.ANY)

View File

@ -147,7 +147,8 @@ class MicroversionSequentialTest(testtools.TestCase):
previous_min_version = microversion_parse.parse_version_string(
'%s.%s' % (previous_min_version.major,
previous_min_version.minor - 1))
self.assertEqual(previous_min_version, method[1],
self.assertEqual(
previous_min_version, method[1],
            "The microversions aren't sequential in the method %s" %
method_name)
previous_min_version = method[0]

View File

@ -762,8 +762,8 @@ class TestParseQsRequestGroups(testtools.TestCase):
'Conflicting required and forbidden traits found '
'in the following traits keys: required: (CUSTOM_PHYSNET1)')
exc = self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs,
version=(1, 22))
exc = self.assertRaises(
webob.exc.HTTPBadRequest, self.do_parse, qs, version=(1, 22))
self.assertEqual(expected_message, six.text_type(exc))
def test_forbidden_two_groups(self):

17
tox.ini
View File

@ -146,21 +146,10 @@ envdir = {toxworkdir}/shared
commands = bandit -r placement -x tests -n 5 -ll
[flake8]
# E125 is deliberately excluded. See
# https://github.com/jcrocholl/pep8/issues/126. It's just wrong.
#
# Most of the whitespace related rules (E12* and E131) are excluded
# because while they are often useful guidelines, strict adherence to
# them ends up causing some really odd code formatting and forced
# extra line breaks. Updating code to enforce these will be a hard sell.
#
# H405 is another one that is good as a guideline, but sometimes
# multiline doc strings just don't have a natural summary
# line. Rejecting code for this reason is wrong.
#
# E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301
enable-extensions = H106,H203,H904
ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405
# H405 is a good guideline, but sometimes multiline doc strings just don't have
# a natural summary line. Rejecting code for this reason is wrong.
ignore = H405
exclude = .venv,.git,.tox,dist,*lib/python*,*egg,build,releasenotes
# To get a list of functions that have a complexity of 15 or more, set
# max-complexity to 15 and run 'tox -epep8'.