Use external placement in functional tests

Adjust the fixtures used by the functional tests so they
use placement database and web fixtures defined by placement
code. To avoid making redundant changes, the solely placement-
related unit and functional tests are removed, but the placement
code itself is not (yet).

openstack-placement is required by the functional tests. It is not
added to test-requirements as we do not want unit tests to depend
on placement in any way, and we enforce this by not having placement
in the test env.

The concept of tox-siblings is used to ensure that the
placement requirement will be satisfied correctly if there is a
depends-on. To make this happen, the functional jobs defined in
.zuul.yaml are updated to require openstack/placement.

tox.ini has to be updated to use an envdir that has the same
name as the job. Otherwise the tox siblings role in ansible cannot work.

The handling of the placement fixtures is moved out of nova/test.py
into the functional tests that actually use it because we do not
want unit tests (which get the base test class out of test.py) to
have anything to do with placement. This requires adjusting some
test files to use absolute import.

Similarly, a test of the comparison function for the api samples tests
is moved into functional, because it depends on placement functionality.

TestUpgradeCheckResourceProviders in unit.cmd.test_status is moved into
a new test file: nova/tests/functional/test_nova_status.py. This is done
because it requires the PlacementFixture, which is only available to
functional tests. A MonkeyPatch is required in the test to make sure that
the right context managers are used at the right time in the command
itself (otherwise some tables do not exist). In the test itself, to avoid
speaking directly to the placement database, which would require
manipulating the RequestContext objects, resource providers are now
created over the API.

Co-Authored-By: Balazs Gibizer <balazs.gibizer@ericsson.com>
Change-Id: Idaed39629095f86d24a54334c699a26c218c6593
This commit is contained in:
Chris Dent 2018-11-14 14:02:25 +01:00
parent c72dafad80
commit 787bb33606
125 changed files with 562 additions and 18013 deletions

View File

@ -48,6 +48,8 @@
Run tox-based functional tests for the OpenStack Nova project with Nova
specific irrelevant-files list. Uses tox with the ``functional``
environment.
required-projects:
- openstack/placement
irrelevant-files: &functional-irrelevant-files
- ^.*\.rst$
- ^api-.*$
@ -56,6 +58,7 @@
- ^releasenotes/.*$
vars:
tox_envlist: functional
tox_install_siblings: true
timeout: 3600
- job:
@ -65,9 +68,12 @@
Run tox-based functional tests for the OpenStack Nova project
under cPython version 3.5. with Nova specific irrelevant-files list.
Uses tox with the ``functional-py35`` environment.
required-projects:
- openstack/placement
irrelevant-files: *functional-irrelevant-files
vars:
tox_envlist: functional-py35
tox_install_siblings: true
timeout: 3600
- job:

View File

@ -45,6 +45,7 @@ import six
import six.moves.urllib.parse as urlparse
from sqlalchemy.engine import url as sqla_url
# FIXME(cdent): This is a speedbump in the extraction process
from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.cmd import common as cmd_common
from nova.compute import api as compute_api
@ -416,6 +417,7 @@ class DbCommands(object):
# need to be populated if it was not specified during boot time.
instance_obj.populate_missing_availability_zones,
# Added in Rocky
# FIXME(cdent): This is a factor that needs to be addressed somehow
consumer_obj.create_incomplete_consumers,
# Added in Rocky
instance_mapping_obj.populate_queued_for_delete,
@ -1987,6 +1989,7 @@ class PlacementCommands(object):
return num_processed
# FIXME(cdent): This needs to be addressed as part of extraction.
@action_description(
_("Iterates over non-cell0 cells looking for instances which do "
"not have allocations in the Placement service, or have incomplete "

View File

@ -251,6 +251,7 @@ class UpgradeCommands(object):
# and resource class, so we can simply count the number of inventories
# records for the given resource class and those will uniquely identify
# the number of resource providers we care about.
# FIXME(cdent): This will be a different project soon.
meta = MetaData(bind=placement_db.get_placement_engine())
inventories = Table('inventories', meta, autoload=True)
return select([sqlfunc.count()]).select_from(

View File

@ -49,7 +49,6 @@ from oslotest import moxstubout
import six
import testtools
from nova.api.openstack.placement.objects import resource_provider
from nova import context
from nova.db import api as db
from nova import exception
@ -260,7 +259,6 @@ class TestCase(testtools.TestCase):
# NOTE(danms): Full database setup involves a cell0, cell1,
# and the relevant mappings.
self.useFixture(nova_fixtures.Database(database='api'))
self.useFixture(nova_fixtures.Database(database='placement'))
self._setup_cells()
self.useFixture(nova_fixtures.DefaultFlavorsFixture())
elif not self.USES_DB_SELF:
@ -281,12 +279,6 @@ class TestCase(testtools.TestCase):
# caching of that value.
utils._IS_NEUTRON = None
# Reset the traits sync and rc cache flags
def _reset_traits():
resource_provider._TRAITS_SYNCED = False
_reset_traits()
self.addCleanup(_reset_traits)
resource_provider._RC_CACHE = None
# Reset the global QEMU version flag.
images.QEMU_VERSION = None
@ -296,8 +288,6 @@ class TestCase(testtools.TestCase):
self.addCleanup(self._clear_attrs)
self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
self.policy = self.useFixture(policy_fixture.PolicyFixture())
self.placement_policy = self.useFixture(
policy_fixture.PlacementPolicyFixture())
self.useFixture(nova_fixtures.PoisonFunctions())

View File

@ -26,8 +26,6 @@ import random
import warnings
import fixtures
from keystoneauth1 import adapter as ka
from keystoneauth1 import session as ks
import mock
from neutronclient.common import exceptions as neutron_client_exc
from oslo_concurrency import lockutils
@ -41,7 +39,6 @@ from requests import adapters
from wsgi_intercept import interceptor
from nova.api.openstack.compute import tenant_networks
from nova.api.openstack.placement import db_api as placement_db
from nova.api.openstack import wsgi_app
from nova.api import wsgi
from nova.compute import rpcapi as compute_rpcapi
@ -57,12 +54,11 @@ from nova import quota as nova_quota
from nova import rpc
from nova import service
from nova.tests.functional.api import client
from nova.tests.functional.api.openstack.placement.fixtures import placement
_TRUE_VALUES = ('True', 'true', '1', 'yes')
CONF = cfg.CONF
DB_SCHEMA = {'main': "", 'api': "", 'placement': ""}
DB_SCHEMA = {'main': "", 'api': ""}
SESSION_CONFIGURED = False
@ -631,7 +627,7 @@ class Database(fixtures.Fixture):
def __init__(self, database='main', connection=None):
"""Create a database fixture.
:param database: The type of database, 'main', 'api' or 'placement'
:param database: The type of database, 'main', or 'api'
:param connection: The connection string to use
"""
super(Database, self).__init__()
@ -640,7 +636,6 @@ class Database(fixtures.Fixture):
global SESSION_CONFIGURED
if not SESSION_CONFIGURED:
session.configure(CONF)
placement_db.configure(CONF)
SESSION_CONFIGURED = True
self.database = database
if database == 'main':
@ -652,8 +647,6 @@ class Database(fixtures.Fixture):
self.get_engine = session.get_engine
elif database == 'api':
self.get_engine = session.get_api_engine
elif database == 'placement':
self.get_engine = placement_db.get_placement_engine
def _cache_schema(self):
global DB_SCHEMA
@ -687,7 +680,7 @@ class DatabaseAtVersion(fixtures.Fixture):
"""Create a database fixture.
:param version: Max version to sync to (or None for current)
:param database: The type of database, 'main', 'api', 'placement'
:param database: The type of database, 'main', 'api'
"""
super(DatabaseAtVersion, self).__init__()
self.database = database
@ -696,8 +689,6 @@ class DatabaseAtVersion(fixtures.Fixture):
self.get_engine = session.get_engine
elif database == 'api':
self.get_engine = session.get_api_engine
elif database == 'placement':
self.get_engine = placement_db.get_placement_engine
def cleanup(self):
engine = self.get_engine()
@ -1853,136 +1844,6 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
fake_get_all_volume_types)
class PlacementApiClient(object):
    """Convenience wrapper exposing a PlacementFixture's fake HTTP methods
    through the functional-test ``client.APIResponse`` interface.
    """

    def __init__(self, placement_fixture):
        # The fixture owns the intercepted endpoint and auth token.
        self.fixture = placement_fixture

    def get(self, url, **kwargs):
        """GET *url* through the fixture; returns a client.APIResponse."""
        return client.APIResponse(self.fixture._fake_get(None, url, **kwargs))

    def put(self, url, body, **kwargs):
        """PUT JSON-serializable *body* to *url*; returns a client.APIResponse."""
        return client.APIResponse(
            self.fixture._fake_put(None, url, body, **kwargs))

    def post(self, url, body, **kwargs):
        """POST JSON-serializable *body* to *url*; returns a client.APIResponse."""
        return client.APIResponse(
            self.fixture._fake_post(None, url, body, **kwargs))
class PlacementFixture(placement.PlacementFixture):
    """A fixture to placement operations.

    Runs a local WSGI server bound on a free port and having the Placement
    application with NoAuth middleware.
    This fixture also prevents calling the ServiceCatalog for getting the
    endpoint.

    It's possible to ask for a specific token when running the fixtures so
    all calls would be passing this token.

    Most of the time users of this fixture will also want the placement
    database fixture (called first) as well:

        self.useFixture(nova_fixtures.Database(database='placement'))

    That is left as a manual step so tests may have fine grain control, and
    because it is likely that these fixtures will continue to evolve as
    the separation of nova and placement continues.
    """

    def setUp(self):
        super(PlacementFixture, self).setUp()
        # Turn off manipulation of socket_options in TCPKeepAliveAdapter
        # to keep wsgi-intercept happy. Replace it with the method
        # from its superclass.
        self.useFixture(fixtures.MonkeyPatch(
            'keystoneauth1.session.TCPKeepAliveAdapter.init_poolmanager',
            adapters.HTTPAdapter.init_poolmanager))

        # Real keystoneauth1 adapter, but with no auth plugin; requests are
        # routed to the intercepted endpoint via endpoint_override below.
        self._client = ka.Adapter(ks.Session(auth=None), raise_exc=False)
        # NOTE(sbauza): We need to mock the scheduler report client because
        # we need to fake Keystone by directly calling the endpoint instead
        # of looking up the service catalog, like we did for the OSAPIFixture.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.scheduler.client.report.SchedulerReportClient.get',
            self._fake_get))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.scheduler.client.report.SchedulerReportClient.post',
            self._fake_post))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.scheduler.client.report.SchedulerReportClient.put',
            self._fake_put))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.scheduler.client.report.SchedulerReportClient.delete',
            self._fake_delete))

        # Direct-access client for tests that talk to placement explicitly.
        self.api = PlacementApiClient(self)

    @staticmethod
    def _update_headers_with_version(headers, **kwargs):
        # Inject the placement microversion header when the caller asked
        # for a specific version; otherwise leave headers untouched.
        version = kwargs.get("version")
        if version is not None:
            # TODO(mriedem): Perform some version discovery at some point.
            headers.update({
                'OpenStack-API-Version': 'placement %s' % version
            })

    def _fake_get(self, *args, **kwargs):
        # args[0] is the (unused) SchedulerReportClient self; args[1] is url.
        (url,) = args[1:]
        # TODO(sbauza): The current placement NoAuthMiddleware returns a 401
        # in case a token is not provided. We should change that by creating
        # a fake token so we could remove adding the header below.
        headers = {'x-auth-token': self.token}
        self._update_headers_with_version(headers, **kwargs)
        return self._client.get(
            url,
            endpoint_override=self.endpoint,
            headers=headers)

    def _fake_post(self, *args, **kwargs):
        (url, data) = args[1:]
        # NOTE(sdague): using json= instead of data= sets the
        # media type to application/json for us. Placement API is
        # more sensitive to this than other APIs in the OpenStack
        # ecosystem.
        # TODO(sbauza): The current placement NoAuthMiddleware returns a 401
        # in case a token is not provided. We should change that by creating
        # a fake token so we could remove adding the header below.
        headers = {'x-auth-token': self.token}
        self._update_headers_with_version(headers, **kwargs)
        return self._client.post(
            url, json=data,
            endpoint_override=self.endpoint,
            headers=headers)

    def _fake_put(self, *args, **kwargs):
        (url, data) = args[1:]
        # NOTE(sdague): using json= instead of data= sets the
        # media type to application/json for us. Placement API is
        # more sensitive to this than other APIs in the OpenStack
        # ecosystem.
        # TODO(sbauza): The current placement NoAuthMiddleware returns a 401
        # in case a token is not provided. We should change that by creating
        # a fake token so we could remove adding the header below.
        headers = {'x-auth-token': self.token}
        self._update_headers_with_version(headers, **kwargs)
        return self._client.put(
            url, json=data,
            endpoint_override=self.endpoint,
            headers=headers)

    def _fake_delete(self, *args, **kwargs):
        (url,) = args[1:]
        # TODO(sbauza): The current placement NoAuthMiddleware returns a 401
        # in case a token is not provided. We should change that by creating
        # a fake token so we could remove adding the header below.
        # NOTE(review): no version header here — delete callers presumably
        # never pass a microversion; confirm against SchedulerReportClient.
        return self._client.delete(
            url,
            endpoint_override=self.endpoint,
            headers={'x-auth-token': self.token})
class UnHelperfulClientChannel(privsep_daemon._ClientChannel):
def __init__(self, context):
raise Exception('You have attempted to start a privsep helper. '

View File

@ -1,69 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslotest import output
import testtools
from nova.api.openstack.placement import context
from nova.api.openstack.placement import deploy
from nova.api.openstack.placement.objects import resource_provider
from nova.tests import fixtures
from nova.tests.functional.api.openstack.placement.fixtures import capture
from nova.tests.unit import policy_fixture
CONF = cfg.CONF
class TestCase(testtools.TestCase):
    """A base test case for placement functional tests.

    Sets up minimum configuration for database and policy handling
    and establishes the placement database.
    """

    def setUp(self):
        super(TestCase, self).setUp()

        # Manage required configuration
        conf_fixture = self.useFixture(config_fixture.Config(CONF))
        # The Database fixture will get confused if only one of the databases
        # is configured.
        for group in ('placement_database', 'api_database', 'database'):
            conf_fixture.config(
                group=group,
                connection='sqlite://',
                sqlite_synchronous=False)
        CONF([], default_config_files=[])

        self.useFixture(policy_fixture.PlacementPolicyFixture())

        self.useFixture(capture.Logging())
        self.useFixture(output.CaptureOutput())
        # Filter ignorable warnings during test runs.
        self.useFixture(capture.WarningsFixture())

        # Reset sync flags both before (stale module state from a prior
        # test) and after (cleanup) establishing the placement database.
        self.placement_db = self.useFixture(
            fixtures.Database(database='placement'))
        self._reset_database()
        self.context = context.RequestContext()
        # Do database syncs, such as traits sync.
        deploy.update_database()
        self.addCleanup(self._reset_database)

    @staticmethod
    def _reset_database():
        """Reset database sync flags to base state."""
        resource_provider._TRAITS_SYNCED = False
        resource_provider._RC_CACHE = None

View File

@ -1,129 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class and convenience utilities for functional placement tests."""
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.api.openstack.placement.objects import project as project_obj
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.objects import user as user_obj
from nova.tests.functional.api.openstack.placement import base
def create_provider(context, name, *aggs, **kwargs):
    """Create, persist and return a ResourceProvider.

    :param context: placement RequestContext used for persistence.
    :param name: provider name; also keys the default uuidsentinel uuid.
    :param aggs: optional aggregate uuids to associate with the provider.
    :param kwargs: optional ``uuid``, ``parent`` and ``root`` provider uuids.
    """
    provider_uuid = kwargs.get('uuid', getattr(uuids, name))
    provider = rp_obj.ResourceProvider(context, name=name, uuid=provider_uuid)

    parent_uuid = kwargs.get('parent')
    if parent_uuid:
        provider.parent_provider_uuid = parent_uuid

    root_uuid = kwargs.get('root')
    if root_uuid:
        provider.root_provider_uuid = root_uuid

    provider.create()

    if aggs:
        provider.set_aggregates(aggs)

    return provider
def add_inventory(rp, rc, total, **kwargs):
    """Add and return an Inventory record for *rc* on provider *rp*.

    :param rp: the ResourceProvider to receive the inventory.
    :param rc: resource class name.
    :param total: total amount; also used as the default ``max_unit``.
    :param kwargs: extra Inventory fields (e.g. reserved, step_size).
    """
    kwargs.setdefault('max_unit', total)
    inv = rp_obj.Inventory(rp._context, resource_provider=rp,
                           resource_class=rc, total=total, **kwargs)
    inv.obj_set_defaults()
    rp.add_inventory(inv)
    return inv
def set_traits(rp, *traits):
    """Replace provider *rp*'s traits with *traits*, creating any that are
    missing; returns the list of Trait objects that were set.
    """
    tlist = []
    for tname in traits:
        try:
            # Reuse an existing trait record when one exists.
            trait = rp_obj.Trait.get_by_name(rp._context, tname)
        except exception.TraitNotFound:
            trait = rp_obj.Trait(rp._context, name=tname)
            trait.create()
        tlist.append(trait)
    # set_traits replaces the full set, not an incremental add.
    rp.set_traits(rp_obj.TraitList(objects=tlist))
    return tlist
def ensure_consumer(ctx, user, project, consumer_id=None):
    """Return the Consumer for *consumer_id*, creating it if necessary.

    :param ctx: placement RequestContext.
    :param user: User object used when a new consumer must be created.
    :param project: Project object used when a new consumer must be created.
    :param consumer_id: consumer uuid; a random uuid is generated if omitted.
    """
    # NOTE(efried): If not specified, use a random consumer UUID - we don't
    # want to override any existing allocations from the test case.
    consumer_id = consumer_id or uuidutils.generate_uuid()
    try:
        consumer = consumer_obj.Consumer.get_by_uuid(ctx, consumer_id)
    except exception.NotFound:
        consumer = consumer_obj.Consumer(
            ctx, uuid=consumer_id, user=user, project=project)
        consumer.create()
    return consumer
def set_allocation(ctx, rp, consumer, rc_used_dict):
    """Replace *consumer*'s allocations on provider *rp*.

    :param ctx: placement RequestContext.
    :param rp: the ResourceProvider the allocations are against.
    :param consumer: the Consumer owning the allocations.
    :param rc_used_dict: mapping of resource class -> used amount.
    :returns: the persisted AllocationList.
    """
    allocations = []
    for resource_class, used in rc_used_dict.items():
        allocations.append(
            rp_obj.Allocation(
                ctx, resource_provider=rp, resource_class=resource_class,
                consumer=consumer, used=used))
    alloc_list = rp_obj.AllocationList(ctx, objects=allocations)
    alloc_list.replace_all()
    return alloc_list
class PlacementDbBaseTestCase(base.TestCase):
    """Base for placement object-layer tests that pre-creates a fake user
    and project and offers provider/allocation helpers.
    """

    def setUp(self):
        super(PlacementDbBaseTestCase, self).setUp()
        # we use context in some places and ctx in other. We should only use
        # context, but let's paper over that for now.
        self.ctx = self.context
        self.user_obj = user_obj.User(self.ctx, external_id='fake-user')
        self.user_obj.create()
        self.project_obj = project_obj.Project(
            self.ctx, external_id='fake-project')
        self.project_obj.create()
        # For debugging purposes, populated by _create_provider and used by
        # _validate_allocation_requests to make failure results more readable.
        self.rp_uuid_to_name = {}

    def _create_provider(self, name, *aggs, **kwargs):
        """Create a provider via the module helper and record its name."""
        rp = create_provider(self.ctx, name, *aggs, **kwargs)
        self.rp_uuid_to_name[rp.uuid] = name
        return rp

    def allocate_from_provider(self, rp, rc, used, consumer_id=None,
                               consumer=None):
        """Allocate *used* of class *rc* from *rp*; ensures a consumer
        exists when one is not passed in. Returns the AllocationList.
        """
        if consumer is None:
            consumer = ensure_consumer(
                self.ctx, self.user_obj, self.project_obj, consumer_id)
        alloc_list = set_allocation(self.ctx, rp, consumer, {rc: used})
        return alloc_list

    def _make_allocation(self, inv_dict, alloc_dict):
        """Create a provider with inventory *inv_dict* and a single
        allocation from *alloc_dict*; returns (provider, allocation).
        """
        rp = self._create_provider('allocation_resource_provider')
        disk_inv = rp_obj.Inventory(context=self.ctx,
                                    resource_provider=rp, **inv_dict)
        inv_list = rp_obj.InventoryList(objects=[disk_inv])
        rp.set_inventory(inv_list)
        # consumer_id is consumed here rather than passed through to the
        # Allocation constructor.
        consumer_id = alloc_dict['consumer_id']
        consumer = ensure_consumer(
            self.ctx, self.user_obj, self.project_obj, consumer_id)
        alloc = rp_obj.Allocation(self.ctx, resource_provider=rp,
                                  consumer=consumer, **alloc_dict)
        alloc_list = rp_obj.AllocationList(self.ctx, objects=[alloc])
        alloc_list.replace_all()
        return rp, alloc

View File

@ -1,329 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel as uuids
import sqlalchemy as sa
from nova.api.openstack.placement import db_api
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.api.openstack.placement.objects import project as project_obj
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.objects import user as user_obj
from nova import rc_fields as fields
from nova.tests.functional.api.openstack.placement import base
from nova.tests.functional.api.openstack.placement.db import test_base as tb
CONF = cfg.CONF
CONSUMER_TBL = consumer_obj.CONSUMER_TBL
PROJECT_TBL = project_obj.PROJECT_TBL
USER_TBL = user_obj.USER_TBL
ALLOC_TBL = rp_obj._ALLOC_TBL
class ConsumerTestCase(tb.PlacementDbBaseTestCase):
    """Object-layer tests for Consumer create/get/update behavior."""

    def test_non_existing_consumer(self):
        self.assertRaises(exception.ConsumerNotFound,
                          consumer_obj.Consumer.get_by_uuid, self.ctx,
                          uuids.non_existing_consumer)

    def test_create_and_get(self):
        u = user_obj.User(self.ctx, external_id='another-user')
        u.create()
        p = project_obj.Project(self.ctx, external_id='another-project')
        p.create()
        c = consumer_obj.Consumer(
            self.ctx, uuid=uuids.consumer, user=u, project=p)
        c.create()
        c = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer)
        self.assertEqual(1, c.id)
        # Project ID == 1 is fake-project created in setup
        self.assertEqual(2, c.project.id)
        # User ID == 1 is fake-user created in setup
        self.assertEqual(2, c.user.id)
        # A second create() for the same uuid must fail.
        self.assertRaises(exception.ConsumerExists, c.create)

    def test_update(self):
        """Tests the scenario where a user supplies a different project/user ID
        for an allocation's consumer and we call Consumer.update() to save that
        information to the consumers table.
        """
        # First, create the consumer with the "fake-user" and "fake-project"
        # user/project in the base test class's setUp
        c = consumer_obj.Consumer(
            self.ctx, uuid=uuids.consumer, user=self.user_obj,
            project=self.project_obj)
        c.create()
        c = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer)
        self.assertEqual(self.project_obj.id, c.project.id)
        self.assertEqual(self.user_obj.id, c.user.id)

        # Now change the consumer's project and user to a different project
        another_user = user_obj.User(self.ctx, external_id='another-user')
        another_user.create()
        another_proj = project_obj.Project(
            self.ctx, external_id='another-project')
        another_proj.create()

        c.project = another_proj
        c.user = another_user
        c.update()
        # Re-fetch to verify the change was persisted.
        c = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer)
        self.assertEqual(another_proj.id, c.project.id)
        self.assertEqual(another_user.id, c.user.id)
@db_api.placement_context_manager.reader
def _get_allocs_with_no_consumer_relationship(ctx):
    """Return rows of allocation consumer_ids that have no matching record
    in the consumers table (i.e. "incomplete" consumers).
    """
    # Outer join so allocations without a consumer row survive; the
    # IS NULL filter keeps only those orphans.
    alloc_to_consumer = sa.outerjoin(
        ALLOC_TBL, CONSUMER_TBL,
        ALLOC_TBL.c.consumer_id == CONSUMER_TBL.c.uuid)
    sel = sa.select([ALLOC_TBL.c.consumer_id])
    sel = sel.select_from(alloc_to_consumer)
    sel = sel.where(CONSUMER_TBL.c.id.is_(None))
    return ctx.session.execute(sel).fetchall()
# NOTE(jaypipes): The tb.PlacementDbBaseTestCase creates a project and user
# which is why we don't base off that. We want a completely bare DB for this
# test.
class CreateIncompleteConsumersTestCase(base.TestCase):
    """Tests the online data migration that backfills consumer records for
    allocations whose consumer does not exist in the consumers table.
    """

    def setUp(self):
        super(CreateIncompleteConsumersTestCase, self).setUp()
        self.ctx = self.context

    @db_api.placement_context_manager.writer
    def _create_incomplete_allocations(self, ctx, num_of_consumer_allocs=1):
        # Create some allocations with consumers that don't exist in the
        # consumers table to represent old allocations that we expect to be
        # "cleaned up" with consumers table records that point to the sentinel
        # project/user records.
        c1_missing_uuid = uuids.c1_missing
        c2_missing_uuid = uuids.c2_missing
        c3_missing_uuid = uuids.c3_missing
        for c_uuid in (c1_missing_uuid, c2_missing_uuid, c3_missing_uuid):
            # Create $num_of_consumer_allocs allocations per consumer with
            # different resource classes.
            for resource_class_id in range(num_of_consumer_allocs):
                ins_stmt = ALLOC_TBL.insert().values(
                    resource_provider_id=1,
                    resource_class_id=resource_class_id,
                    consumer_id=c_uuid, used=1)
                ctx.session.execute(ins_stmt)
        # Verify there are no records in the projects/users table
        project_count = ctx.session.scalar(
            sa.select([sa.func.count('*')]).select_from(PROJECT_TBL))
        self.assertEqual(0, project_count)
        user_count = ctx.session.scalar(
            sa.select([sa.func.count('*')]).select_from(USER_TBL))
        self.assertEqual(0, user_count)
        # Verify there are no consumer records for the missing consumers
        sel = CONSUMER_TBL.select(
            CONSUMER_TBL.c.uuid.in_([c1_missing_uuid, c2_missing_uuid]))
        res = ctx.session.execute(sel).fetchall()
        self.assertEqual(0, len(res))

    @db_api.placement_context_manager.reader
    def _check_incomplete_consumers(self, ctx):
        incomplete_project_id = CONF.placement.incomplete_consumer_project_id

        # Verify we have a record in projects for the missing sentinel
        sel = PROJECT_TBL.select(
            PROJECT_TBL.c.external_id == incomplete_project_id)
        rec = ctx.session.execute(sel).first()
        self.assertEqual(incomplete_project_id, rec['external_id'])
        incomplete_proj_id = rec['id']

        # Verify we have a record in users for the missing sentinel
        incomplete_user_id = CONF.placement.incomplete_consumer_user_id
        sel = user_obj.USER_TBL.select(
            USER_TBL.c.external_id == incomplete_user_id)
        rec = ctx.session.execute(sel).first()
        self.assertEqual(incomplete_user_id, rec['external_id'])
        incomplete_user_id = rec['id']

        # Verify there are records in the consumers table for our old
        # allocation records created in the pre-migration setup and that the
        # projects and users referenced in those consumer records point to the
        # incomplete project/user
        sel = CONSUMER_TBL.select(CONSUMER_TBL.c.uuid == uuids.c1_missing)
        missing_c1 = ctx.session.execute(sel).first()
        self.assertEqual(incomplete_proj_id, missing_c1['project_id'])
        self.assertEqual(incomplete_user_id, missing_c1['user_id'])
        sel = CONSUMER_TBL.select(CONSUMER_TBL.c.uuid == uuids.c2_missing)
        missing_c2 = ctx.session.execute(sel).first()
        self.assertEqual(incomplete_proj_id, missing_c2['project_id'])
        self.assertEqual(incomplete_user_id, missing_c2['user_id'])

        # Ensure there are no more allocations with incomplete consumers
        res = _get_allocs_with_no_consumer_relationship(ctx)
        self.assertEqual(0, len(res))

    def test_create_incomplete_consumers(self):
        """Test the online data migration that creates incomplete consumer
        records along with the incomplete consumer project/user records.
        """
        self._create_incomplete_allocations(self.ctx)
        # We do a "really online" online data migration for incomplete
        # consumers when calling AllocationList.get_all_by_consumer_id() and
        # AllocationList.get_all_by_resource_provider() and there are still
        # incomplete consumer records. So, to simulate a situation where the
        # operator has yet to run the nova-manage online_data_migration CLI
        # tool completely, we first call
        # consumer_obj.create_incomplete_consumers() with a batch size of 1.
        # This should mean there will be two allocation records still remaining
        # with a missing consumer record (since we create 3 total to begin
        # with). We then query the allocations table directly to grab that
        # consumer UUID in the allocations table that doesn't refer to a
        # consumer table record and call
        # AllocationList.get_all_by_consumer_id() with that consumer UUID. This
        # should create the remaining missing consumer record "inline" in the
        # AllocationList.get_all_by_consumer_id() method.
        # After that happens, there should still be a single allocation record
        # that is missing a relation to the consumers table. We call the
        # AllocationList.get_all_by_resource_provider() method and verify that
        # method cleans up the remaining incomplete consumers relationship.
        res = consumer_obj.create_incomplete_consumers(self.ctx, 1)
        self.assertEqual((1, 1), res)

        # Grab the consumer UUID for the allocation record with a
        # still-incomplete consumer record.
        res = _get_allocs_with_no_consumer_relationship(self.ctx)
        self.assertEqual(2, len(res))
        still_missing = res[0][0]
        rp_obj.AllocationList.get_all_by_consumer_id(self.ctx, still_missing)

        # There should still be a single missing consumer relationship. Let's
        # grab that and call AllocationList.get_all_by_resource_provider()
        # which should clean that last one up for us.
        res = _get_allocs_with_no_consumer_relationship(self.ctx)
        self.assertEqual(1, len(res))
        still_missing = res[0][0]
        rp1 = rp_obj.ResourceProvider(self.ctx, id=1)
        rp_obj.AllocationList.get_all_by_resource_provider(self.ctx, rp1)

        # get_all_by_resource_provider() should have auto-completed the still
        # missing consumer record and _check_incomplete_consumers() should
        # assert correctly that there are no more incomplete consumer records.
        self._check_incomplete_consumers(self.ctx)
        res = consumer_obj.create_incomplete_consumers(self.ctx, 10)
        self.assertEqual((0, 0), res)

    def test_create_incomplete_consumers_multiple_allocs_per_consumer(self):
        """Tests that missing consumer records are created when listing
        allocations against a resource provider or running the online data
        migration routine when the consumers have multiple allocations on the
        same provider.
        """
        self._create_incomplete_allocations(self.ctx, num_of_consumer_allocs=2)
        # Run the online data migration to migrate one consumer. The batch size
        # needs to be large enough to hit more than one consumer for this test
        # where each consumer has two allocations.
        res = consumer_obj.create_incomplete_consumers(self.ctx, 2)
        self.assertEqual((2, 2), res)
        # Migrate the rest by listing allocations on the resource provider.
        rp1 = rp_obj.ResourceProvider(self.ctx, id=1)
        rp_obj.AllocationList.get_all_by_resource_provider(self.ctx, rp1)
        self._check_incomplete_consumers(self.ctx)
        res = consumer_obj.create_incomplete_consumers(self.ctx, 10)
        self.assertEqual((0, 0), res)
class DeleteConsumerIfNoAllocsTestCase(tb.PlacementDbBaseTestCase):
    """Verifies that consumer records are deleted once they no longer have
    any allocations.
    """

    def test_delete_consumer_if_no_allocs(self):
        """AllocationList.replace_all() should attempt to delete consumers that
        no longer have any allocations. Due to the REST API not having any way
        to query for consumers directly (only via the GET
        /allocations/{consumer_uuid} endpoint which returns an empty dict even
        when no consumer record exists for the {consumer_uuid}) we need to do
        this functional test using only the object layer.
        """
        # We will use two consumers in this test, only one of which will get
        # all of its allocations deleted in a transaction (and we expect that
        # consumer record to be deleted)
        c1 = consumer_obj.Consumer(
            self.ctx, uuid=uuids.consumer1, user=self.user_obj,
            project=self.project_obj)
        c1.create()
        c2 = consumer_obj.Consumer(
            self.ctx, uuid=uuids.consumer2, user=self.user_obj,
            project=self.project_obj)
        c2.create()

        # Create some inventory that we will allocate
        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, fields.ResourceClass.VCPU, 8)
        tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
        tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2000)

        # Now allocate some of that inventory to two different consumers
        allocs = [
            rp_obj.Allocation(
                self.ctx, consumer=c1, resource_provider=cn1,
                resource_class=fields.ResourceClass.VCPU, used=1),
            rp_obj.Allocation(
                self.ctx, consumer=c1, resource_provider=cn1,
                resource_class=fields.ResourceClass.MEMORY_MB, used=512),
            rp_obj.Allocation(
                self.ctx, consumer=c2, resource_provider=cn1,
                resource_class=fields.ResourceClass.VCPU, used=1),
            rp_obj.Allocation(
                self.ctx, consumer=c2, resource_provider=cn1,
                resource_class=fields.ResourceClass.MEMORY_MB, used=512),
        ]
        alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
        alloc_list.replace_all()

        # Validate that we have consumer records for both consumers
        for c_uuid in (uuids.consumer1, uuids.consumer2):
            c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, c_uuid)
            self.assertIsNotNone(c_obj)

        # OK, now "remove" the allocation for consumer2 by setting the used
        # value for both allocated resources to 0 and re-running the
        # AllocationList.replace_all(). This should end up deleting the
        # consumer record for consumer2
        allocs = [
            rp_obj.Allocation(
                self.ctx, consumer=c2, resource_provider=cn1,
                resource_class=fields.ResourceClass.VCPU, used=0),
            rp_obj.Allocation(
                self.ctx, consumer=c2, resource_provider=cn1,
                resource_class=fields.ResourceClass.MEMORY_MB, used=0),
        ]
        alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
        alloc_list.replace_all()

        # consumer1 should still exist...
        c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer1)
        self.assertIsNotNone(c_obj)

        # but not consumer2...
        self.assertRaises(
            exception.NotFound, consumer_obj.Consumer.get_by_uuid,
            self.ctx, uuids.consumer2)

        # DELETE /allocations/{consumer_uuid} is the other place where we
        # delete all allocations for a consumer. Let's delete all for consumer1
        # and check that the consumer record is deleted
        alloc_list = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, uuids.consumer1)
        alloc_list.delete_all()

        # consumer1 should no longer exist in the DB since we just deleted all
        # of its allocations
        self.assertRaises(
            exception.NotFound, consumer_obj.Consumer.get_by_uuid,
            self.ctx, uuids.consumer1)

View File

@ -1,31 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.objects import project as project_obj
from nova.tests.functional.api.openstack.placement.db import test_base as tb
class ProjectTestCase(tb.PlacementDbBaseTestCase):
    """Exercise lookup and creation behaviour of the Project object."""

    def test_non_existing_project(self):
        """Looking up an unknown external id raises ProjectNotFound."""
        self.assertRaises(
            exception.ProjectNotFound, project_obj.Project.get_by_external_id,
            self.ctx, uuids.non_existing_project)

    def test_create_and_get(self):
        """A created project round-trips and cannot be created twice."""
        proj = project_obj.Project(self.ctx, external_id='another-project')
        proj.create()
        fetched = project_obj.Project.get_by_external_id(
            self.ctx, 'another-project')
        # Project ID == 1 is fake-project created in setup
        self.assertEqual(2, fetched.id)
        # A second create of the same external id must fail.
        self.assertRaises(exception.ProjectExists, fetched.create)

View File

@ -1,359 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.tests.functional.api.openstack.placement.db import test_base as tb
def alloc_for_rc(alloc_list, rc):
    """Return the first allocation in ``alloc_list`` for resource class ``rc``.

    Returns ``None`` when no allocation matches, mirroring the implicit
    fall-through of a plain loop.
    """
    return next(
        (allocation for allocation in alloc_list
         if allocation.resource_class == rc),
        None)
class ReshapeTestCase(tb.PlacementDbBaseTestCase):
    """Test 'replace the world' reshape transaction."""

    def test_reshape(self):
        """We set up the following scenario:

        BEFORE: single compute node setup

        A single compute node with:
        - VCPU, MEMORY_MB, DISK_GB inventory
        - Two instances consuming CPU, RAM and DISK from that compute node

        AFTER: hierarchical + shared storage setup

        A compute node parent provider with:
        - MEMORY_MB

        Two NUMA node child providers containing:
        - VCPU

        Shared storage provider with:
        - DISK_GB

        Both instances have their resources split among the providers and
        shared storage accordingly
        """
        # First create our consumers
        i1_uuid = uuids.instance1
        i1_consumer = consumer_obj.Consumer(
            self.ctx, uuid=i1_uuid, user=self.user_obj,
            project=self.project_obj)
        i1_consumer.create()
        i2_uuid = uuids.instance2
        i2_consumer = consumer_obj.Consumer(
            self.ctx, uuid=i2_uuid, user=self.user_obj,
            project=self.project_obj)
        i2_consumer.create()
        # Single compute node carrying all three resource classes (BEFORE).
        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, 'VCPU', 16)
        tb.add_inventory(cn1, 'MEMORY_MB', 32768)
        tb.add_inventory(cn1, 'DISK_GB', 1000)
        # Allocate both instances against the single compute node
        for consumer in (i1_consumer, i2_consumer):
            allocs = [
                rp_obj.Allocation(
                    self.ctx, resource_provider=cn1,
                    resource_class='VCPU', consumer=consumer, used=2),
                rp_obj.Allocation(
                    self.ctx, resource_provider=cn1,
                    resource_class='MEMORY_MB', consumer=consumer, used=1024),
                rp_obj.Allocation(
                    self.ctx, resource_provider=cn1,
                    resource_class='DISK_GB', consumer=consumer, used=100),
            ]
            alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
            alloc_list.replace_all()
        # Verify we have the allocations we expect for the BEFORE scenario
        before_allocs_i1 = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, i1_uuid)
        self.assertEqual(3, len(before_allocs_i1))
        self.assertEqual(cn1.uuid, before_allocs_i1[0].resource_provider.uuid)
        before_allocs_i2 = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, i2_uuid)
        self.assertEqual(3, len(before_allocs_i2))
        self.assertEqual(cn1.uuid, before_allocs_i2[2].resource_provider.uuid)
        # Before we issue the actual reshape() call, we need to first create
        # the child providers and sharing storage provider. These are actions
        # that the virt driver or external agent is responsible for performing
        # *before* attempting any reshape activity.
        cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
        cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
        ss = self._create_provider('ss')
        # OK, now emulate the call to POST /reshaper that will be triggered by
        # a virt driver wanting to replace the world and change its modeling
        # from a single provider to a nested provider tree along with a sharing
        # storage provider.
        after_inventories = {
            # cn1 keeps the RAM only
            cn1: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=cn1,
                    resource_class='MEMORY_MB', total=32768, reserved=0,
                    max_unit=32768, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
            # each NUMA node gets half of the CPUs
            cn1_numa0: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=cn1_numa0,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
            cn1_numa1: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=cn1_numa1,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
            # The sharing provider gets a bunch of disk
            ss: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=ss,
                    resource_class='DISK_GB', total=100000, reserved=0,
                    max_unit=1000, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
        }
        # We do a fetch from the DB for each instance to get its latest
        # generation. This would be done by the resource tracker or scheduler
        # report client before issuing the call to reshape() because the
        # consumers representing the two instances above will have had their
        # generations incremented in the original call to PUT
        # /allocations/{consumer_uuid}
        i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)
        i2_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i2_uuid)
        after_allocs = rp_obj.AllocationList(self.ctx, objects=[
            # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
            # from the sharing storage provider
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1_numa0, resource_class='VCPU',
                consumer=i1_consumer, used=2),
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1, resource_class='MEMORY_MB',
                consumer=i1_consumer, used=1024),
            rp_obj.Allocation(
                self.ctx, resource_provider=ss, resource_class='DISK_GB',
                consumer=i1_consumer, used=100),
            # instance2 gets VCPU from NUMA1, MEMORY_MB from cn1 and DISK_GB
            # from the sharing storage provider
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1_numa1, resource_class='VCPU',
                consumer=i2_consumer, used=2),
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1, resource_class='MEMORY_MB',
                consumer=i2_consumer, used=1024),
            rp_obj.Allocation(
                self.ctx, resource_provider=ss, resource_class='DISK_GB',
                consumer=i2_consumer, used=100),
        ])
        # The reshape itself: inventories and allocations move atomically.
        rp_obj.reshape(self.ctx, after_inventories, after_allocs)
        # Verify that the inventories have been moved to the appropriate
        # providers in the AFTER scenario
        # The root compute node should only have MEMORY_MB, nothing else
        cn1_inv = rp_obj.InventoryList.get_all_by_resource_provider(
            self.ctx, cn1)
        self.assertEqual(1, len(cn1_inv))
        self.assertEqual('MEMORY_MB', cn1_inv[0].resource_class)
        self.assertEqual(32768, cn1_inv[0].total)
        # Each NUMA node should only have half the original VCPU, nothing else
        numa0_inv = rp_obj.InventoryList.get_all_by_resource_provider(
            self.ctx, cn1_numa0)
        self.assertEqual(1, len(numa0_inv))
        self.assertEqual('VCPU', numa0_inv[0].resource_class)
        self.assertEqual(8, numa0_inv[0].total)
        numa1_inv = rp_obj.InventoryList.get_all_by_resource_provider(
            self.ctx, cn1_numa1)
        self.assertEqual(1, len(numa1_inv))
        self.assertEqual('VCPU', numa1_inv[0].resource_class)
        self.assertEqual(8, numa1_inv[0].total)
        # The sharing storage provider should only have DISK_GB, nothing else
        ss_inv = rp_obj.InventoryList.get_all_by_resource_provider(
            self.ctx, ss)
        self.assertEqual(1, len(ss_inv))
        self.assertEqual('DISK_GB', ss_inv[0].resource_class)
        self.assertEqual(100000, ss_inv[0].total)
        # Verify we have the allocations we expect for the AFTER scenario
        after_allocs_i1 = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, i1_uuid)
        self.assertEqual(3, len(after_allocs_i1))
        # Our VCPU allocation should be in the NUMA0 node
        vcpu_alloc = alloc_for_rc(after_allocs_i1, 'VCPU')
        self.assertIsNotNone(vcpu_alloc)
        self.assertEqual(cn1_numa0.uuid, vcpu_alloc.resource_provider.uuid)
        # Our DISK_GB allocation should be in the sharing provider
        disk_alloc = alloc_for_rc(after_allocs_i1, 'DISK_GB')
        self.assertIsNotNone(disk_alloc)
        self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
        # And our MEMORY_MB should remain on the root compute node
        ram_alloc = alloc_for_rc(after_allocs_i1, 'MEMORY_MB')
        self.assertIsNotNone(ram_alloc)
        self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)
        after_allocs_i2 = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, i2_uuid)
        self.assertEqual(3, len(after_allocs_i2))
        # Our VCPU allocation should be in the NUMA1 node
        vcpu_alloc = alloc_for_rc(after_allocs_i2, 'VCPU')
        self.assertIsNotNone(vcpu_alloc)
        self.assertEqual(cn1_numa1.uuid, vcpu_alloc.resource_provider.uuid)
        # Our DISK_GB allocation should be in the sharing provider
        disk_alloc = alloc_for_rc(after_allocs_i2, 'DISK_GB')
        self.assertIsNotNone(disk_alloc)
        self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
        # And our MEMORY_MB should remain on the root compute node
        ram_alloc = alloc_for_rc(after_allocs_i2, 'MEMORY_MB')
        self.assertIsNotNone(ram_alloc)
        self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)

    def test_reshape_concurrent_inventory_update(self):
        """Valid failure scenario for reshape(). We test a situation where the
        virt driver has constructed it's "after inventories and allocations"
        and sent those to the POST /reshape endpoint. The reshape POST handler
        does a quick check of the resource provider generations sent in the
        payload and they all check out.

        However, right before the call to resource_provider.reshape(), another
        thread legitimately changes the inventory of one of the providers
        involved in the reshape transaction. We should get a
        ConcurrentUpdateDetected in this case.
        """
        # First create our consumers
        i1_uuid = uuids.instance1
        i1_consumer = consumer_obj.Consumer(
            self.ctx, uuid=i1_uuid, user=self.user_obj,
            project=self.project_obj)
        i1_consumer.create()
        # then all our original providers
        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, 'VCPU', 16)
        tb.add_inventory(cn1, 'MEMORY_MB', 32768)
        tb.add_inventory(cn1, 'DISK_GB', 1000)
        # Allocate an instance on our compute node
        allocs = [
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1,
                resource_class='VCPU', consumer=i1_consumer, used=2),
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1,
                resource_class='MEMORY_MB', consumer=i1_consumer, used=1024),
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1,
                resource_class='DISK_GB', consumer=i1_consumer, used=100),
        ]
        alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
        alloc_list.replace_all()
        # Before we issue the actual reshape() call, we need to first create
        # the child providers and sharing storage provider. These are actions
        # that the virt driver or external agent is responsible for performing
        # *before* attempting any reshape activity.
        cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
        cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
        ss = self._create_provider('ss')
        # OK, now emulate the call to POST /reshaper that will be triggered by
        # a virt driver wanting to replace the world and change its modeling
        # from a single provider to a nested provider tree along with a sharing
        # storage provider.
        after_inventories = {
            # cn1 keeps the RAM only
            cn1: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=cn1,
                    resource_class='MEMORY_MB', total=32768, reserved=0,
                    max_unit=32768, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
            # each NUMA node gets half of the CPUs
            cn1_numa0: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=cn1_numa0,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
            cn1_numa1: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=cn1_numa1,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
            # The sharing provider gets a bunch of disk
            ss: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=ss,
                    resource_class='DISK_GB', total=100000, reserved=0,
                    max_unit=1000, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
        }
        # We do a fetch from the DB for each instance to get its latest
        # generation. This would be done by the resource tracker or scheduler
        # report client before issuing the call to reshape() because the
        # consumers representing the two instances above will have had their
        # generations incremented in the original call to PUT
        # /allocations/{consumer_uuid}
        i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)
        after_allocs = rp_obj.AllocationList(self.ctx, objects=[
            # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
            # from the sharing storage provider
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1_numa0, resource_class='VCPU',
                consumer=i1_consumer, used=2),
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1, resource_class='MEMORY_MB',
                consumer=i1_consumer, used=1024),
            rp_obj.Allocation(
                self.ctx, resource_provider=ss, resource_class='DISK_GB',
                consumer=i1_consumer, used=100),
        ])
        # OK, now before we call reshape(), here we emulate another thread
        # changing the inventory for the sharing storage provider in between
        # the time in the REST handler when the sharing storage provider's
        # generation was validated and the actual call to reshape()
        ss_threadB = rp_obj.ResourceProvider.get_by_uuid(self.ctx, ss.uuid)
        # Reduce the amount of storage to 2000, from 100000.
        new_ss_inv = rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=ss_threadB,
                resource_class='DISK_GB', total=2000, reserved=0,
                max_unit=1000, min_unit=1, step_size=1,
                allocation_ratio=1.0)])
        ss_threadB.set_inventory(new_ss_inv)
        # Double check our storage provider's generation is now greater than
        # the original storage provider record being sent to reshape()
        self.assertGreater(ss_threadB.generation, ss.generation)
        # And we should legitimately get a failure now to reshape() due to
        # another thread updating one of the involved provider's generations
        self.assertRaises(
            exception.ConcurrentUpdateDetected,
            rp_obj.reshape, self.ctx, after_inventories, after_allocs)

View File

@ -1,145 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_utils import timeutils
from nova.api.openstack.placement import exception
from nova.api.openstack.placement import resource_class_cache as rc_cache
from nova import rc_fields as fields
from nova.tests.functional.api.openstack.placement import base
class TestResourceClassCache(base.TestCase):
    """Exercise the resource class cache against a real placement database.

    The RequestContext is mocked, but its session hands out genuine
    connections to the test database so cache misses hit real tables.
    """

    def setUp(self):
        """Build a mock context whose session yields real DB connections."""
        super(TestResourceClassCache, self).setUp()
        db = self.placement_db
        self.context = mock.Mock()
        sess_mock = mock.Mock()
        # Each call to session.connection() opens a fresh real connection.
        sess_mock.connection.side_effect = db.get_engine().connect
        self.context.session = sess_mock

    @mock.patch('sqlalchemy.select')
    def test_rc_cache_std_no_db(self, sel_mock):
        """Test that looking up either an ID or a string in the resource class
        cache for a standardized resource class does not result in a DB
        call.
        """
        cache = rc_cache.ResourceClassCache(self.context)
        self.assertEqual('VCPU', cache.string_from_id(0))
        self.assertEqual('MEMORY_MB', cache.string_from_id(1))
        self.assertEqual(0, cache.id_from_string('VCPU'))
        self.assertEqual(1, cache.id_from_string('MEMORY_MB'))
        # sqlalchemy.select untouched == no database round trip.
        self.assertFalse(sel_mock.called)

    def test_standards(self):
        """STANDARDS covers every standard class and is cached as a singleton."""
        cache = rc_cache.ResourceClassCache(self.context)
        standards = cache.STANDARDS
        self.assertEqual(len(standards), len(fields.ResourceClass.STANDARD))
        # NOTE(review): `names` is a generator, so successive assertIn calls
        # consume it; this only passes while iteration order matches
        # fields.ResourceClass.STANDARD — confirm this is intentional.
        names = (rc['name'] for rc in standards)
        for name in fields.ResourceClass.STANDARD:
            self.assertIn(name, names)
        # A second cache instance must share the exact same STANDARDS object.
        cache = rc_cache.ResourceClassCache(self.context)
        standards2 = cache.STANDARDS
        self.assertEqual(id(standards), id(standards2))

    def test_standards_have_time_fields(self):
        """Standard classes expose (null) created_at/updated_at fields."""
        cache = rc_cache.ResourceClassCache(self.context)
        standards = cache.STANDARDS
        first_standard = standards[0]
        self.assertIn('updated_at', first_standard)
        self.assertIn('created_at', first_standard)
        # Standard classes are synthesized, not inserted, so both are None.
        self.assertIsNone(first_standard['updated_at'])
        self.assertIsNone(first_standard['created_at'])

    def test_standard_has_time_fields(self):
        """all_from_string returns the full record for a standard class."""
        cache = rc_cache.ResourceClassCache(self.context)
        vcpu_class = cache.all_from_string('VCPU')
        expected = {'id': 0, 'name': 'VCPU', 'updated_at': None,
                    'created_at': None}
        self.assertEqual(expected, vcpu_class)

    def test_rc_cache_custom(self):
        """Test that non-standard, custom resource classes hit the database and
        return appropriate results, caching the results after a single
        query.
        """
        cache = rc_cache.ResourceClassCache(self.context)
        # Haven't added anything to the DB yet, so should raise
        # ResourceClassNotFound
        self.assertRaises(exception.ResourceClassNotFound,
                          cache.string_from_id, 1001)
        self.assertRaises(exception.ResourceClassNotFound,
                          cache.id_from_string, "IRON_NFV")
        # Now add to the database and verify appropriate results...
        with self.context.session.connection() as conn:
            ins_stmt = rc_cache._RC_TBL.insert().values(
                id=1001,
                name='IRON_NFV'
            )
            conn.execute(ins_stmt)
        self.assertEqual('IRON_NFV', cache.string_from_id(1001))
        self.assertEqual(1001, cache.id_from_string('IRON_NFV'))
        # Try same again and verify we don't hit the DB.
        with mock.patch('sqlalchemy.select') as sel_mock:
            self.assertEqual('IRON_NFV', cache.string_from_id(1001))
            self.assertEqual(1001, cache.id_from_string('IRON_NFV'))
            self.assertFalse(sel_mock.called)
        # Verify all fields available from all_from_string
        iron_nfv_class = cache.all_from_string('IRON_NFV')
        self.assertEqual(1001, iron_nfv_class['id'])
        self.assertEqual('IRON_NFV', iron_nfv_class['name'])
        # updated_at not set on insert
        self.assertIsNone(iron_nfv_class['updated_at'])
        self.assertIsInstance(iron_nfv_class['created_at'], datetime.datetime)
        # Update IRON_NFV (this is a no-op but will set updated_at)
        with self.context.session.connection() as conn:
            # NOTE(cdent): When using explict SQL that names columns,
            # the automatic timestamp handling provided by the oslo_db
            # TimestampMixin is not provided. created_at is a default
            # but updated_at is an onupdate.
            upd_stmt = rc_cache._RC_TBL.update().where(
                rc_cache._RC_TBL.c.id == 1001).values(
                    name='IRON_NFV', updated_at=timeutils.utcnow())
            conn.execute(upd_stmt)
        # reset cache
        cache = rc_cache.ResourceClassCache(self.context)
        iron_nfv_class = cache.all_from_string('IRON_NFV')
        # updated_at set on update
        self.assertIsInstance(iron_nfv_class['updated_at'], datetime.datetime)

    def test_rc_cache_miss(self):
        """Test that we raise ResourceClassNotFound if an unknown resource
        class ID or string is searched for.
        """
        cache = rc_cache.ResourceClassCache(self.context)
        self.assertRaises(exception.ResourceClassNotFound,
                          cache.string_from_id, 99999999)
        self.assertRaises(exception.ResourceClassNotFound,
                          cache.id_from_string, 'UNKNOWN')

View File

@ -1,31 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.objects import user as user_obj
from nova.tests.functional.api.openstack.placement.db import test_base as tb
class UserTestCase(tb.PlacementDbBaseTestCase):
    """Exercise lookup and creation behaviour of the User object."""

    def test_non_existing_user(self):
        """Looking up an unknown external id raises UserNotFound."""
        self.assertRaises(
            exception.UserNotFound, user_obj.User.get_by_external_id,
            self.ctx, uuids.non_existing_user)

    def test_create_and_get(self):
        """A created user round-trips and cannot be created twice."""
        user = user_obj.User(self.ctx, external_id='another-user')
        user.create()
        fetched = user_obj.User.get_by_external_id(self.ctx, 'another-user')
        # User ID == 1 is fake-user created in setup
        self.assertEqual(2, fetched.id)
        # A second create of the same external id must fail.
        self.assertRaises(exception.UserExists, fetched.create)

View File

@ -1,81 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import warnings
import fixtures
from oslotest import log
class NullHandler(logging.Handler):
    """custom default NullHandler to attempt to format the record.

    Used in conjunction with Logging below to detect formatting errors
    in debug logs.
    """

    def handle(self, record):
        # Formatting is the whole point: a broken format string in a log
        # call will raise here even though nothing is ever emitted.
        self.format(record)

    def emit(self, record):
        # Deliberately produce no output.
        pass

    def createLock(self):
        # With no output there is no shared state to protect.
        self.lock = None
class Logging(log.ConfigureLogging):
    """A logging fixture providing two important fixtures.

    One is to capture logs for later inspection.

    The other is to make sure that DEBUG logs, even if not captured,
    are formatted.
    """

    def __init__(self):
        super(Logging, self).__init__()
        # Fall back to INFO when the parent did not choose a level.
        if self.level is None:
            self.level = logging.INFO
        # Always capture logs, unlike the parent.
        self.capture_logs = True

    def setUp(self):
        super(Logging, self).setUp()
        if self.level > logging.DEBUG:
            # DEBUG records are not captured at this level, but route them
            # through NullHandler so bad format strings still explode.
            null_handler = NullHandler()
            self.useFixture(
                fixtures.LogHandler(null_handler, nuke_handlers=False))
            null_handler.setLevel(logging.DEBUG)
class WarningsFixture(fixtures.Fixture):
    """Filter or escalates certain warnings during test runs.

    Add additional entries as required. Remove when obsolete.
    """

    def setUp(self):
        super(WarningsFixture, self).setUp()
        # Ignore policy scope warnings.
        warnings.filterwarnings(
            'ignore',
            message="Policy .* failed scope check",
            category=UserWarning)
        # The UUIDFields emits a warning if the value is not a valid UUID.
        # Let's escalate that to an exception in the test to prevent adding
        # violations.
        warnings.filterwarnings('error', message=".*invalid UUID.*")
        # Put the default warning filters back when the test finishes.
        self.addCleanup(warnings.resetwarnings)

View File

@ -1,431 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from gabbi import fixture
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_middleware import cors
from oslo_policy import opts as policy_opts
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
from oslotest import output
from nova.api.openstack.placement import context
from nova.api.openstack.placement import deploy
from nova.api.openstack.placement.objects import project as project_obj
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.objects import user as user_obj
from nova.api.openstack.placement import policies
from nova import rc_fields as fields
from nova.tests import fixtures
from nova.tests.functional.api.openstack.placement.db import test_base as tb
from nova.tests.functional.api.openstack.placement.fixtures import capture
from nova.tests.unit import policy_fixture
CONF = cfg.CONF
def setup_app():
    """Return a freshly loaded placement WSGI application (gabbi entry point)."""
    return deploy.loadapp(CONF)
class APIFixture(fixture.GabbiFixture):
    """Setup the required backend fixtures for a basic placement service."""

    def start_fixture(self):
        """Configure logging, config, databases and env vars for a gabbi run.

        Ordering matters here: output capture first, then config, then
        database creation, then the environment variables the YAML tests
        interpolate.
        """
        # Set up stderr and stdout captures by directly driving the
        # existing nova fixtures that do that. This captures the
        # output that happens outside individual tests (for
        # example database migrations).
        self.standard_logging_fixture = capture.Logging()
        self.standard_logging_fixture.setUp()
        self.output_stream_fixture = output.CaptureOutput()
        self.output_stream_fixture.setUp()
        # Filter ignorable warnings during test runs.
        self.warnings_fixture = capture.WarningsFixture()
        self.warnings_fixture.setUp()
        self.conf_fixture = config_fixture.Config(CONF)
        self.conf_fixture.setUp()
        # The Database fixture will get confused if only one of the databases
        # is configured.
        for group in ('placement_database', 'api_database', 'database'):
            self.conf_fixture.config(
                group=group,
                connection='sqlite://',
                sqlite_synchronous=False)
        self.conf_fixture.config(
            group='api', auth_strategy='noauth2')
        self.context = context.RequestContext()
        # Register CORS opts, but do not set config. This has the
        # effect of exercising the "don't use cors" path in
        # deploy.py. Without setting some config the group will not
        # be present.
        CONF.register_opts(cors.CORS_OPTS, 'cors')
        # Set default policy opts, otherwise the deploy module can
        # NoSuchOptError.
        policy_opts.set_defaults(CONF)
        # Make sure default_config_files is an empty list, not None.
        # If None /etc/nova/nova.conf is read and confuses results.
        CONF([], default_config_files=[])
        self._reset_db_flags()
        self.placement_db_fixture = fixtures.Database('placement')
        self.placement_db_fixture.setUp()
        # Do this now instead of waiting for the WSGI app to start so that
        # fixtures can have traits.
        deploy.update_database()
        # Environment variables consumed by the gabbi YAML files.
        os.environ['RP_UUID'] = uuidutils.generate_uuid()
        os.environ['RP_NAME'] = uuidutils.generate_uuid()
        os.environ['CUSTOM_RES_CLASS'] = 'CUSTOM_IRON_NFV'
        os.environ['PROJECT_ID'] = uuidutils.generate_uuid()
        os.environ['USER_ID'] = uuidutils.generate_uuid()
        os.environ['PROJECT_ID_ALT'] = uuidutils.generate_uuid()
        os.environ['USER_ID_ALT'] = uuidutils.generate_uuid()
        os.environ['INSTANCE_UUID'] = uuidutils.generate_uuid()
        os.environ['MIGRATION_UUID'] = uuidutils.generate_uuid()
        os.environ['CONSUMER_UUID'] = uuidutils.generate_uuid()
        os.environ['PARENT_PROVIDER_UUID'] = uuidutils.generate_uuid()
        os.environ['ALT_PARENT_PROVIDER_UUID'] = uuidutils.generate_uuid()

    def stop_fixture(self):
        """Tear down the fixtures in reverse order of creation."""
        self.placement_db_fixture.cleanUp()
        # Since we clean up the DB, we need to reset the traits sync
        # flag to make sure the next run will recreate the traits and
        # reset the _RC_CACHE so that any cached resource classes
        # are flushed.
        self._reset_db_flags()
        self.warnings_fixture.cleanUp()
        self.output_stream_fixture.cleanUp()
        self.standard_logging_fixture.cleanUp()
        self.conf_fixture.cleanUp()

    @staticmethod
    def _reset_db_flags():
        """Clear module-level caches that survive a database teardown."""
        rp_obj._TRAITS_SYNCED = False
        rp_obj._RC_CACHE = None
class AllocationFixture(APIFixture):
    """An APIFixture that has some pre-made Allocations.

             +----- same user----+          alt_user
             |                   |             |
    +----+----------+ +------+-----+ +-----+---------+
    | consumer1     | | consumer2  | | alt_consumer  |
    |  DISK_GB:1000 | |   VCPU: 6  | |  VCPU: 1      |
    |               | |            | |  DISK_GB:20   |
    +-------------+-+ +------+-----+ +-+-------------+
                  |          |         |
                +-+----------+---------+-+
                |     rp                 |
                |      VCPU: 10          |
                |      DISK_GB:2048      |
                +------------------------+
    """

    def start_fixture(self):
        """Create users, a project, a provider and baseline allocations.

        Also exports the CONSUMER_* / ALT_* environment variables the
        gabbi YAML tests interpolate.
        """
        super(AllocationFixture, self).start_fixture()
        # For use creating and querying allocations/usages
        os.environ['ALT_USER_ID'] = uuidutils.generate_uuid()
        project_id = os.environ['PROJECT_ID']
        user_id = os.environ['USER_ID']
        alt_user_id = os.environ['ALT_USER_ID']
        user = user_obj.User(self.context, external_id=user_id)
        user.create()
        alt_user = user_obj.User(self.context, external_id=alt_user_id)
        alt_user.create()
        project = project_obj.Project(self.context, external_id=project_id)
        project.create()
        # Stealing from the super
        rp_name = os.environ['RP_NAME']
        rp_uuid = os.environ['RP_UUID']
        # Create the rp with VCPU and DISK_GB inventory
        rp = tb.create_provider(self.context, rp_name, uuid=rp_uuid)
        tb.add_inventory(rp, 'DISK_GB', 2048,
                         step_size=10, min_unit=10, max_unit=1000)
        tb.add_inventory(rp, 'VCPU', 10, max_unit=10)
        # Create a first consumer for the DISK_GB allocations
        consumer1 = tb.ensure_consumer(self.context, user, project)
        tb.set_allocation(self.context, rp, consumer1, {'DISK_GB': 1000})
        os.environ['CONSUMER_0'] = consumer1.uuid
        # Create a second consumer for the VCPU allocations
        consumer2 = tb.ensure_consumer(self.context, user, project)
        tb.set_allocation(self.context, rp, consumer2, {'VCPU': 6})
        os.environ['CONSUMER_ID'] = consumer2.uuid
        # Create a consumer object for a different user
        alt_consumer = tb.ensure_consumer(self.context, alt_user, project)
        os.environ['ALT_CONSUMER_ID'] = alt_consumer.uuid
        # Create a couple of allocations for a different user.
        tb.set_allocation(self.context, rp, alt_consumer,
                          {'DISK_GB': 20, 'VCPU': 1})
        # The ALT_RP_XXX variables are for a resource provider that has
        # not been created in the Allocation fixture
        os.environ['ALT_RP_UUID'] = uuidutils.generate_uuid()
        os.environ['ALT_RP_NAME'] = uuidutils.generate_uuid()
class SharedStorageFixture(APIFixture):
    """An APIFixture with two diskless compute nodes sharing storage.

    Both compute nodes are associated by aggregate with a provider of
    shared storage. Each compute node has two NUMA node resource
    providers, and each NUMA node in turn has a physical function (pf)
    resource provider.

                          +-------------------------------------+
                          |  sharing storage (ss)               |
                          |   DISK_GB:2000                      |
                          |   traits: MISC_SHARES_VIA_AGGREGATE |
                          +-----------------+-------------------+
                                            | aggregate
            +--------------------------+    |   +------------------------+
            | compute node (cn1)       |----+---| compute node (cn2)     |
            |  CPU: 24                 |        |  CPU: 24               |
            |  MEMORY_MB: 128*1024     |        |  MEMORY_MB: 128*1024   |
            |  traits: HW_CPU_X86_SSE, |        |                        |
            |          HW_CPU_X86_SSE2 |        |                        |
            +--------------------------+        +------------------------+
                 |               |                   |                |
        +---------+       +---------+         +---------+      +---------+
        | numa1_1 |       | numa1_2 |         | numa2_1 |      | numa2_2 |
        +---------+       +---------+         +---------+      +---------+
             |                 |                   |                |
    +---------------++---------------+  +---------------++----------------+
    | pf1_1         || pf1_2         |  | pf2_1         || pf2_2          |
    | SRIOV_NET_VF:8|| SRIOV_NET_VF:8|  | SRIOV_NET_VF:8|| SRIOV_NET_VF:8 |
    +---------------++---------------+  +---------------++----------------+
    """

    def start_fixture(self):
        super(SharedStorageFixture, self).start_fixture()
        agg_uuid = uuidutils.generate_uuid()
        os.environ['AGG_UUID'] = agg_uuid

        # Root providers: two compute nodes and the sharing storage
        # provider, all members of the same aggregate.
        cn1 = tb.create_provider(self.context, 'cn1', agg_uuid)
        cn2 = tb.create_provider(self.context, 'cn2', agg_uuid)
        ss = tb.create_provider(self.context, 'ss', agg_uuid)
        os.environ['CN1_UUID'] = cn1.uuid
        os.environ['CN2_UUID'] = cn2.uuid
        os.environ['SS_UUID'] = ss.uuid

        # Two NUMA node providers nested under each compute node. The
        # uuid of every provider is exported for the gabbi tests.
        numas = {}
        for cn_index, cn in enumerate((cn1, cn2), start=1):
            for numa_index in (1, 2):
                name = 'numa%d_%d' % (cn_index, numa_index)
                numas[name] = tb.create_provider(
                    self.context, name, parent=cn.uuid)
                os.environ[name.upper() + '_UUID'] = numas[name].uuid

        # One physical function provider nested under each NUMA node.
        pfs = []
        for cn_index in (1, 2):
            for numa_index in (1, 2):
                name = 'pf%d_%d' % (cn_index, numa_index)
                parent = numas['numa%d_%d' % (cn_index, numa_index)]
                pf = tb.create_provider(self.context, name,
                                        parent=parent.uuid)
                os.environ[name.upper() + '_UUID'] = pf.uuid
                pfs.append(pf)

        # Compute node inventory: VCPU and RAM only, no local disk.
        for cn in (cn1, cn2):
            tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
                             allocation_ratio=16.0)
            tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 128 * 1024,
                             allocation_ratio=1.5)
        tb.set_traits(cn1, 'HW_CPU_X86_SSE', 'HW_CPU_X86_SSE2')

        # The shared storage provider carries all the DISK_GB and is
        # marked as shared with any provider associated via aggregate.
        tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000,
                         reserved=100, allocation_ratio=1.0)
        tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE')

        # Each physical function provides SR-IOV virtual functions.
        for pf in pfs:
            tb.add_inventory(pf, fields.ResourceClass.SRIOV_NET_VF, 8,
                             allocation_ratio=1.0)
class NonSharedStorageFixture(APIFixture):
    """An APIFixture with two compute nodes that use only local storage,
    not shared storage.
    """

    def start_fixture(self):
        super(NonSharedStorageFixture, self).start_fixture()

        # Export three aggregate uuids for the tests to associate with
        # providers themselves; no associations are made here.
        for agg in ('aggA', 'aggB', 'aggC'):
            os.environ['%s_UUID' % agg.upper()] = uuidutils.generate_uuid()

        # Each compute node gets local VCPU, RAM and DISK inventory.
        for name in ('cn1', 'cn2'):
            cn = tb.create_provider(self.context, name)
            os.environ[name.upper() + '_UUID'] = cn.uuid
            tb.add_inventory(cn, 'VCPU', 24)
            tb.add_inventory(cn, 'MEMORY_MB', 128 * 1024)
            tb.add_inventory(cn, 'DISK_GB', 2000)
class CORSFixture(APIFixture):
    """An APIFixture that turns on CORS."""

    def start_fixture(self):
        super(CORSFixture, self).start_fixture()
        # NOTE(cdent): Without this override the cors group never gets
        # registered in the conf, and deploy.py is then unable to load
        # the CORS middleware when it wants to.
        self.conf_fixture.config(
            allowed_origin='http://valid.example.com',
            group='cors')
class GranularFixture(APIFixture):
    """An APIFixture that builds the provider environment below for
    testing granular resource requests.

    +========================++========================++========================+
    |cn_left                 ||cn_middle               ||cn_right                |
    |VCPU: 8                 ||VCPU: 8                 ||VCPU: 8                 |
    |MEMORY_MB: 4096         ||MEMORY_MB: 4096         ||MEMORY_MB: 4096         |
    |DISK_GB: 500            ||SRIOV_NET_VF: 8         ||DISK_GB: 500            |
    |VGPU: 8                 ||CUSTOM_NET_MBPS: 4000   ||VGPU: 8                 |
    |SRIOV_NET_VF: 8         ||traits: HW_CPU_X86_AVX, ||  - max_unit: 2         |
    |CUSTOM_NET_MBPS: 4000   ||        HW_CPU_X86_AVX2,||traits: HW_CPU_X86_MMX, |
    |traits: HW_CPU_X86_AVX, ||        HW_CPU_X86_SSE, ||       HW_GPU_API_DXVA, |
    |        HW_CPU_X86_AVX2,||        HW_NIC_ACCEL_TLS||       CUSTOM_DISK_SSD, |
    |        HW_GPU_API_DXVA,|+=+=====+================++==+========+============+
    |        HW_NIC_DCB_PFC, |  :     :                    :        : a
    |        CUSTOM_FOO      +..+     +--------------------+        : g
    +========================+  : a                                 : g
                                : g                                 : C
    +========================+  : g                  +===============+======+
    |shr_disk_1              |  : A                  |shr_net               |
    |DISK_GB: 1000           +..+                    |SRIOV_NET_VF: 16      |
    |traits: CUSTOM_DISK_SSD,|  :        : a         |CUSTOM_NET_MBPS: 40000|
    |  MISC_SHARES_VIA_AGG...|  :        : g         |traits: MISC_SHARES...|
    +========================+  :        : g         +======================+
      +=======================+ :        : B
      |shr_disk_2             +.+        :
      |DISK_GB: 1000          |          :
      |traits: MISC_SHARES... +..........+
      +=======================+
    """

    def start_fixture(self):
        super(GranularFixture, self).start_fixture()
        # A custom resource class used by cn_left, cn_middle and shr_net.
        rp_obj.ResourceClass(
            context=self.context, name='CUSTOM_NET_MBPS').create()

        os.environ['AGGA'] = uuids.aggA
        os.environ['AGGB'] = uuids.aggB
        os.environ['AGGC'] = uuids.aggC

        # One entry per provider:
        # (env key, name, aggregates, [(rc, total, kwargs)], traits)
        provider_specs = [
            ('CN_LEFT', 'cn_left', (uuids.aggA,),
             [('VCPU', 8, {}), ('MEMORY_MB', 4096, {}),
              ('DISK_GB', 500, {}), ('VGPU', 8, {}),
              ('SRIOV_NET_VF', 8, {}), ('CUSTOM_NET_MBPS', 4000, {})],
             ('HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2', 'HW_GPU_API_DXVA',
              'HW_NIC_DCB_PFC', 'CUSTOM_FOO')),
            ('CN_MIDDLE', 'cn_middle', (uuids.aggA, uuids.aggB),
             [('VCPU', 8, {}), ('MEMORY_MB', 4096, {}),
              ('SRIOV_NET_VF', 8, {}), ('CUSTOM_NET_MBPS', 4000, {})],
             ('HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2', 'HW_CPU_X86_SSE',
              'HW_NIC_ACCEL_TLS')),
            ('CN_RIGHT', 'cn_right', (uuids.aggB, uuids.aggC),
             [('VCPU', 8, {}), ('MEMORY_MB', 4096, {}),
              ('DISK_GB', 500, {}),
              # VGPU is deliberately constrained to a max_unit of 2.
              ('VGPU', 8, {'max_unit': 2})],
             ('HW_CPU_X86_MMX', 'HW_GPU_API_DXVA', 'CUSTOM_DISK_SSD')),
            ('SHR_DISK_1', 'shr_disk_1', (uuids.aggA,),
             [('DISK_GB', 1000, {})],
             ('MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_DISK_SSD')),
            ('SHR_DISK_2', 'shr_disk_2', (uuids.aggA, uuids.aggB),
             [('DISK_GB', 1000, {})],
             ('MISC_SHARES_VIA_AGGREGATE',)),
            ('SHR_NET', 'shr_net', (uuids.aggC,),
             [('SRIOV_NET_VF', 16, {}), ('CUSTOM_NET_MBPS', 40000, {})],
             ('MISC_SHARES_VIA_AGGREGATE',)),
        ]
        for env_key, name, aggs, inventories, traits in provider_specs:
            rp = tb.create_provider(self.context, name, *aggs)
            os.environ[env_key] = rp.uuid
            for rc, total, kwargs in inventories:
                tb.add_inventory(rp, rc, total, **kwargs)
            tb.set_traits(rp, *traits)
class OpenPolicyFixture(APIFixture):
    """An APIFixture that changes all policy rules to allow non-admins."""

    def start_fixture(self):
        super(OpenPolicyFixture, self).start_fixture()
        self.placement_policy_fixture = policy_fixture.PlacementPolicyFixture()
        self.placement_policy_fixture.setUp()
        # Override every registered rule to '@' so that any user has
        # access. The nova "admin_or_owner" concept does not really fit
        # most placement resources since they carry no user_id or
        # project_id attribute. The "base" role:admin rules are skipped.
        overrides = {
            rule.name: '@' for rule in policies.list_rules()
            if rule.name not in ('placement', 'admin_api')
        }
        self.placement_policy_fixture.set_rules(overrides)

    def stop_fixture(self):
        super(OpenPolicyFixture, self).stop_fixture()
        self.placement_policy_fixture.cleanUp()

View File

@ -1,49 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils import uuidutils
from wsgi_intercept import interceptor
from nova.api.openstack.placement import deploy
CONF = cfg.CONF
class PlacementFixture(fixtures.Fixture):
    """A fixture for placement operations.

    Runs a local WSGI server bound on a free port and having the Placement
    application with NoAuth middleware.
    This fixture also prevents calling the ServiceCatalog for getting the
    endpoint.

    It's possible to ask for a specific token when running the fixtures so
    all calls would be passing this token.
    """

    def __init__(self, token='admin'):
        # Token expected on requests intercepted by this fixture.
        self.token = token

    def setUp(self):
        super(PlacementFixture, self).setUp()
        # Switch the API to noauth2 so the intercepted placement app does
        # not require real keystone authentication.
        # NOTE(review): the Config fixture is not registered with
        # self.useFixture here, so its overrides are presumably not
        # cleaned up automatically when the fixture ends -- confirm.
        conf_fixture = config_fixture.Config(CONF)
        conf_fixture.config(group='api', auth_strategy='noauth2')
        loader = deploy.loadapp(CONF)
        # wsgi-intercept expects an app *factory*, hence the lambda
        # wrapping the already-loaded application.
        app = lambda: loader
        # A random hostname keeps concurrently-running tests from
        # colliding on the intercepted URL.
        self.endpoint = 'http://%s/placement' % uuidutils.generate_uuid()
        intercept = interceptor.RequestsInterceptor(app, url=self.endpoint)
        intercept.install_intercept()
        self.addCleanup(intercept.uninstall_intercept)

View File

@ -1,39 +0,0 @@
# This tests the individual CRUD operations on
# /resource_providers/{uuid}/aggregates* using a non-admin user with an
# open policy configuration. The response validation is intentionally minimal.
fixtures:
- OpenPolicyFixture
defaults:
request_headers:
x-auth-token: user
accept: application/json
content-type: application/json
openstack-api-version: placement latest
vars:
- &agg_1 f918801a-5e54-4bee-9095-09a9d0c786b8
- &agg_2 a893eb5c-e2a0-4251-ab26-f71d3b0cfc0b
tests:
- name: post new resource provider
POST: /resource_providers
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 200
- name: put some aggregates
PUT: /resource_providers/$ENVIRON['RP_UUID']/aggregates
data:
resource_provider_generation: 0
aggregates:
- *agg_1
- *agg_2
status: 200
- name: get those aggregates
GET: $LAST_URL
response_json_paths:
$.aggregates.`len`: 2

View File

@ -1,204 +0,0 @@
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
content-type: application/json
openstack-api-version: placement latest
vars:
- &agg_1 f918801a-5e54-4bee-9095-09a9d0c786b8
- &agg_2 a893eb5c-e2a0-4251-ab26-f71d3b0cfc0b
tests:
- name: get aggregates for bad resource provider
GET: /resource_providers/6984bb2d-830d-4c8d-ac64-c5a8103664be/aggregates
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: put aggregates for bad resource provider
PUT: /resource_providers/6984bb2d-830d-4c8d-ac64-c5a8103664be/aggregates
data: []
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: post new resource provider
POST: /resource_providers
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 200
response_headers:
location: //resource_providers/[a-f0-9-]+/
- name: get empty aggregates
GET: /resource_providers/$ENVIRON['RP_UUID']/aggregates
response_json_paths:
$.aggregates: []
- name: aggregates 404 for out of date microversion get
GET: /resource_providers/$ENVIRON['RP_UUID']/aggregates
request_headers:
openstack-api-version: placement 1.0
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: aggregates 404 for out of date microversion put
PUT: /resource_providers/$ENVIRON['RP_UUID']/aggregates
request_headers:
openstack-api-version: placement 1.0
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: put some aggregates - old payload and new microversion
PUT: $LAST_URL
data:
- *agg_1
- *agg_2
status: 400
response_strings:
- JSON does not validate
response_json_paths:
$.errors[0].title: Bad Request
- name: put some aggregates - new payload and old microversion
PUT: $LAST_URL
request_headers:
openstack-api-version: placement 1.18
data:
resource_provider_generation: 0
aggregates:
- *agg_1
- *agg_2
status: 400
response_strings:
- JSON does not validate
response_json_paths:
$.errors[0].title: Bad Request
- name: put some aggregates - new payload and new microversion
PUT: $LAST_URL
data:
resource_provider_generation: 0
aggregates:
- *agg_1
- *agg_2
status: 200
response_headers:
content-type: /application/json/
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
response_json_paths:
$.aggregates[0]: *agg_1
$.aggregates[1]: *agg_2
$.resource_provider_generation: 1
- name: get those aggregates
GET: $LAST_URL
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
response_json_paths:
$.aggregates.`len`: 2
- name: clear those aggregates - generation conflict
PUT: $LAST_URL
data:
resource_provider_generation: 0
aggregates: []
status: 409
response_json_paths:
$.errors[0].code: placement.concurrent_update
- name: clear those aggregates
PUT: $LAST_URL
data:
resource_provider_generation: 1
aggregates: []
status: 200
response_json_paths:
$.aggregates: []
- name: get empty aggregates again
GET: /resource_providers/$ENVIRON['RP_UUID']/aggregates
response_json_paths:
$.aggregates: []
- name: put non json
PUT: $LAST_URL
data: '{"bad", "not json"}'
status: 400
response_strings:
- Malformed JSON
response_json_paths:
$.errors[0].title: Bad Request
- name: put invalid json no generation
PUT: $LAST_URL
data:
aggregates:
- *agg_1
- *agg_2
status: 400
response_strings:
- JSON does not validate
response_json_paths:
$.errors[0].title: Bad Request
- name: put invalid json not uuids
PUT: $LAST_URL
data:
aggregates:
- harry
- sally
resource_provider_generation: 2
status: 400
response_strings:
- "is not a 'uuid'"
response_json_paths:
$.errors[0].title: Bad Request
- name: put same aggregates twice
PUT: $LAST_URL
data:
aggregates:
- *agg_1
- *agg_1
resource_provider_generation: 2
status: 400
response_strings:
- has non-unique elements
response_json_paths:
$.errors[0].title: Bad Request
# The next two tests confirm that prior to version 1.15 we do
# not set the cache-control or last-modified headers on either
# PUT or GET.
- name: put some aggregates v1.14
PUT: $LAST_URL
request_headers:
openstack-api-version: placement 1.14
data:
- *agg_1
- *agg_2
response_forbidden_headers:
- last-modified
- cache-control
- name: get those aggregates v1.14
GET: $LAST_URL
request_headers:
openstack-api-version: placement 1.14
response_forbidden_headers:
- last-modified
- cache-control

View File

@ -1,77 +0,0 @@
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
content-type: application/json
# Using <= 1.11 allows the PUT /allocations/{uuid} below
# to work with the older request form.
openstack-api-version: placement 1.11
tests:
- name: create a resource provider
POST: /resource_providers
data:
name: an rp
status: 201
- name: get resource provider
GET: $LOCATION
status: 200
- name: create a resource class
PUT: /resource_classes/CUSTOM_GOLD
status: 201
- name: add inventory to an rp
PUT: /resource_providers/$HISTORY['get resource provider'].$RESPONSE['$.uuid']/inventories
data:
resource_provider_generation: 0
inventories:
VCPU:
total: 24
CUSTOM_GOLD:
total: 5
status: 200
- name: allocate some of it two
desc: this is the one that used to raise a 500
PUT: /allocations/6d9f83db-6eb5-49f6-84b0-5d03c6aa9fc8
data:
allocations:
- resource_provider:
uuid: $HISTORY['get resource provider'].$RESPONSE['$.uuid']
resources:
DISK_GB: 5
CUSTOM_GOLD: 1
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
status: 409
- name: allocate some of it custom
PUT: /allocations/6d9f83db-6eb5-49f6-84b0-5d03c6aa9fc8
data:
allocations:
- resource_provider:
uuid: $HISTORY['get resource provider'].$RESPONSE['$.uuid']
resources:
CUSTOM_GOLD: 1
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
status: 204
- name: allocate some of it standard
PUT: /allocations/6d9f83db-6eb5-49f6-84b0-5d03c6aa9fc8
data:
allocations:
- resource_provider:
uuid: $HISTORY['get resource provider'].$RESPONSE['$.uuid']
resources:
DISK_GB: 1
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
status: 409

View File

@ -1,141 +0,0 @@
# Tests of allocation candidates API
fixtures:
- NonSharedStorageFixture
defaults:
request_headers:
x-auth-token: admin
content-type: application/json
accept: application/json
openstack-api-version: placement 1.24
tests:
- name: get bad member_of microversion
GET: /allocation_candidates?resources=VCPU:1&member_of=in:$ENVIRON['AGGA_UUID'],$ENVIRON['AGGB_UUID']
request_headers:
openstack-api-version: placement 1.18
status: 400
response_strings:
- Invalid query string parameters
- "'member_of' was unexpected"
- name: get allocation candidates invalid member_of value
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=INVALID_UUID
status: 400
response_strings:
- Expected 'member_of' parameter to contain valid UUID(s).
- name: get allocation candidates no 'in:' for multiple member_of
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGA_UUID'],$ENVIRON['AGGB_UUID']
status: 400
response_strings:
- Multiple values for 'member_of' must be prefixed with the 'in:' keyword
- name: get allocation candidates multiple member_of with 'in:' but invalid values
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=in:$ENVIRON['AGGA_UUID'],INVALID_UUID
status: 400
response_strings:
- Expected 'member_of' parameter to contain valid UUID(s).
- name: get allocation candidates multiple member_of with 'in:' but no aggregates
GET: /allocation_candidates?&member_of=in:&resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100
status: 400
response_strings:
- Expected 'member_of' parameter to contain valid UUID(s).
- name: get allocation candidates with no match for member_of
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGA_UUID']
status: 200
response_json_paths:
$.allocation_requests.`len`: 0
- name: get compute node 1 state
GET: /resource_providers/$ENVIRON['CN1_UUID']
- name: associate the first compute node with aggA
PUT: /resource_providers/$ENVIRON['CN1_UUID']/aggregates
data:
aggregates:
- $ENVIRON['AGGA_UUID']
resource_provider_generation: $HISTORY['get compute node 1 state'].$RESPONSE['$.generation']
status: 200
- name: verify that the member_of call now returns 1 allocation_candidate
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=in:$ENVIRON['AGGA_UUID'],$ENVIRON['AGGB_UUID']
status: 200
response_json_paths:
$.allocation_requests.`len`: 1
- name: get compute node 2 state
GET: /resource_providers/$ENVIRON['CN2_UUID']
- name: associate the second compute node with aggB
PUT: /resource_providers/$ENVIRON['CN2_UUID']/aggregates
data:
aggregates:
- $ENVIRON['AGGB_UUID']
resource_provider_generation: $HISTORY['get compute node 2 state'].$RESPONSE['$.generation']
status: 200
- name: verify that the member_of call now returns both RPs
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=in:$ENVIRON['AGGA_UUID'],$ENVIRON['AGGB_UUID']
status: 200
response_json_paths:
$.allocation_requests.`len`: 2
- name: verify that aggC still returns no RPs
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGC_UUID']
status: 200
response_json_paths:
$.allocation_requests.`len`: 0
- name: get current compute node 1 state
GET: /resource_providers/$ENVIRON['CN1_UUID']
- name: now associate the first compute node with both aggA and aggC
PUT: /resource_providers/$ENVIRON['CN1_UUID']/aggregates
data:
aggregates:
- $ENVIRON['AGGA_UUID']
- $ENVIRON['AGGC_UUID']
resource_provider_generation: $HISTORY['get current compute node 1 state'].$RESPONSE['$.generation']
- name: verify that the member_of call for aggs A and B still returns 2 allocation_candidates
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=in:$ENVIRON['AGGA_UUID'],$ENVIRON['AGGB_UUID']
status: 200
response_json_paths:
$.allocation_requests.`len`: 2
- name: verify microversion fail for multiple member_of params
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGA_UUID']&member_of=$ENVIRON['AGGB_UUID']
request_headers:
openstack-api-version: placement 1.23
status: 400
response_strings:
- 'Multiple member_of parameters are not supported'
response_json_paths:
$.errors[0].title: Bad Request
- name: verify that no RP is associated with BOTH aggA and aggB
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGA_UUID']&member_of=$ENVIRON['AGGB_UUID']
status: 200
response_json_paths:
$.allocation_requests.`len`: 0
- name: associate the second compute node with aggA and aggB
PUT: /resource_providers/$ENVIRON['CN2_UUID']/aggregates
data:
aggregates:
- $ENVIRON['AGGA_UUID']
- $ENVIRON['AGGB_UUID']
resource_provider_generation: $HISTORY['associate the second compute node with aggB'].$RESPONSE['$.resource_provider_generation']
status: 200
- name: verify that second RP is associated with BOTH aggA and aggB
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGA_UUID']&member_of=$ENVIRON['AGGB_UUID']
status: 200
response_json_paths:
$.allocation_requests.`len`: 1

View File

@ -1,18 +0,0 @@
# This tests GET /allocation_candidates using a non-admin
# user with an open policy configuration. The response validation is
# intentionally minimal.
fixtures:
- OpenPolicyFixture
defaults:
request_headers:
x-auth-token: user
accept: application/json
content-type: application/json
openstack-api-version: placement latest
tests:
- name: get allocation candidates
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100
status: 200

View File

@ -1,416 +0,0 @@
# Tests of allocation candidates API
fixtures:
- SharedStorageFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
openstack-api-version: placement 1.10
tests:
- name: list traits
GET: /traits
status: 200
response_strings:
# We at least want to make sure that this trait is supported.
- MISC_SHARES_VIA_AGGREGATE
- name: get allocation candidates before microversion
GET: /allocation_candidates?resources=VCPU:1
request_headers:
openstack-api-version: placement 1.8
status: 404
- name: get allocation candidates empty resources
GET: /allocation_candidates?resources=
status: 400
response_strings:
- Badly formed resources parameter. Expected resources query string parameter in form
- 'Got: empty string.'
- name: get allocation candidates no resources
GET: /allocation_candidates
status: 400
response_strings:
- "'resources' is a required property"
- name: get bad resource class
GET: /allocation_candidates?resources=MCPU:99
status: 400
response_strings:
- Invalid resource class in resources parameter
- name: get bad limit microversion
GET: /allocation_candidates?resources=VCPU:1&limit=5
request_headers:
openstack-api-version: placement 1.15
status: 400
response_strings:
- Invalid query string parameters
- "'limit' was unexpected"
- name: get bad limit type
GET: /allocation_candidates?resources=VCPU:1&limit=cow
request_headers:
openstack-api-version: placement 1.16
status: 400
response_strings:
- Invalid query string parameters
- "Failed validating 'pattern'"
- name: get bad limit value negative
GET: /allocation_candidates?resources=VCPU:1&limit=-99
request_headers:
openstack-api-version: placement 1.16
status: 400
response_strings:
- Invalid query string parameters
- "Failed validating 'pattern'"
- name: get bad limit value zero
GET: /allocation_candidates?resources=VCPU:1&limit=0
request_headers:
openstack-api-version: placement 1.16
status: 400
response_strings:
- Invalid query string parameters
- "Failed validating 'pattern'"
- name: get allocation candidates no allocations yet
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100
status: 200
response_json_paths:
# There are 3 providers involved. 2 compute nodes, 1 shared storage
# provider
$.provider_summaries.`len`: 3
# However, there are only 2 allocation requests, one for each compute
# node that provides the VCPU/MEMORY_MB and DISK_GB provided by the
# shared storage provider
$.allocation_requests.`len`: 2
# Verify that compute node #1 only has VCPU and MEMORY_MB listed in the
# resource requests. This validates the entire resources key.
$.allocation_requests..allocations[?resource_provider.uuid="$ENVIRON['CN1_UUID']"].resources:
VCPU: 1
MEMORY_MB: 1024
# Verify that compute node #2 only has VCPU and MEMORY_MB listed in the
# resource requests
$.allocation_requests..allocations[?resource_provider.uuid="$ENVIRON['CN2_UUID']"].resources:
VCPU: 1
MEMORY_MB: 1024
# Verify that shared storage provider only has DISK_GB listed in the
# resource requests, but is listed twice
$.allocation_requests..allocations[?resource_provider.uuid="$ENVIRON['SS_UUID']"].resources[DISK_GB]: [100, 100]
# Verify that the resources listed in the provider summary for compute
# node #1 show correct capacity and usage
$.provider_summaries["$ENVIRON['CN1_UUID']"].resources[VCPU].capacity: 384 # 16.0 * 24
$.provider_summaries["$ENVIRON['CN1_UUID']"].resources[VCPU].used: 0
$.provider_summaries["$ENVIRON['CN1_UUID']"].resources[MEMORY_MB].capacity: 196608 # 1.5 * 128G
$.provider_summaries["$ENVIRON['CN1_UUID']"].resources[MEMORY_MB].used: 0
# Verify that the resources listed in the provider summary for compute
# node #2 show correct capacity and usage
$.provider_summaries["$ENVIRON['CN2_UUID']"].resources[VCPU].capacity: 384 # 16.0 * 24
$.provider_summaries["$ENVIRON['CN2_UUID']"].resources[VCPU].used: 0
$.provider_summaries["$ENVIRON['CN2_UUID']"].resources[MEMORY_MB].capacity: 196608 # 1.5 * 128G
$.provider_summaries["$ENVIRON['CN2_UUID']"].resources[MEMORY_MB].used: 0
# Verify that the resources listed in the provider summary for shared
# storage show correct capacity and usage
$.provider_summaries["$ENVIRON['SS_UUID']"].resources[DISK_GB].capacity: 1900 # 1.0 * 2000 - 100G
$.provider_summaries["$ENVIRON['SS_UUID']"].resources[DISK_GB].used: 0
response_forbidden_headers:
# In the default microversion in this file (1.10) the cache headers
# are not preset.
- cache-control
- last-modified
# Verify the 1.12 format of the allocation_requests sub object which
# changes from a list-list to dict-ish format.
- name: get allocation candidates 1.12 dictish
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100
request_headers:
openstack-api-version: placement 1.12
response_json_paths:
# There are 3 providers involved. 2 compute nodes, 1 shared storage
# provider
$.provider_summaries.`len`: 3
# However, there are only 2 allocation requests, one for each compute
# node that provides the VCPU/MEMORY_MB and DISK_GB provided by the
# shared storage provider
$.allocation_requests.`len`: 2
# Verify that compute node #1 only has VCPU and MEMORY_MB listed in the
# resource requests. This validates the entire resources key.
$.allocation_requests..allocations["$ENVIRON['CN1_UUID']"].resources:
VCPU: 1
MEMORY_MB: 1024
# Verify that compute node #2 only has VCPU and MEMORY_MB listed in the
# resource requests
$.allocation_requests..allocations["$ENVIRON['CN2_UUID']"].resources:
VCPU: 1
MEMORY_MB: 1024
# Verify that shared storage provider only has DISK_GB listed in the
# resource requests, but is listed twice
$.allocation_requests..allocations["$ENVIRON['SS_UUID']"].resources[DISK_GB]: [100, 100]
- name: get allocation candidates cache headers
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100
request_headers:
# microversion 1.15 to cause cache headers
openstack-api-version: placement 1.15
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: get allocation candidates with limit
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&limit=1
status: 200
request_headers:
openstack-api-version: placement 1.16
response_json_paths:
$.allocation_requests.`len`: 1
- name: get allocation candidates with required traits in old version
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=HW_CPU_X86_SSE
status: 400
request_headers:
openstack-api-version: placement 1.16
response_strings:
- Invalid query string parameters
- "'required' was unexpected"
- name: get allocation candidates without traits summary in old version
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100
status: 200
request_headers:
openstack-api-version: placement 1.16
response_json_paths:
$.provider_summaries["$ENVIRON['CN1_UUID']"].`len`: 1
$.provider_summaries["$ENVIRON['CN2_UUID']"].`len`: 1
- name: get allocation candidates with invalid trait
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=INVALID_TRAIT
status: 400
request_headers:
openstack-api-version: placement 1.17
response_strings:
- No such trait(s)
- name: get allocation candidates with empty required value
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=
status: 400
request_headers:
openstack-api-version: placement 1.17
response_strings:
- "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,CUSTOM_MAGIC."
- name: get allocation candidates with empty required value 1.22
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=
status: 400
request_headers:
openstack-api-version: placement 1.22
response_strings:
- "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,!CUSTOM_MAGIC."
- name: get allocation candidates with invalid required value
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=,,
status: 400
request_headers:
openstack-api-version: placement 1.17
response_strings:
- "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,CUSTOM_MAGIC."
- name: get allocation candidates with forbidden trait pre-forbidden
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=!CUSTOM_MAGIC
status: 400
request_headers:
openstack-api-version: placement 1.17
response_strings:
- "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,CUSTOM_MAGIC."
- name: get allocation candidates with required trait
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=HW_CPU_X86_SSE
status: 200
request_headers:
openstack-api-version: placement 1.17
response_json_paths:
$.allocation_requests.`len`: 1
$.provider_summaries.`len`: 2
$.provider_summaries["$ENVIRON['CN1_UUID']"].`len`: 2
$.provider_summaries["$ENVIRON['CN1_UUID']"].traits.`sorted`:
- HW_CPU_X86_SSE
- HW_CPU_X86_SSE2
- name: get allocation candidates with forbidden trait
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=!HW_CPU_X86_SSE
status: 200
request_headers:
openstack-api-version: placement 1.22
response_json_paths:
# There are no allocations for CN1
$.allocation_requests.`len`: 1
$.allocation_requests[0].allocations.`len`: 2
$.allocation_requests[0].allocations["$ENVIRON['CN2_UUID']"].resources.VCPU: 1
$.allocation_requests[0].allocations["$ENVIRON['CN2_UUID']"].resources.MEMORY_MB: 1024
$.allocation_requests[0].allocations["$ENVIRON['SS_UUID']"].resources.DISK_GB: 100
- name: get allocation candidates with multiple required traits
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=HW_CPU_X86_SSE,HW_CPU_X86_SSE2
status: 200
request_headers:
openstack-api-version: placement 1.17
response_json_paths:
$.allocation_requests.`len`: 1
$.provider_summaries.`len`: 2
$.provider_summaries["$ENVIRON['CN1_UUID']"].`len`: 2
$.provider_summaries["$ENVIRON['CN1_UUID']"].traits.`sorted`:
- HW_CPU_X86_SSE
- HW_CPU_X86_SSE2
- name: get allocation candidates with required trait and no matching
GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=HW_CPU_X86_SSE3
status: 200
request_headers:
openstack-api-version: placement 1.17
response_json_paths:
$.allocation_requests.`len`: 0
$.provider_summaries.`len`: 0
# Before microversion 1.27, the ``provider_summaries`` field in the response
# of the ``GET /allocation_candidates`` API included inventories of resource
# classes that are requested.
- name: get allocation candidates provider summaries with requested resource
GET: /allocation_candidates?resources=VCPU:1
status: 200
request_headers:
openstack-api-version: placement 1.26
response_json_paths:
$.allocation_requests.`len`: 2
$.provider_summaries.`len`: 2
$.provider_summaries["$ENVIRON['CN1_UUID']"].resources.`len`: 1
$.provider_summaries["$ENVIRON['CN1_UUID']"].resources:
VCPU:
capacity: 384 # 16.0 * 24
used: 0
$.provider_summaries["$ENVIRON['CN2_UUID']"].resources.`len`: 1
$.provider_summaries["$ENVIRON['CN2_UUID']"].resources:
VCPU:
capacity: 384 # 16.0 * 24
used: 0
# From microversion 1.27, the ``provider_summaries`` field includes
# all the resource class inventories regardless of whether it is requested.
- name: get allocation candidates provider summaries with all resources
GET: /allocation_candidates?resources=VCPU:1
status: 200
request_headers:
openstack-api-version: placement 1.27
response_json_paths:
$.allocation_requests.`len`: 2
$.provider_summaries.`len`: 2
$.provider_summaries["$ENVIRON['CN1_UUID']"].resources.`len`: 2
$.provider_summaries["$ENVIRON['CN1_UUID']"].resources:
VCPU:
capacity: 384 # 16.0 * 24
used: 0
MEMORY_MB:
capacity: 196608 # 1.5 * 128G
used: 0
$.provider_summaries["$ENVIRON['CN2_UUID']"].resources.`len`: 2
$.provider_summaries["$ENVIRON['CN2_UUID']"].resources:
VCPU:
capacity: 384 # 16.0 * 24
used: 0
MEMORY_MB:
capacity: 196608 # 1.5 * 128G
used: 0
# Before microversion 1.29, no root/parent uuid is included
- name: get allocation candidates no root or parent uuid
GET: /allocation_candidates?resources=VCPU:1
status: 200
request_headers:
openstack-api-version: placement 1.28
response_json_paths:
$.allocation_requests.`len`: 2
$.provider_summaries.`len`: 2
$.provider_summaries.["$ENVIRON['CN1_UUID']"].`len`: 2
$.provider_summaries.["$ENVIRON['CN2_UUID']"].`len`: 2
- name: get allocation candidates with root and parent uuid
GET: /allocation_candidates?resources=VCPU:1
status: 200
request_headers:
openstack-api-version: placement 1.29
response_json_paths:
$.allocation_requests.`len`: 2
$.provider_summaries.`len`: 10
$.provider_summaries.["$ENVIRON['CN1_UUID']"].`len`: 4
$.provider_summaries.["$ENVIRON['CN2_UUID']"].`len`: 4
$.provider_summaries.["$ENVIRON['CN1_UUID']"].parent_provider_uuid: null
$.provider_summaries.["$ENVIRON['CN1_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']"
$.provider_summaries.["$ENVIRON['NUMA1_1_UUID']"].parent_provider_uuid: "$ENVIRON['CN1_UUID']"
$.provider_summaries.["$ENVIRON['NUMA1_1_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']"
$.provider_summaries.["$ENVIRON['NUMA1_2_UUID']"].parent_provider_uuid: "$ENVIRON['CN1_UUID']"
$.provider_summaries.["$ENVIRON['NUMA1_2_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']"
$.provider_summaries.["$ENVIRON['PF1_1_UUID']"].parent_provider_uuid: "$ENVIRON['NUMA1_1_UUID']"
$.provider_summaries.["$ENVIRON['PF1_1_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']"
$.provider_summaries.["$ENVIRON['PF1_2_UUID']"].parent_provider_uuid: "$ENVIRON['NUMA1_2_UUID']"
$.provider_summaries.["$ENVIRON['PF1_2_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']"
# Before microversion 1.29, it isn't aware of nested providers.
# Namely, it can return non-root providers for allocation candidates,
- name: get allocation candidates only nested provider old microversion
GET: /allocation_candidates?resources=SRIOV_NET_VF:4
status: 200
request_headers:
openstack-api-version: placement 1.28
response_json_paths:
$.allocation_requests.`len`: 4
$.provider_summaries.`len`: 4
- name: get allocation candidates only nested provider new microversion
GET: /allocation_candidates?resources=SRIOV_NET_VF:4
status: 200
request_headers:
openstack-api-version: placement 1.29
response_json_paths:
$.allocation_requests.`len`: 4
$.provider_summaries.`len`: 10
# ...but it can't return combinations of providers in a tree.
- name: get allocation candidates root and nested old microversion
GET: /allocation_candidates?resources=VCPU:1,SRIOV_NET_VF:4
status: 200
request_headers:
openstack-api-version: placement 1.28
response_json_paths:
$.allocation_requests.`len`: 0
$.provider_summaries.`len`: 0
- name: get allocation candidates root and nested new microversion
GET: /allocation_candidates?resources=VCPU:1,SRIOV_NET_VF:4
status: 200
request_headers:
openstack-api-version: placement 1.29
response_json_paths:
$.allocation_requests.`len`: 4
$.provider_summaries.`len`: 10
$.allocation_requests..allocations["$ENVIRON['CN1_UUID']"].resources.VCPU: [1, 1]
$.allocation_requests..allocations["$ENVIRON['PF1_1_UUID']"].resources.SRIOV_NET_VF: 4
$.allocation_requests..allocations["$ENVIRON['PF1_2_UUID']"].resources.SRIOV_NET_VF: 4
$.allocation_requests..allocations["$ENVIRON['CN2_UUID']"].resources.VCPU: [1, 1]
$.allocation_requests..allocations["$ENVIRON['PF2_1_UUID']"].resources.SRIOV_NET_VF: 4
$.allocation_requests..allocations["$ENVIRON['PF2_2_UUID']"].resources.SRIOV_NET_VF: 4
# Make sure that old microversions can return combinations where
# sharing providers are involved
- name: get allocation candidates shared and nested old microversion
GET: /allocation_candidates?resources=DISK_GB:10,SRIOV_NET_VF:4
status: 200
request_headers:
openstack-api-version: placement 1.28
response_json_paths:
$.allocation_requests.`len`: 4
$.provider_summaries.`len`: 5

View File

@ -1,130 +0,0 @@
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
content-type: application/json
openstack-api-version: placement 1.12
tests:
- name: put an allocation listish
PUT: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
status: 400
response_strings:
- JSON does not validate
- name: put resource provider not uuid
PUT: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9
data:
allocations:
nice_house_friend:
resources:
VCPU: 1
DISK_GB: 20
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
status: 400
response_strings:
- JSON does not validate
- does not match any of the regexes
- name: put resource class not valid
PUT: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9
data:
allocations:
$ENVIRON['RP_UUID']:
resources:
vcpu: 1
DISK_GB: 20
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
status: 400
response_strings:
- JSON does not validate
- does not match any of the regexes
- name: put empty allocations
PUT: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9
data:
allocations: {}
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
status: 400
response_strings:
- JSON does not validate
- does not have enough properties
- name: put unused field
PUT: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9
data:
allocations:
$ENVIRON['RP_UUID']:
resources:
VCPU: 1
DISK_GB: 20
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
bad_field: moo
status: 400
response_strings:
- JSON does not validate
- name: create the resource provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 201
- name: set some inventory
PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_provider_generation: 0
inventories:
DISK_GB:
total: 2048
min_unit: 10
max_unit: 1024
VCPU:
total: 96
status: 200
- name: put an allocation dictish
PUT: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9
data:
allocations:
$ENVIRON['RP_UUID']:
resources:
VCPU: 1
DISK_GB: 20
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
status: 204
- name: get that allocation
GET: $LAST_URL
- name: put that same allocation back
PUT: $LAST_URL
data:
# there's a generation in allocations, ignored
allocations: $RESPONSE['$.allocations']
# project_id and user_id not in the get response so we add it
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
status: 204

View File

@ -1,152 +0,0 @@
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
openstack-api-version: placement 1.8
tests:
- name: put an allocation no project_id or user_id
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 10
status: 400
response_strings:
- Failed validating 'required' in schema
- name: put an allocation no project_id
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 10
user_id: $ENVIRON['USER_ID']
status: 400
response_strings:
- Failed validating 'required' in schema
- name: put an allocation no user_id
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
status: 400
response_strings:
- Failed validating 'required' in schema
- name: put an allocation project_id less than min length
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 10
project_id: ""
user_id: $ENVIRON['USER_ID']
status: 400
response_strings:
- "Failed validating 'minLength'"
- name: put an allocation user_id less than min length
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
user_id: ""
status: 400
response_strings:
- "Failed validating 'minLength'"
- name: put an allocation project_id exceeds max length
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 10
project_id: 78725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b1
user_id: $ENVIRON['USER_ID']
status: 400
response_strings:
- "Failed validating 'maxLength'"
- name: put an allocation user_id exceeds max length
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
user_id: 78725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b1
status: 400
response_strings:
- "Failed validating 'maxLength'"
- name: create the resource provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 201
- name: post some inventory
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 2048
min_unit: 10
max_unit: 1024
status: 201
- name: put an allocation
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
status: 204

View File

@ -1,255 +0,0 @@
fixtures:
- AllocationFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
content-type: application/json
openstack-api-version: placement 1.28
#
# Scenarios to test
# Start with no consumers
# old, no CG = success, consumer gets created
# new, no CG = fail, due to schema
# new, CG=None = success, consumer gets created
# new, CG=<any> = fail
# Create an allocation, and with it, a consumer
# Now create another allocation
# old, no CG = success
# new, CG=None = fail
# new, CG !match = fail
# new, get CG from /allocations
# new, CG matches = success
tests:
- name: old version no gen no existing
PUT: /allocations/11111111-1111-1111-1111-111111111111
request_headers:
openstack-api-version: placement 1.27
data:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
status: 204
- name: new version no gen no existing
PUT: /allocations/22222222-2222-2222-2222-222222222222
data:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
status: 400
response_strings:
- JSON does not validate
- name: new version gen is not null no existing
PUT: /allocations/22222222-2222-2222-2222-222222222222
data:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 5
status: 409
response_strings:
- consumer generation conflict
- expected null but got 5
response_json_paths:
$.errors[0].code: placement.concurrent_update
- name: new version gen is None no existing
PUT: /allocations/22222222-2222-2222-2222-222222222222
data:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: null
status: 204
- name: new version any gen no existing
PUT: /allocations/33333333-3333-3333-3333-333333333333
data:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 33
status: 409
response_strings:
- consumer generation conflict
# Now create an allocation for a specific consumer
- name: put an allocation
PUT: /allocations/44444444-4444-4444-4444-444444444444
data:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: null
status: 204
- name: new version no gen existing
PUT: /allocations/44444444-4444-4444-4444-444444444444
data:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: null
status: 409
response_strings:
- consumer generation conflict
- name: get the current consumer generation
GET: /allocations/44444444-4444-4444-4444-444444444444
status: 200
- name: new version matching gen existing
PUT: /allocations/44444444-4444-4444-4444-444444444444
data:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: $HISTORY["get the current consumer generation"].$RESPONSE["consumer_generation"]
status: 204
- name: new version mismatch gen existing
PUT: /allocations/44444444-4444-4444-4444-444444444444
data:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 12
status: 409
response_strings:
- consumer generation conflict
response_json_paths:
$.errors[0].code: placement.concurrent_update
- name: old version no gen existing
PUT: /allocations/44444444-4444-4444-4444-444444444444
request_headers:
openstack-api-version: placement 1.27
data:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 10
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
status: 204
- name: new version serialization contains consumer generation
GET: /allocations/44444444-4444-4444-4444-444444444444
status: 200
response_json_paths:
$.consumer_generation: /^\d+$/
- name: empty allocations dict now possible in PUT /allocations/{consumer_uuid}
PUT: /allocations/44444444-4444-4444-4444-444444444444
data:
allocations: {}
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: $HISTORY["new version serialization contains consumer generation"].$RESPONSE["consumer_generation"]
status: 204
- name: should now return no allocations for this consumer
GET: /allocations/44444444-4444-4444-4444-444444444444
status: 200
response_json_paths:
$.allocations.`len`: 0
# The following tests cover cases where we are putting allocations to
# multiple resource providers from one consumer uuid, both a brand new
# consumer and an existing one.
- name: create shared disk
POST: /resource_providers
data:
name: shared_disker
uuid: 8aa83304-4b6d-4a23-b954-06d8b36b206a
- name: trait that disk
PUT: /resource_providers/8aa83304-4b6d-4a23-b954-06d8b36b206a/traits
data:
resource_provider_generation: $RESPONSE['$.generation']
traits:
- MISC_SHARES_VIA_AGGREGATE
- STORAGE_DISK_SSD
- name: set disk inventory
PUT: /resource_providers/8aa83304-4b6d-4a23-b954-06d8b36b206a/inventories
data:
inventories:
DISK_GB:
total: 5000
resource_provider_generation: $RESPONSE['$.resource_provider_generation']
- name: disk in aggregate
PUT: /resource_providers/8aa83304-4b6d-4a23-b954-06d8b36b206a/aggregates
data:
resource_provider_generation: $RESPONSE['$.resource_provider_generation']
aggregates:
- 7fade9e1-ab01-4d1b-84db-ac74f740bb42
- name: compute in aggregate
PUT: /resource_providers/$ENVIRON['RP_UUID']/aggregates
request_headers:
# avoid generation in aggregates
openstack-api-version: placement 1.10
data:
- 7fade9e1-ab01-4d1b-84db-ac74f740bb42
- name: get candidates with shared
GET: /allocation_candidates?resources=VCPU:1,DISK_GB:200&required=STORAGE_DISK_SSD
response_json_paths:
$.allocation_requests.`len`: 1
$.allocation_requests[0].allocations['$ENVIRON["RP_UUID"]'].resources.VCPU: 1
$.allocation_requests[0].allocations['8aa83304-4b6d-4a23-b954-06d8b36b206a'].resources.DISK_GB: 200
- name: put that allocation to new consumer
PUT: /allocations/55555555-5555-5555-5555-555555555555
data:
allocations: $RESPONSE['$.allocation_requests[0].allocations']
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: null
status: 204
- name: put that allocation to existing consumer
PUT: /allocations/22222222-2222-2222-2222-222222222222
data:
allocations: $HISTORY['get candidates with shared'].$RESPONSE['$.allocation_requests[0].allocations']
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
# we just happen to know this is supposed to be 1 here, so shortcutting
consumer_generation: 1
status: 204

View File

@ -1,97 +0,0 @@
# Bug 1714072 describes a situation where a resource provider is present in the
# body of an allocation, but the resources object is empty. There should be at
# least one resource class and value pair. If there is not, a 400 response
# should be returned.
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
content-type: application/json
# Default to <= 1.11 so the PUT /allocations in here that use the
# older list-ish format continue to work.
openstack-api-version: placement 1.11
tests:
- name: create a resource provider
POST: /resource_providers
data:
name: an rp
status: 201
- name: get resource provider
GET: $LOCATION
status: 200
- name: add inventory to an rp
PUT: $RESPONSE['$.links[?rel = "inventories"].href']
data:
resource_provider_generation: 0
inventories:
VCPU:
total: 24
MEMORY_MB:
total: 1024
status: 200
- name: put a successful allocation
PUT: /allocations/c9f0186b-64f8-44fb-b6c9-83008d8d6940
data:
allocations:
- resource_provider:
uuid: $HISTORY['get resource provider'].$RESPONSE['$.uuid']
resources:
VCPU: 1
MEMORY_MB: 1
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
status: 204
- name: fail with empty resources
PUT: /allocations/c9f0186b-64f8-44fb-b6c9-83008d8d6940
data:
allocations:
- resource_provider:
uuid: $HISTORY['get resource provider'].$RESPONSE['$.uuid']
resources: {}
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
status: 400
response_strings:
- does not have enough properties
# The next two tests confirm that the bug identified by
# this file's name is not present in the PUT /allocations/{consumer_uuid}
# format added by microversion 1.12.
- name: put a successful dictish allocation
PUT: /allocations/c9f0186b-64f8-44fb-b6c9-83008d8d6940
request_headers:
openstack-api-version: placement 1.12
data:
allocations:
$HISTORY['get resource provider'].$RESPONSE['$.uuid']:
resources:
VCPU: 1
MEMORY_MB: 1
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
status: 204
- name: fail with empty resources dictish
PUT: /allocations/c9f0186b-64f8-44fb-b6c9-83008d8d6940
request_headers:
openstack-api-version: placement 1.12
data:
allocations:
$HISTORY['get resource provider'].$RESPONSE['$.uuid']:
resources: {}
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
status: 400
response_strings:
- does not have enough properties

View File

@ -1,71 +0,0 @@
# Demonstrate part of bug 1778591, where when creating an allocation for
# a new consumer will create the consumer and its generation, but if it
# fails, the subsequent request requires generation 0, not null, which is
# not what we expect. This is made more problematic in that we cannot query
# the generation when the consumer has no allocations.
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
# consumer generations were added in 1.28
openstack-api-version: placement 1.28
content-type: application/json
accept: application/json
tests:
# create a simple resource provider with limited inventory
- name: create provider
POST: /resource_providers
data:
name: simple
uuid: $ENVIRON['RP_UUID']
- name: set inventory
PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories
data:
resource_provider_generation: 0
inventories:
VCPU:
total: 4
- name: fail allocations new consumer, bad capacity
PUT: /allocations/88888888-8888-8888-8888-888888888888
data:
allocations:
"$ENVIRON['RP_UUID']":
resources:
VCPU: 9999
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: null
status: 409
response_strings:
- The requested amount would exceed the capacity
- name: try to get consumer generation
desc: when there are no allocations we can't see the generation of a consumer
GET: /allocations/88888888-8888-8888-8888-888888888888
response_json_paths:
# check entire response
$:
allocations: {}
# The failure to allocate above should have deleted the auto-created consumer,
# so when we retry the allocation here, we should be able to use the
# appropriate null generation to indicate this is a new consumer
- name: retry allocations new consumer, still null gen
PUT: /allocations/88888888-8888-8888-8888-888888888888
data:
allocations:
"$ENVIRON['RP_UUID']":
resources:
VCPU: 1
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: null
status: 204

View File

@ -1,70 +0,0 @@
# Test to see if capacity check in POST allocations works as expected.
# It did not, due to bug 1778743, but it is now fixed.
fixtures:
- APIFixture
defaults:
request_headers:
# 1.28 provides consumer generation in allocations
openstack-api-version: placement 1.28
x-auth-token: admin
content-type: application/json
accept: application/json
tests:
- name: create an rp
POST: /resource_providers
data:
uuid: 4e05a85b-e8a6-4b3a-82c1-5f6ad3f71d55
name: rp1
- name: add vcpu inventory
PUT: /resource_providers/4e05a85b-e8a6-4b3a-82c1-5f6ad3f71d55/inventories
data:
resource_provider_generation: 0
inventories:
VCPU:
total: 2
- name: post multiple allocations
desc: this should 409 because we're allocating 3 VCPU!
POST: /allocations
data:
a6ace019-f230-4dcc-8a76-36d27b9c2257:
allocations:
4e05a85b-e8a6-4b3a-82c1-5f6ad3f71d55:
resources:
VCPU: 1
project_id: a2cec092-0f67-42ed-b870-f3925cc5c6d4
user_id: d28385b2-7860-4055-b32d-4cd1057cd5f2
consumer_generation: null
2e613d4f-f5b2-4956-bd61-ea5be6600f80:
allocations:
4e05a85b-e8a6-4b3a-82c1-5f6ad3f71d55:
resources:
VCPU: 1
project_id: a2cec092-0f67-42ed-b870-f3925cc5c6d4
user_id: d28385b2-7860-4055-b32d-4cd1057cd5f2
consumer_generation: null
2b3abca1-b72b-4817-9217-397f19b52c92:
allocations:
4e05a85b-e8a6-4b3a-82c1-5f6ad3f71d55:
resources:
VCPU: 1
project_id: a2cec092-0f67-42ed-b870-f3925cc5c6d4
user_id: d28385b2-7860-4055-b32d-4cd1057cd5f2
consumer_generation: null
status: 409
- name: check usage
GET: /resource_providers/4e05a85b-e8a6-4b3a-82c1-5f6ad3f71d55/usages
response_json_paths:
$.usages.VCPU: 0
- name: check inventory
GET: /resource_providers/4e05a85b-e8a6-4b3a-82c1-5f6ad3f71d55/inventories
response_json_paths:
$.inventories.VCPU.total: 2

View File

@ -1,102 +0,0 @@
# Test that it's possible to change the project or user identifier for a
# consumer by specifying a different project_id or user_id value in the payload
# of both a PUT /allocations/{consumer_uuid} or POST /allocations
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
content-type: application/json
openstack-api-version: placement 1.28
tests:
- name: create cn1
POST: /resource_providers
data:
name: cn1
status: 200
- name: add inventory
PUT: $HISTORY['create cn1'].$RESPONSE['links[?rel = "inventories"].href']
data:
resource_provider_generation: 0
inventories:
VCPU:
total: 16
MEMORY_MB:
total: 2048
- name: create allocations for consumer1
PUT: /allocations/11111111-1111-1111-1111-111111111111
data:
allocations:
$HISTORY['create cn1'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 2
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: null
status: 204
- name: get allocations for consumer1
GET: /allocations/11111111-1111-1111-1111-111111111111
status: 200
response_json_paths:
$.project_id: $ENVIRON['PROJECT_ID']
$.user_id: $ENVIRON['USER_ID']
- name: change the project for consumer1
PUT: /allocations/11111111-1111-1111-1111-111111111111
data:
allocations:
$HISTORY['create cn1'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 2
project_id: $ENVIRON['PROJECT_ID_ALT']
user_id: $ENVIRON['USER_ID']
consumer_generation: 1
status: 204
- name: check consumer1's project is now the other project
GET: /allocations/11111111-1111-1111-1111-111111111111
status: 200
response_json_paths:
$.project_id: $ENVIRON['PROJECT_ID_ALT']
$.user_id: $ENVIRON['USER_ID']
- name: create allocations for two consumers
POST: /allocations
data:
11111111-1111-1111-1111-111111111111:
allocations:
$HISTORY['create cn1'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 1
consumer_generation: 2
# Change consumer1's project back to the original PROJECT_ID
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
22222222-2222-2222-2222-222222222222:
allocations:
$HISTORY['create cn1'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 1
consumer_generation: null
project_id: $ENVIRON['PROJECT_ID_ALT']
user_id: $ENVIRON['USER_ID_ALT']
status: 204
- name: check consumer1's project is back to the original project
GET: /allocations/11111111-1111-1111-1111-111111111111
status: 200
response_json_paths:
$.project_id: $ENVIRON['PROJECT_ID']
$.user_id: $ENVIRON['USER_ID']

View File

@ -1,76 +0,0 @@
# This tests the individual CRUD operations on
# /allocations* and /resource_providers/{uuid}/allocations using a non-admin
# user with an open policy configuration. The response validation is
# intentionally minimal.
fixtures:
- OpenPolicyFixture
defaults:
request_headers:
x-auth-token: user
accept: application/json
content-type: application/json
openstack-api-version: placement latest
tests:
- name: create resource provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 200
- name: set some inventory
PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_provider_generation: 0
inventories:
DISK_GB:
total: 2048
min_unit: 10
max_unit: 1024
VCPU:
total: 96
status: 200
- name: create allocation for consumer
PUT: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9
data:
allocations:
$ENVIRON['RP_UUID']:
resources:
VCPU: 1
DISK_GB: 20
consumer_generation: null
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
status: 204
- name: list allocations for consumer
GET: $LAST_URL
- name: list allocations for resource provider
GET: /resource_providers/$ENVIRON['RP_UUID']/allocations
- name: manage allocations
POST: /allocations
data:
a0b15655-273a-4b3d-9792-2e579b7d5ad9:
consumer_generation: 1
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
allocations:
$ENVIRON['RP_UUID']:
resources:
VCPU: 8
DISK_GB: 40
status: 204
- name: delete allocation for consumer
DELETE: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9
status: 204

View File

@ -1,399 +0,0 @@
# Test that it is possible to POST multiple allocations to /allocations to
# simultaneously make changes, including removing resources for a consumer if
# the allocations are empty.
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
content-type: application/json
openstack-api-version: placement 1.13
tests:
- name: create compute one
POST: /resource_providers
data:
name: compute01
status: 201
- name: rp compute01
desc: provide a reference for later reuse
GET: $LOCATION
- name: create compute two
POST: /resource_providers
data:
name: compute02
status: 201
- name: rp compute02
desc: provide a reference for later reuse
GET: $LOCATION
- name: create shared disk
POST: /resource_providers
data:
name: storage01
status: 201
- name: rp storage01
desc: provide a reference for later reuse
GET: $LOCATION
- name: inventory compute01
PUT: $HISTORY['rp compute01'].$RESPONSE['links[?rel = "inventories"].href']
data:
resource_provider_generation: 0
inventories:
VCPU:
total: 16
MEMORY_MB:
total: 2048
- name: inventory compute02
PUT: $HISTORY['rp compute02'].$RESPONSE['links[?rel = "inventories"].href']
data:
resource_provider_generation: 0
inventories:
VCPU:
total: 16
MEMORY_MB:
total: 2048
- name: inventory storage01
PUT: $HISTORY['rp storage01'].$RESPONSE['links[?rel = "inventories"].href']
data:
resource_provider_generation: 0
inventories:
DISK_GB:
total: 4096
- name: confirm only POST
GET: /allocations
status: 405
response_headers:
allow: POST
- name: 404 on older 1.12 microversion post
POST: /allocations
request_headers:
openstack-api-version: placement 1.12
status: 404
- name: post allocations two consumers
POST: /allocations
data:
$ENVIRON['INSTANCE_UUID']:
allocations:
$HISTORY['rp compute02'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 2
$HISTORY['rp storage01'].$RESPONSE['uuid']:
resources:
DISK_GB: 5
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
$ENVIRON['MIGRATION_UUID']:
allocations:
$HISTORY['rp compute01'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 2
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
status: 204
- name: get allocations for instance consumer
GET: /allocations/$ENVIRON['INSTANCE_UUID']
request_headers:
# We want to inspect the consumer generations...
openstack-api-version: placement 1.28
response_json_paths:
$.allocations["$HISTORY['rp compute02'].$RESPONSE['uuid']"].resources[MEMORY_MB]: 1024
$.allocations["$HISTORY['rp compute02'].$RESPONSE['uuid']"].resources[VCPU]: 2
$.allocations["$HISTORY['rp storage01'].$RESPONSE['uuid']"].resources[DISK_GB]: 5
$.consumer_generation: 1
$.project_id: $ENVIRON['PROJECT_ID']
$.user_id: $ENVIRON['USER_ID']
- name: get allocations for migration consumer
GET: /allocations/$ENVIRON['MIGRATION_UUID']
request_headers:
# We want to inspect the consumer generations...
openstack-api-version: placement 1.28
response_json_paths:
$.allocations["$HISTORY['rp compute01'].$RESPONSE['uuid']"].resources[MEMORY_MB]: 1024
$.allocations["$HISTORY['rp compute01'].$RESPONSE['uuid']"].resources[VCPU]: 2
$.consumer_generation: 1
$.project_id: $ENVIRON['PROJECT_ID']
$.user_id: $ENVIRON['USER_ID']
- name: confirm usages
GET: /usages?project_id=$ENVIRON['PROJECT_ID']
response_json_paths:
$.usages.DISK_GB: 5
$.usages.VCPU: 4
$.usages.MEMORY_MB: 2048
- name: clear and set allocations
POST: /allocations
data:
$ENVIRON['INSTANCE_UUID']:
allocations:
$HISTORY['rp compute02'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 2
$HISTORY['rp storage01'].$RESPONSE['uuid']:
resources:
DISK_GB: 5
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
$ENVIRON['MIGRATION_UUID']:
allocations: {}
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
status: 204
- name: confirm usages after clear
GET: /usages?project_id=$ENVIRON['PROJECT_ID']
response_json_paths:
$.usages.DISK_GB: 5
$.usages.VCPU: 2
$.usages.MEMORY_MB: 1024
- name: post allocations two users
POST: /allocations
data:
$ENVIRON['INSTANCE_UUID']:
allocations:
$HISTORY['rp compute02'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 2
$HISTORY['rp storage01'].$RESPONSE['uuid']:
resources:
DISK_GB: 5
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
# We must use a fresh consumer id with the alternate project id info.
# A previously seen consumer id will be assumed to always have the same
# project and user.
$ENVIRON['CONSUMER_UUID']:
allocations:
$HISTORY['rp compute01'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 2
project_id: $ENVIRON['PROJECT_ID_ALT']
user_id: $ENVIRON['USER_ID_ALT']
status: 204
- name: confirm usages user a
GET: /usages?project_id=$ENVIRON['PROJECT_ID']
response_json_paths:
$.usages.`len`: 3
$.usages.DISK_GB: 5
$.usages.VCPU: 2
$.usages.MEMORY_MB: 1024
- name: confirm usages user b
GET: /usages?project_id=$ENVIRON['PROJECT_ID_ALT']
response_json_paths:
$.usages.`len`: 2
$.usages.VCPU: 2
$.usages.MEMORY_MB: 1024
- name: fail allocations over capacity
POST: /allocations
data:
$ENVIRON['INSTANCE_UUID']:
allocations:
$HISTORY['rp compute02'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 2
$HISTORY['rp storage01'].$RESPONSE['uuid']:
resources:
DISK_GB: 5
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
$ENVIRON['CONSUMER_UUID']:
allocations:
$HISTORY['rp compute01'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 2049
VCPU: 2
project_id: $ENVIRON['PROJECT_ID_ALT']
user_id: $ENVIRON['USER_ID_ALT']
status: 409
response_strings:
- The requested amount would exceed the capacity
- name: fail allocations deep schema violate
desc: no schema yet
POST: /allocations
data:
$ENVIRON['INSTANCE_UUID']:
allocations:
$HISTORY['rp compute02'].$RESPONSE['uuid']:
cow: moo
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
status: 400
- name: fail allocations shallow schema violate
desc: no schema yet
POST: /allocations
data:
$ENVIRON['INSTANCE_UUID']:
cow: moo
status: 400
- name: fail resource provider not exist
POST: /allocations
data:
$ENVIRON['INSTANCE_UUID']:
allocations:
# this rp does not exist
'c42def7b-498b-4442-9502-c7970b14bea4':
resources:
MEMORY_MB: 1024
VCPU: 2
$HISTORY['rp storage01'].$RESPONSE['uuid']:
resources:
DISK_GB: 5
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
status: 400
response_strings:
- that does not exist
- name: fail resource class not in inventory
POST: /allocations
data:
$ENVIRON['INSTANCE_UUID']:
allocations:
$HISTORY['rp compute02'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 2
PCI_DEVICE: 1
$HISTORY['rp storage01'].$RESPONSE['uuid']:
resources:
DISK_GB: 5
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
status: 409
response_strings:
- "Inventory for 'PCI_DEVICE' on"
- name: fail resource class not exist
POST: /allocations
data:
$ENVIRON['INSTANCE_UUID']:
allocations:
$HISTORY['rp compute02'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 2
CUSTOM_PONY: 1
$HISTORY['rp storage01'].$RESPONSE['uuid']:
resources:
DISK_GB: 5
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
status: 400
response_strings:
- No such resource class CUSTOM_PONY
- name: fail missing consumer generation >= 1.28
POST: /allocations
request_headers:
openstack-api-version: placement 1.28
data:
$ENVIRON['INSTANCE_UUID']:
allocations:
$HISTORY['rp compute02'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 2
$HISTORY['rp storage01'].$RESPONSE['uuid']:
resources:
DISK_GB: 5
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
$ENVIRON['CONSUMER_UUID']:
allocations:
$HISTORY['rp compute01'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 2049
VCPU: 2
project_id: $ENVIRON['PROJECT_ID_ALT']
user_id: $ENVIRON['USER_ID_ALT']
status: 400
response_strings:
- JSON does not validate
- name: fail incorrect consumer generation >= 1.28
POST: /allocations
request_headers:
openstack-api-version: placement 1.28
data:
$ENVIRON['INSTANCE_UUID']:
allocations:
$HISTORY['rp compute02'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 1
$HISTORY['rp storage01'].$RESPONSE['uuid']:
resources:
DISK_GB: 4
consumer_generation: 1
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
$ENVIRON['CONSUMER_UUID']:
allocations:
$HISTORY['rp compute01'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 1
consumer_generation: 1
project_id: $ENVIRON['PROJECT_ID_ALT']
user_id: $ENVIRON['USER_ID_ALT']
status: 409
response_strings:
- consumer generation conflict - expected 3 but got 1
- name: change allocations for existing providers >= 1.28
POST: /allocations
request_headers:
openstack-api-version: placement 1.28
data:
$ENVIRON['INSTANCE_UUID']:
allocations:
$HISTORY['rp compute02'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 1
$HISTORY['rp storage01'].$RESPONSE['uuid']:
resources:
DISK_GB: 4
consumer_generation: 3
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
$ENVIRON['CONSUMER_UUID']:
allocations:
$HISTORY['rp compute01'].$RESPONSE['uuid']:
resources:
MEMORY_MB: 1024
VCPU: 1
consumer_generation: 1
project_id: $ENVIRON['PROJECT_ID_ALT']
user_id: $ENVIRON['USER_ID_ALT']
status: 204

View File

@ -1,509 +0,0 @@
# Tests of allocations API
#
# Note(cdent): Consumer ids are not validated against anything to
# confirm that they are associated with anything real. This is
# by design.
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
tests:
- name: get allocations no consumer is 405
GET: /allocations
status: 405
response_json_paths:
$.errors[0].title: Method Not Allowed
- name: get allocations is empty dict
GET: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
response_json_paths:
$.allocations: {}
- name: put an allocation no resource provider
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resources:
DISK_GB: 10
status: 400
response_json_paths:
$.errors[0].title: Bad Request
- name: create the resource provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 201
- name: put an allocation no data
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
status: 400
response_json_paths:
$.errors[0].title: Bad Request
- name: put an allocation empty list
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations: []
status: 400
response_strings:
- "Failed validating 'minItems'"
- name: put an allocation violate schema
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
cow: 10
status: 400
response_json_paths:
$.errors[0].title: Bad Request
- name: put an allocation no inventory
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 10
status: 409
response_json_paths:
$.errors[0].title: Conflict
- name: post some inventory
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 2048
min_unit: 10
max_unit: 1024
status: 201
- name: put an allocation with zero usage
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 0
status: 400
response_strings:
- "JSON does not validate: 0 is less than the minimum of 1"
- Failed validating 'minimum' in schema
- name: put an allocation with omitted usage
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
status: 400
response_strings:
- Failed validating 'required' in schema
- name: put an allocation
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 10
status: 204
- name: fail to delete that provider
DELETE: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
content-type: application/json
# we need this microversion to get error codes in the response
openstack-api-version: placement 1.23
status: 409
response_strings:
- "Unable to delete resource provider $ENVIRON['RP_UUID']"
response_json_paths:
errors[0].code: placement.resource_provider.inuse
- name: put an allocation different consumer
PUT: /allocations/39715579-2167-4c63-8247-301311cc6703
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 10
status: 204
- name: check usages after another 10
GET: /resource_providers/$ENVIRON['RP_UUID']/usages
response_json_paths:
$.usages.DISK_GB: 20
# NOTE(cdent): Contravening the spec, we decided that it is
# important to be able to update an existing allocation, so this
# should work but it is important to check the usage.
- name: put allocation again
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 12
status: 204
- name: check usages after 12
GET: /resource_providers/$ENVIRON['RP_UUID']/usages
response_json_paths:
$.usages.DISK_GB: 22
- name: put allocation bad resource class
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
COWS: 12
status: 400
response_strings:
- Unable to allocate inventory for consumer
- No such resource class COWS
response_json_paths:
$.errors[0].title: Bad Request
- name: delete allocation
DELETE: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
status: 204
- name: delete allocation again
DELETE: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
status: 404
response_strings:
- No allocations for consumer '599ffd2d-526a-4b2e-8683-f13ad25f9958'
response_json_paths:
$.errors[0].title: Not Found
- name: delete allocation of unknown consumer id
DELETE: /allocations/da78521f-bf7e-4e6e-9901-3f79bd94d55d
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: redo an allocation
PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 10
status: 204
- name: add other inventory
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: VCPU
total: 32
min_unit: 1
max_unit: 8
status: 201
- name: multiple allocations
PUT: /allocations/833f0885-f78c-4788-bb2b-3607b0656be7
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 20
VCPU: 4
status: 204
- name: check usages
GET: /resource_providers/$ENVIRON['RP_UUID']/usages
response_json_paths:
$.resource_provider_generation: 7
$.usages.DISK_GB: 40
- name: check allocations for the resource provider
GET: /resource_providers/$ENVIRON['RP_UUID']/allocations
response_json_paths:
$.resource_provider_generation: 7
# allocations are keyed by consumer id, jsonpath-rw needs us
# to quote the uuids or its parser gets confused that maybe
# they are numbers on which math needs to be done.
$.allocations['833f0885-f78c-4788-bb2b-3607b0656be7'].resources.DISK_GB: 20
$.allocations['833f0885-f78c-4788-bb2b-3607b0656be7'].resources.VCPU: 4
$.allocations['599ffd2d-526a-4b2e-8683-f13ad25f9958'].resources.DISK_GB: 10
$.allocations['39715579-2167-4c63-8247-301311cc6703'].resources.DISK_GB: 10
- name: confirm 404 for allocations of bad resource provider
GET: /resource_providers/cb8a3007-b93a-471f-9e1f-4d58355678bd/allocations
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: check allocations by consumer id
GET: /allocations/833f0885-f78c-4788-bb2b-3607b0656be7
response_json_paths:
$.allocations["$ENVIRON['RP_UUID']"].generation: 7
$.allocations["$ENVIRON['RP_UUID']"].resources.DISK_GB: 20
$.allocations["$ENVIRON['RP_UUID']"].resources.VCPU: 4
- name: check allocations by different consumer id
GET: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
response_json_paths:
$.allocations["$ENVIRON['RP_UUID']"].generation: 7
$.allocations["$ENVIRON['RP_UUID']"].resources.DISK_GB: 10
# create another two resource providers to test retrieving
# allocations
- name: create resource provider 1
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: rp1
uuid: 9229b2fc-d556-4e38-9c18-443e4bc6ceae
status: 201
- name: create resource provider 2
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: rp2
uuid: fcfa516a-abbe-45d1-8152-d5225d82e596
status: 201
- name: set inventory on rp1
PUT: /resource_providers/9229b2fc-d556-4e38-9c18-443e4bc6ceae/inventories
request_headers:
content-type: application/json
data:
resource_provider_generation: 0
inventories:
VCPU:
total: 32
max_unit: 32
DISK_GB:
total: 10
max_unit: 10
- name: set inventory on rp2
PUT: /resource_providers/fcfa516a-abbe-45d1-8152-d5225d82e596/inventories
request_headers:
content-type: application/json
data:
resource_provider_generation: 0
inventories:
VCPU:
total: 16
max_unit: 16
DISK_GB:
total: 20
max_unit: 20
status: 200
- name: put allocations on both those providers one
PUT: /allocations/1835b1c9-1c61-45af-9eb3-3e0e9f29487b
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: fcfa516a-abbe-45d1-8152-d5225d82e596
resources:
DISK_GB: 10
VCPU: 8
- resource_provider:
uuid: 9229b2fc-d556-4e38-9c18-443e4bc6ceae
resources:
DISK_GB: 5
VCPU: 16
status: 204
- name: put allocations on both those providers two
PUT: /allocations/75d0f5f7-75d9-458c-b204-f90ac91604ec
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: fcfa516a-abbe-45d1-8152-d5225d82e596
resources:
DISK_GB: 5
VCPU: 4
- resource_provider:
uuid: 9229b2fc-d556-4e38-9c18-443e4bc6ceae
resources:
DISK_GB: 2
VCPU: 8
status: 204
# These headers should not be present in any microversion on PUT
# because there is no response body.
response_forbidden_headers:
- cache-control
- last-modified
- name: get those allocations for consumer
GET: /allocations/1835b1c9-1c61-45af-9eb3-3e0e9f29487b
response_json_paths:
$.allocations.['fcfa516a-abbe-45d1-8152-d5225d82e596'].generation: 3
$.allocations.['fcfa516a-abbe-45d1-8152-d5225d82e596'].resources.DISK_GB: 10
$.allocations.['fcfa516a-abbe-45d1-8152-d5225d82e596'].resources.VCPU: 8
$.allocations.['9229b2fc-d556-4e38-9c18-443e4bc6ceae'].generation: 3
$.allocations.['9229b2fc-d556-4e38-9c18-443e4bc6ceae'].resources.DISK_GB: 5
$.allocations.['9229b2fc-d556-4e38-9c18-443e4bc6ceae'].resources.VCPU: 16
- name: get those allocations for resource provider
GET: /resource_providers/fcfa516a-abbe-45d1-8152-d5225d82e596/allocations
response_json_paths:
$.resource_provider_generation: 3
$.allocations.['75d0f5f7-75d9-458c-b204-f90ac91604ec'].resources.DISK_GB: 5
$.allocations.['75d0f5f7-75d9-458c-b204-f90ac91604ec'].resources.VCPU: 4
$.allocations.['1835b1c9-1c61-45af-9eb3-3e0e9f29487b'].resources.DISK_GB: 10
$.allocations.['1835b1c9-1c61-45af-9eb3-3e0e9f29487b'].resources.VCPU: 8
- name: put allocations on existing consumer with dashless UUID
PUT: /allocations/75d0f5f775d9458cb204f90ac91604ec
request_headers:
content-type: application/json
# Consumer generation
openstack-api-version: placement 1.28
data:
allocations:
fcfa516a-abbe-45d1-8152-d5225d82e596:
resources:
DISK_GB: 1
VCPU: 1
9229b2fc-d556-4e38-9c18-443e4bc6ceae:
resources:
DISK_GB: 1
VCPU: 1
consumer_generation: 1
project_id: 00000000-0000-0000-0000-000000000000
user_id: 00000000-0000-0000-0000-000000000000
status: 204
- name: get allocations on existing consumer with dashed UUID
GET: /allocations/75d0f5f7-75d9-458c-b204-f90ac91604ec
response_json_paths:
$.allocations.['fcfa516a-abbe-45d1-8152-d5225d82e596'].generation: 4
$.allocations.['fcfa516a-abbe-45d1-8152-d5225d82e596'].resources.DISK_GB: 1
$.allocations.['fcfa516a-abbe-45d1-8152-d5225d82e596'].resources.VCPU: 1
$.allocations.['9229b2fc-d556-4e38-9c18-443e4bc6ceae'].generation: 4
$.allocations.['9229b2fc-d556-4e38-9c18-443e4bc6ceae'].resources.DISK_GB: 1
$.allocations.['9229b2fc-d556-4e38-9c18-443e4bc6ceae'].resources.VCPU: 1
- name: put an allocation for a not existing resource provider
PUT: /allocations/75d0f5f7-75d9-458c-b204-f90ac91604ec
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: be8b9cba-e7db-4a12-a386-99b4242167fe
resources:
DISK_GB: 5
VCPU: 4
status: 400
response_strings:
- Allocation for resource provider 'be8b9cba-e7db-4a12-a386-99b4242167fe' that does not exist
response_json_paths:
$.errors[0].title: Bad Request
- name: get allocations for resource provider with cache headers 1.15
GET: /resource_providers/fcfa516a-abbe-45d1-8152-d5225d82e596/allocations
request_headers:
openstack-api-version: placement 1.15
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: get allocations for resource provider without cache headers 1.14
GET: /resource_providers/fcfa516a-abbe-45d1-8152-d5225d82e596/allocations
request_headers:
openstack-api-version: placement 1.14
response_forbidden_headers:
- cache-control
- last-modified
- name: get allocations for consumer with cache headers 1.15
GET: /allocations/1835b1c9-1c61-45af-9eb3-3e0e9f29487b
request_headers:
openstack-api-version: placement 1.15
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: get allocations for consumer without cache headers 1.14
GET: /allocations/1835b1c9-1c61-45af-9eb3-3e0e9f29487b
request_headers:
openstack-api-version: placement 1.14
response_forbidden_headers:
- cache-control
- last-modified
- name: creating allocation with a non UUID consumer fails
PUT: /allocations/not-a-uuid
request_headers:
content-type: application/json
data:
allocations:
- resource_provider:
uuid: fcfa516a-abbe-45d1-8152-d5225d82e596
resources:
DISK_GB: 1
VCPU: 1
status: 400
response_strings:
- Malformed consumer_uuid

View File

@ -1,207 +0,0 @@
#
# Test the basic handling of HTTP (expected response codes and the
# like).
#
fixtures:
- APIFixture
defaults:
request_headers:
# NOTE(cdent): Get past keystone, even though at this stage
# we don't require auth.
x-auth-token: admin
accept: application/json
tests:
- name: 404 at no service
GET: /barnabas
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: error message has request id
GET: /barnabas
status: 404
response_json_paths:
$.errors[0].request_id: /req-[a-fA-F0-9-]+/
- name: error message has default code 1.23
GET: /barnabas
status: 404
request_headers:
openstack-api-version: placement 1.23
response_json_paths:
$.errors[0].code: placement.undefined_code
- name: 404 at no resource provider
GET: /resource_providers/fd0dd55c-6330-463b-876c-31c54e95cb95
status: 404
- name: 405 on bad method at root
DELETE: /
status: 405
response_headers:
allow: GET
response_json_paths:
$.errors[0].title: Method Not Allowed
- name: 200 at home
GET: /
status: 200
- name: 405 on bad method on app
DELETE: /resource_providers
status: 405
response_headers:
allow: /(GET|POST), (POST|GET)/
response_json_paths:
$.errors[0].title: Method Not Allowed
response_strings:
- The method DELETE is not allowed for this resource.
- name: 405 on bad options method on app
OPTIONS: /resource_providers
status: 405
response_headers:
allow: /(GET|POST), (POST|GET)/
response_json_paths:
$.errors[0].title: Method Not Allowed
response_strings:
- The method OPTIONS is not allowed for this resource.
- name: bad accept resource providers
GET: /resource_providers
request_headers:
accept: text/plain
status: 406
- name: complex accept resource providers
GET: /resource_providers
request_headers:
accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
status: 200
response_json_paths:
$.resource_providers: []
- name: post resource provider wrong content-type
POST: /resource_providers
request_headers:
content-type: text/plain
data: I want a resource provider please
status: 415
- name: post resource provider missing content-type
desc: because content-length is set, we should have a content-type
POST: /resource_providers
data: I want a resource provider please
status: 400
response_strings:
- content-type header required
# NOTE(cdent): This is an awkward test. It is not actually testing a
# PUT of a resource provider. It is confirming that a PUT with no
# body, no content-length header and no content-type header will
# reach the desired handler.
- name: PUT resource provider no body
desc: different response string from prior test indicates past content-length requirement
PUT: /resource_providers/d3a64825-8228-4ccb-8a6c-1c6d3eb6a3e8
status: 415
response_strings:
- The media type None is not supported, use application/json
- name: post resource provider schema mismatch
POST: /resource_providers
request_headers:
content-type: application/json
data:
transport: car
color: blue
status: 400
- name: post good resource provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 201
- name: get resource provider wrong accept
GET: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
accept: text/plain
status: 406
response_strings:
- Only application/json is provided
- name: get resource provider complex accept wild match
desc: like a browser, */* should match
GET: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
response_json_paths:
$.uuid: $ENVIRON['RP_UUID']
- name: get resource provider complex accept no match
desc: no */*, no match
GET: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
accept: text/html,application/xhtml+xml,application/xml;q=0.9
status: 406
- name: put poor format resource provider
PUT: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
content-type: text/plain
data: Why U no provide?
status: 415
- name: non inventory sub resource provider path
GET: /resource_providers/7850178f-1807-4512-b135-0b174985405b/cows
request_headers:
accept: application/json
status: 404
response_json_paths:
$.errors[0].title: Not Found
response_strings:
- The resource could not be found.
- name: root at 1.15 has cache headers
GET: /
request_headers:
openstack-api-version: placement 1.15
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: root at 1.14 no cache headers
GET: /
request_headers:
openstack-api-version: placement 1.14
response_forbidden_headers:
- last-modified
- cache-control
- name: test starred accept and errors
GET: /resource_providers/foo
request_headers:
accept: "*/*"
status: 404
response_headers:
content-type: application/json
response_json_paths:
$.errors[0].title: Not Found
- name: bad content length not int
POST: /resource_providers
request_headers:
content-type: application/json
content-length: hi mom
data:
uuid: ce13d7f1-9988-4dfd-8e16-ce071802eb36
status: 400
response_strings:
- content-length header must be an integer

View File

@ -1,38 +0,0 @@
# Test launchpad bug https://bugs.launchpad.net/nova/+bug/1674694
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
tests:
- name: 404 with application/json
GET: /bc8d9d50-7b0d-45ef-839c-e7b5e1c4e8fd
request_headers:
accept: application/json
status: 404
response_headers:
content-type: application/json
response_json_paths:
$.errors[0].status: 404
- name: 404 with no accept
GET: /bc8d9d50-7b0d-45ef-839c-e7b5e1c4e8fd
status: 404
response_headers:
content-type: application/json
response_json_paths:
$.errors[0].status: 404
- name: 404 with other accept
GET: /bc8d9d50-7b0d-45ef-839c-e7b5e1c4e8fd
status: 404
request_headers:
accept: text/html
response_headers:
content-type: /text/html/
response_strings:
- The resource could not be found

View File

@ -1,32 +0,0 @@
#
# Confirm that the noauth handler is causing a 401 when no fake
# token is provided.
#
fixtures:
- APIFixture
defaults:
request_headers:
accept: application/json
tests:
- name: no token gets 200 at root
GET: /
status: 200
- name: with token 200 at root
GET: /
request_headers:
x-auth-token: admin:admin
status: 200
- name: no token gets 401
GET: /resource_providers
status: 401
- name: with token 200
GET: /resource_providers
request_headers:
x-auth-token: admin:admin
status: 200

View File

@ -1,47 +0,0 @@
# Confirm that CORS is present. No complex configuration is done so
# this just tests the basics. Borrowed, in spirit, from
# nova.tests.functional.test_middleware.
fixtures:
- CORSFixture
defaults:
request_headers:
x-auth-token: user
tests:
- name: valid options request
OPTIONS: /
request_headers:
origin: http://valid.example.com
access-control-request-method: GET
status: 200
response_headers:
access-control-allow-origin: http://valid.example.com
- name: invalid options request
OPTIONS: /
request_headers:
origin: http://invalid.example.com
access-control-request-method: GET
status: 200
response_forbidden_headers:
- access-control-allow-origin
- name: valid get request
GET: /
request_headers:
origin: http://valid.example.com
access-control-request-method: GET
status: 200
response_headers:
access-control-allow-origin: http://valid.example.com
- name: invalid get request
GET: /
request_headers:
origin: http://invalid.example.com
access-control-request-method: GET
status: 200
response_forbidden_headers:
- access-control-allow-origin

View File

@ -1,41 +0,0 @@
# Tests of the ensure consumer behaviour for versions of the API before 1.8;
# starting with 1.8, project_id and user_id are required by the
# PUT: /allocations/{consumer_uuid} API.
fixtures:
- AllocationFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
openstack-api-version: placement 1.7
vars:
- &default_incomplete_id 00000000-0000-0000-0000-000000000000
tests:
- name: put an allocation without project/user (1.7)
PUT: /allocations/$ENVIRON['CONSUMER_UUID']
request_headers:
content-type: application/json
openstack-api-version: placement 1.7
data:
allocations:
- resource_provider:
uuid: $ENVIRON['RP_UUID']
resources:
DISK_GB: 10
status: 204
# We now ALWAYS create a consumer record, and if project or user isn't
# specified (as was the case in 1.7) we should get the project/user
# corresponding to the CONF option for incomplete consumers when asking for the
# allocation information at a microversion that shows project/user information
# (1.12+)
- name: get with 1.12 microversion and check project and user are filled
GET: /allocations/$ENVIRON['CONSUMER_UUID']
request_headers:
openstack-api-version: placement 1.12
response_json_paths:
$.project_id: *default_incomplete_id
$.user_id: *default_incomplete_id

View File

@ -1,474 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Tests for granular resource requests
fixtures:
# See the layout diagram in this fixture's docstring in ../fixtures.py
- GranularFixture
defaults:
request_headers:
x-auth-token: admin
content-type: application/json
accept: application/json
openstack-api-version: placement 1.25
tests:
- name: different groups hit with group_policy=none
GET: /allocation_candidates
query_parameters:
resources1: VCPU:1
resources2: MEMORY_MB:1024
group_policy: none
status: 200
response_json_paths:
$.allocation_requests.`len`: 3
$.provider_summaries.`len`: 3
$.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources:
VCPU: 1
MEMORY_MB: 1024
$.allocation_requests..allocations["$ENVIRON['CN_MIDDLE']"].resources:
VCPU: 1
MEMORY_MB: 1024
$.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources:
VCPU: 1
MEMORY_MB: 1024
$.provider_summaries["$ENVIRON['CN_LEFT']"].resources:
VCPU:
capacity: 8
used: 0
MEMORY_MB:
capacity: 4096
used: 0
$.provider_summaries["$ENVIRON['CN_MIDDLE']"].resources:
VCPU:
capacity: 8
used: 0
MEMORY_MB:
capacity: 4096
used: 0
$.provider_summaries["$ENVIRON['CN_RIGHT']"].resources:
VCPU:
capacity: 8
used: 0
MEMORY_MB:
capacity: 4096
used: 0
- name: different groups miss with group_policy=isolate
GET: /allocation_candidates
query_parameters:
resources1: VCPU:1
resources2: MEMORY_MB:1024
group_policy: isolate
status: 200
response_json_paths:
# We asked for VCPU and MEMORY_MB to be satisfied by *different*
# providers, because they're in separate numbered request groups and
# group_policy=isolate. Since there are no sharing providers of these
# resources, we get no results.
$.allocation_requests.`len`: 0
$.provider_summaries.`len`: 0
- name: resources combine
GET: /allocation_candidates
query_parameters:
resources: VCPU:3,MEMORY_MB:512
resources1: VCPU:1,MEMORY_MB:1024
resources2: VCPU:2
group_policy: none
status: 200
response_json_paths:
$.allocation_requests.`len`: 3
$.provider_summaries.`len`: 3
$.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources:
VCPU: 6
MEMORY_MB: 1536
$.allocation_requests..allocations["$ENVIRON['CN_MIDDLE']"].resources:
VCPU: 6
MEMORY_MB: 1536
$.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources:
VCPU: 6
MEMORY_MB: 1536
- name: group policy not required with only one numbered group
GET: /allocation_candidates?resources=VCPU:1&resources1=MEMORY_MB:2048
status: 200
response_json_paths:
$.allocation_requests.`len`: 3
$.provider_summaries.`len`: 3
- name: disk sharing isolated
GET: /allocation_candidates
query_parameters:
resources1: VCPU:1,MEMORY_MB:1024
resources2: DISK_GB:100
group_policy: isolate
status: 200
response_json_paths:
# Here we've asked for VCPU and MEMORY_MB to be satisfied by the same
# provider - all three of our non-sharing providers can do that - and
# the DISK_GB to be satisfied by a *different* provider than the VCPU and
# MEMORY_MB. So we'll get all permutations where cn_* provide VCPU and
# MEMORY_MB and shr_disk_* provide the DISK_GB; but *no* results where
# DISK_GB is provided by the cn_*s themselves.
$.allocation_requests.`len`: 5
$.provider_summaries.`len`: 5
- name: disk sharing non-isolated
GET: /allocation_candidates
query_parameters:
resources1: VCPU:1,MEMORY_MB:1024
resources2: DISK_GB:100
group_policy: none
status: 200
response_json_paths:
$.allocation_requests.`len`: 7
$.provider_summaries.`len`: 5
- name: isolated ssd
GET: /allocation_candidates
query_parameters:
resources1: VCPU:1,MEMORY_MB:1024
resources2: DISK_GB:100
required2: CUSTOM_DISK_SSD
group_policy: isolate
status: 200
response_json_paths:
# We get candidates [cn_left + shr_disk_1] and [cn_middle + shr_disk_1]
# We don't get [cn_right + shr_disk_1] because they're not associated via aggregate.
# We don't get [cn_left/middle + shr_disk_2] because shr_disk_2 doesn't have the SSD trait
# We don't get [cn_left] or [cn_right] even though they have SSD disk because we asked to isolate
$.allocation_requests.`len`: 2
$.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources:
VCPU: 1
MEMORY_MB: 1024
$.allocation_requests..allocations["$ENVIRON['CN_MIDDLE']"].resources:
VCPU: 1
MEMORY_MB: 1024
# shr_disk_1 satisfies the disk for both allocation requests
$.allocation_requests..allocations["$ENVIRON['SHR_DISK_1']"].resources[DISK_GB]: [100, 100]
$.provider_summaries.`len`: 3
$.provider_summaries["$ENVIRON['CN_LEFT']"].resources:
VCPU:
capacity: 8
used: 0
MEMORY_MB:
capacity: 4096
used: 0
DISK_GB:
capacity: 500
used: 0
$.provider_summaries["$ENVIRON['CN_MIDDLE']"].resources:
VCPU:
capacity: 8
used: 0
MEMORY_MB:
capacity: 4096
used: 0
$.provider_summaries["$ENVIRON['SHR_DISK_1']"].resources:
DISK_GB:
capacity: 1000
used: 0
- name: no isolation, forbid ssd
GET: /allocation_candidates
query_parameters:
resources1: VCPU:1
resources2: DISK_GB:100
required2: "!CUSTOM_DISK_SSD"
group_policy: none
status: 200
response_json_paths:
# The permutations we *don't* get are:
# cn_right by itself because it has SSD
# - anything involving shr_disk_1 because it has SSD
$.allocation_requests.`len`: 4
# We get two allocation requests involving cn_left - one where it
# satisfies the disk itself and one where shr_disk_2 provides it
$.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[VCPU]: [1, 1]
# We get one for [cn_middle + shr_disk_2] - it doesn't have disk to provide for itself
$.allocation_requests..allocations["$ENVIRON['CN_MIDDLE']"].resources[VCPU]: 1
# We get one for [cn_right + shr_disk_2] - cn_right can't provide its own
# disk due to the forbidden SSD trait.
$.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[VCPU]: 1
# shr_disk_2 satisfies the disk for three out of the four allocation
# requests (all except the one where cn_left provides for itself)
$.allocation_requests..allocations["$ENVIRON['SHR_DISK_2']"].resources[DISK_GB]: [100, 100, 100]
# Validate that we got the correct four providers in the summaries
$.provider_summaries.`len`: 4
$.provider_summaries["$ENVIRON['CN_LEFT']"].resources[VCPU][capacity]: 8
$.provider_summaries["$ENVIRON['CN_MIDDLE']"].resources[VCPU][capacity]: 8
$.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[VCPU][capacity]: 8
$.provider_summaries["$ENVIRON['SHR_DISK_2']"].resources[DISK_GB][capacity]: 1000
- name: member_of filters
GET: /allocation_candidates
query_parameters:
resources1: VCPU:1
resources2: DISK_GB:100
member_of2: $ENVIRON['AGGC']
group_policy: none
status: 200
response_json_paths:
$.allocation_requests.`len`: 1
$.allocation_requests[0].allocations["$ENVIRON['CN_RIGHT']"].resources:
VCPU: 1
DISK_GB: 100
$.provider_summaries.`len`: 1
$.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[VCPU][capacity]: 8
$.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[DISK_GB][capacity]: 500
- name: required, forbidden, member_of in
GET: /allocation_candidates
query_parameters:
resources1: VCPU:1
required1: "!HW_CPU_X86_SSE"
resources2: DISK_GB:100
required2: CUSTOM_DISK_SSD
member_of2: in:$ENVIRON['AGGA'],$ENVIRON['AGGC']
group_policy: none
status: 200
response_json_paths:
# cn_middle won't appear (forbidden SSE trait)
# shr_disk_2 won't appear (required SSD trait is absent)
# [cn_left] won't be in the results (required SSD trait is absent)
# So we'll get:
# [cn_left, shr_disk_1]
# [cn_right]
$.allocation_requests.`len`: 2
$.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[VCPU]: 1
$.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[VCPU]: 1
$.allocation_requests..allocations["$ENVIRON['SHR_DISK_1']"].resources[DISK_GB]: 100
$.provider_summaries.`len`: 3
$.provider_summaries["$ENVIRON['CN_LEFT']"].resources[VCPU][capacity]: 8
$.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[VCPU][capacity]: 8
$.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[DISK_GB][capacity]: 500
$.provider_summaries["$ENVIRON['SHR_DISK_1']"].resources[DISK_GB][capacity]: 1000
- name: multiple member_of
GET: /allocation_candidates
query_parameters:
resources1: VCPU:1
resources2: DISK_GB:100
member_of2: $ENVIRON['AGGA']
member_of2: in:$ENVIRON['AGGB'],$ENVIRON['AGGC']
group_policy: isolate
status: 200
response_json_paths:
# The member_of2 specifications say that the DISK_GB resource must come
# from a provider that's in aggA and also in (aggB and/or aggC). Only
# shr_disk_2 qualifies; so we'll get results anchored at cn_middle and
# cn_right. But note that we'll also get a result anchored at cn_left:
# it doesn't meet the member_of criteria, but it doesn't need to, since
# it's not providing the DISK_GB resource.
$.allocation_requests.`len`: 3
$.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[VCPU]: 1
$.allocation_requests..allocations["$ENVIRON['CN_MIDDLE']"].resources[VCPU]: 1
$.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[VCPU]: 1
$.allocation_requests..allocations["$ENVIRON['SHR_DISK_2']"].resources[DISK_GB]: [100, 100, 100]
$.provider_summaries.`len`: 4
$.provider_summaries["$ENVIRON['CN_LEFT']"].resources[VCPU][capacity]: 8
$.provider_summaries["$ENVIRON['CN_MIDDLE']"].resources[VCPU][capacity]: 8
$.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[VCPU][capacity]: 8
$.provider_summaries["$ENVIRON['SHR_DISK_2']"].resources[DISK_GB][capacity]: 1000
- name: multiple disks, multiple networks
GET: /allocation_candidates
query_parameters:
resources1: VCPU:1
resources2: VGPU:1
required2: HW_GPU_API_DXVA
resources3: MEMORY_MB:1024
resources4: DISK_GB:100
required4: CUSTOM_DISK_SSD
resources5: DISK_GB:50
required5: "!CUSTOM_DISK_SSD"
resources6: SRIOV_NET_VF:1,CUSTOM_NET_MBPS:1000
resources7: SRIOV_NET_VF:2,CUSTOM_NET_MBPS:2000
group_policy: none
# Breaking it down:
# => These could come from cn_left, cn_middle, or cn_right
# ?resources1=VCPU:1
# &resources3=MEMORY_MB:1024
# => But this limits us to cn_left and cn_right
# &resources2=VGPU:1&required2=HW_GPU_API_DXVA
# => Since we're not isolating, this SSD can come from cn_right or shr_disk_1
# &resources4=DISK_GB:100&required4=CUSTOM_DISK_SSD
# => This non-SSD can come from cn_left or shr_disk_2
# &resources5=DISK_GB:50&required5=!CUSTOM_DISK_SSD
# => These VFs and bandwidth can come from cn_left or shr_net. Since cn_left
# can't be an anchor for shr_net, these will always combine.
# &resources6=SRIOV_NET_VF:1,CUSTOM_NET_MBPS:1000
# &resources7=SRIOV_NET_VF:2,CUSTOM_NET_MBPS:2000
# => If we didn't do this, the separated VCPU/MEMORY_MB/VGPU resources would
# cause us to get no results
# &group_policy=none
status: 200
response_json_paths:
# We have two permutations involving cn_left.
# - One where the non-SSD is satisfied by cn_left itself
# [cn_left(VCPU:1, MEMORY_MB:1024, VGPU:1, DISK_GB:50, SRIOV_NET_VF:3, CUSTOM_NET_MBPS:3000),
# shr_disk_1(DISK_GB:100)]
# - And one where the non-SSD is satisfied by shr_disk_2
# [cn_left(VCPU:1, MEMORY_MB:1024, VGPU:1, SRIOV_NET_VF:3, CUSTOM_NET_MBPS:3000),
# shr_disk_1(DISK_GB:100),
# shr_disk_2(DISK_GB: 50)]
# There's only one result involving cn_right.
# - We must satisfy the SSD from cn_right and the non-SSD from shr_disk_2
# - We must satisfy the network stuff from shr_net
# [cn_right(VCPU:1, MEMORY_MB:1024, VGPU:1, DISK_GB:100),
# shr_disk_2(DISK_GB:50),
# shr_net(SRIOV_NET_VF:3, CUSTOM_NET_MBPS:3000)]
$.allocation_requests.`len`: 3
$.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[VCPU]: [1, 1]
$.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[MEMORY_MB]: [1024, 1024]
$.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[VGPU]: [1, 1]
$.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[SRIOV_NET_VF]: [3, 3]
$.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[CUSTOM_NET_MBPS]: [3000, 3000]
$.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[DISK_GB]: 50
# These come from the cn_left results
$.allocation_requests..allocations["$ENVIRON['SHR_DISK_1']"].resources[DISK_GB]: [100, 100]
# One of these comes from the second cn_left result, the other from the cn_right result
$.allocation_requests..allocations["$ENVIRON['SHR_DISK_2']"].resources[DISK_GB]: [50, 50]
$.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[VCPU]: 1
$.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[MEMORY_MB]: 1024
$.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[VGPU]: 1
$.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[DISK_GB]: 100
$.allocation_requests..allocations["$ENVIRON['SHR_NET']"].resources[SRIOV_NET_VF]: 3
$.allocation_requests..allocations["$ENVIRON['SHR_NET']"].resources[CUSTOM_NET_MBPS]: 3000
        # Just make sure we got the correct five providers in the summaries
$.provider_summaries.`len`: 5
$.provider_summaries["$ENVIRON['CN_LEFT']"].resources[VCPU][capacity]: 8
$.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[VCPU][capacity]: 8
$.provider_summaries["$ENVIRON['SHR_DISK_1']"].resources[DISK_GB][capacity]: 1000
$.provider_summaries["$ENVIRON['SHR_DISK_2']"].resources[DISK_GB][capacity]: 1000
$.provider_summaries["$ENVIRON['SHR_NET']"].resources[SRIOV_NET_VF][capacity]: 16
- name: combining request groups exceeds capacity
GET: /allocation_candidates
query_parameters:
resources: VCPU:2,MEMORY_MB:2048,SRIOV_NET_VF:1,CUSTOM_NET_MBPS:2000
resources1: SRIOV_NET_VF:1,CUSTOM_NET_MBPS:3000
status: 200
response_json_paths:
        # CUSTOM_NET_MBPS of 2000 + 3000 = 5000 is too much for cn_left, but
        # shr_net can accommodate it.
$.allocation_requests.`len`: 1
$.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[VCPU]: 2
$.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[MEMORY_MB]: 2048
$.allocation_requests..allocations["$ENVIRON['SHR_NET']"].resources[SRIOV_NET_VF]: 2
$.allocation_requests..allocations["$ENVIRON['SHR_NET']"].resources[CUSTOM_NET_MBPS]: 5000
$.provider_summaries.`len`: 2
$.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[VCPU][capacity]: 8
$.provider_summaries["$ENVIRON['SHR_NET']"].resources[CUSTOM_NET_MBPS][capacity]: 40000
- name: combining request groups exceeds max_unit
GET: /allocation_candidates
query_parameters:
resources: VGPU:1
resources1: VGPU:1
resources2: VGPU:1
group_policy: none
status: 200
response_json_paths:
# VGPU of 1 + 1 + 1 = 3 exceeds max_unit on cn_right, but cn_left can handle it.
$.allocation_requests.`len`: 1
$.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[VGPU]: 3
$.provider_summaries.`len`: 1
$.provider_summaries["$ENVIRON['CN_LEFT']"].resources[VGPU][capacity]: 8
#################
# Error scenarios
#################
- name: numbered resources bad microversion
GET: /allocation_candidates?resources=MEMORY_MB:1024&resources1=VCPU:1
request_headers:
openstack-api-version: placement 1.24
status: 400
response_strings:
- Invalid query string parameters
- "'resources1' was unexpected"
- name: numbered traits bad microversion
GET: /allocation_candidates?resources=MEMORY_MB:1024&required2=HW_CPU_X86_AVX2
request_headers:
openstack-api-version: placement 1.24
status: 400
response_strings:
- Invalid query string parameters
- "'required2' was unexpected"
- name: numbered member_of bad microversion
GET: /allocation_candidates?resources=MEMORY_MB:1024&member_of3=$ENVIRON['AGGB']
request_headers:
openstack-api-version: placement 1.24
status: 400
response_strings:
- Invalid query string parameters
- "'member_of3' was unexpected"
- name: group_policy bad microversion
GET: /allocation_candidates?resources=VCPU:1&group_policy=isolate
request_headers:
openstack-api-version: placement 1.24
status: 400
response_strings:
- Invalid query string parameters
- "'group_policy' was unexpected"
- name: bogus numbering
GET: /allocation_candidates?resources01=VCPU:1
status: 400
response_strings:
- Invalid query string parameters
- "'resources01' does not match any of the regexes"
- name: bogus suffix
GET: /allocation_candidates?resources1a=VCPU:1
status: 400
response_strings:
- Invalid query string parameters
- "'resources1a' does not match any of the regexes"
- name: invalid group_policy value
GET: /allocation_candidates?resources=VCPU:1&group_policy=bogus
status: 400
response_strings:
- Invalid query string parameters
- "'bogus' is not one of ['none', 'isolate']"
- name: group_policy required when more than one numbered group
GET: /allocation_candidates?resources1=VCPU:1&resources2=VCPU:1
status: 400
response_strings:
- The \"group_policy\" parameter is required when specifying more than one \"resources{N}\" parameter.
- name: orphaned traits keys
GET: /allocation_candidates?required=FOO&required1=BAR
status: 400
response_strings:
- 'Found the following orphaned traits keys: required, required1'
- name: orphaned member_of keys
GET: /allocation_candidates?member_of=$ENVIRON['AGGA']&member_of3=$ENVIRON['AGGC']
status: 400
response_strings:
- 'Found the following orphaned member_of keys: member_of, member_of3'
- name: at least one request group required
GET: /allocation_candidates?group_policy=isolate
status: 400
response_strings:
- At least one request group (`resources` or `resources{N}`) is required.

View File

@ -1,85 +0,0 @@
# This tests the individual CRUD operations on
# /resource_providers/{uuid}/inventories* using a non-admin user with an
# open policy configuration. The response validation is intentionally minimal.
fixtures:
- OpenPolicyFixture
defaults:
request_headers:
x-auth-token: user
accept: application/json
content-type: application/json
openstack-api-version: placement latest
tests:
- name: post new resource provider
POST: /resource_providers
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 200
- name: list inventories
GET: /resource_providers/$ENVIRON['RP_UUID']/inventories
response_json_paths:
$.resource_provider_generation: 0
$.inventories: {}
- name: post an inventory
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
data:
resource_class: DISK_GB
total: 2048
reserved: 512
min_unit: 10
max_unit: 1024
step_size: 10
allocation_ratio: 1.0
status: 201
response_headers:
location: $SCHEME://$NETLOC/resource_providers/$ENVIRON['RP_UUID']/inventories/DISK_GB
- name: show inventory
GET: $LOCATION
status: 200
- name: update one inventory
PUT: $LAST_URL
request_headers:
content-type: application/json
data:
resource_provider_generation: 1
total: 2048
reserved: 1024
min_unit: 10
max_unit: 1024
step_size: 10
allocation_ratio: 1.0
status: 200
- name: update all inventory
PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_provider_generation: 2
inventories:
DISK_GB:
total: 2048
reserved: 1024
min_unit: 10
max_unit: 1024
step_size: 10
allocation_ratio: 1.0
VCPU:
total: 8
status: 200
- name: delete specific inventory
DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories/DISK_GB
status: 204
- name: delete all inventory
DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories
status: 204

View File

@ -1,812 +0,0 @@
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
tests:
- name: inventories for missing provider
GET: /resource_providers/7260669a-e3d4-4867-aaa7-683e2ab6958c/inventories
status: 404
response_strings:
- No resource provider with uuid 7260669a-e3d4-4867-aaa7-683e2ab6958c found
response_json_paths:
$.errors[0].title: Not Found
- name: delete all inventory for missing resource provider
DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
openstack-api-version: placement 1.5
status: 404
- name: post new resource provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 201
response_headers:
location: //resource_providers/[a-f0-9-]+/
- name: get empty inventories
GET: /resource_providers/$ENVIRON['RP_UUID']/inventories
response_json_paths:
$.resource_provider_generation: 0
$.inventories: {}
- name: post a conflicting capacity inventory
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 256
reserved: 512
status: 400
response_strings:
- Unable to create inventory for resource provider
response_json_paths:
$.errors[0].title: Bad Request
- name: post an inventory with no total specified
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
status: 400
response_strings:
- JSON does not validate
- "'total' is a required property"
- name: post a negative inventory
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: -1
status: 400
response_strings:
- JSON does not validate
- -1 is less than the minimum of 1
- name: post an inventory with invalid total
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 0
reserved: 512
min_unit: 1
max_unit: 1024
step_size: 10
allocation_ratio: 1.0
status: 400
response_strings:
- "JSON does not validate: 0 is less than the minimum of 1"
- "Failed validating 'minimum' in schema['properties']['total']"
- name: post an inventory invalid min_unit
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 2048
reserved: 512
min_unit: 0
max_unit: 1024
step_size: 10
allocation_ratio: 1.0
status: 400
response_strings:
- "JSON does not validate: 0 is less than the minimum of 1"
- "Failed validating 'minimum' in schema['properties']['min_unit']"
- name: post an inventory invalid max_unit
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 2048
reserved: 512
min_unit: 10
max_unit: 0
step_size: 10
allocation_ratio: 1.0
status: 400
response_strings:
- "JSON does not validate: 0 is less than the minimum of 1"
- "Failed validating 'minimum' in schema['properties']['max_unit']"
- name: post an inventory invalid step_size
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 2048
reserved: 512
min_unit: 10
max_unit: 1024
step_size: 0
allocation_ratio: 1.0
status: 400
response_strings:
- "JSON does not validate: 0 is less than the minimum of 1"
- "Failed validating 'minimum' in schema['properties']['step_size']"
- name: post an inventory
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 2048
reserved: 512
min_unit: 10
max_unit: 1024
step_size: 10
allocation_ratio: 1.0
status: 201
response_headers:
location: $SCHEME://$NETLOC/resource_providers/$ENVIRON['RP_UUID']/inventories/DISK_GB
response_json_paths:
$.resource_provider_generation: 1
$.total: 2048
$.reserved: 512
- name: get that inventory
GET: $LOCATION
status: 200
request_headers:
# set microversion to 1.15 to get timestamp headers
openstack-api-version: placement 1.15
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
response_json_paths:
$.resource_provider_generation: 1
$.total: 2048
$.reserved: 512
$.min_unit: 10
$.max_unit: 1024
$.step_size: 10
$.allocation_ratio: 1.0
- name: get inventory v1.14 no cache headers
GET: $LAST_URL
status: 200
request_headers:
openstack-api-version: placement 1.14
response_forbidden_headers:
- cache-control
- last-modified
- name: modify the inventory
PUT: $LAST_URL
request_headers:
content-type: application/json
data:
resource_provider_generation: 1
total: 2048
reserved: 1024
min_unit: 10
max_unit: 1024
step_size: 10
allocation_ratio: 1.0
status: 200
response_headers:
content-type: /application/json/
response_json_paths:
$.reserved: 1024
- name: confirm inventory change
GET: $LAST_URL
response_json_paths:
$.resource_provider_generation: 2
$.total: 2048
$.reserved: 1024
- name: modify inventory invalid generation
PUT: $LAST_URL
request_headers:
content-type: application/json
openstack-api-version: placement 1.23
data:
resource_provider_generation: 5
total: 2048
status: 409
response_strings:
- resource provider generation conflict
response_json_paths:
$.errors[0].title: Conflict
$.errors[0].code: placement.concurrent_update
- name: modify inventory no such resource class in inventory
PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories/MEMORY_MB
request_headers:
content-type: application/json
data:
resource_provider_generation: 2
total: 2048
status: 400
response_strings:
- No inventory record with resource class
response_json_paths:
$.errors[0].title: Bad Request
- name: modify inventory invalid data
desc: This should 400 because reserved is greater than total
PUT: $LAST_URL
request_headers:
content-type: application/json
data:
resource_provider_generation: 2
total: 2048
reserved: 4096
min_unit: 10
max_unit: 1024
step_size: 10
allocation_ratio: 1.0
status: 400
response_strings:
- Unable to update inventory for resource provider $ENVIRON['RP_UUID']
response_json_paths:
$.errors[0].title: Bad Request
- name: put inventory bad form
  desc: This should 400 because the data does not match the inventory schema
PUT: $LAST_URL
request_headers:
content-type: application/json
data:
house: red
car: blue
status: 400
response_strings:
- JSON does not validate
response_json_paths:
$.errors[0].title: Bad Request
- name: post inventory malformed json
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data: '{"foo": }'
status: 400
response_strings:
- Malformed JSON
response_json_paths:
$.errors[0].title: Bad Request
- name: post inventory bad syntax schema
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: bad_class
total: 2048
status: 400
response_json_paths:
$.errors[0].title: Bad Request
- name: post inventory bad resource class
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: NO_CLASS_14
total: 2048
status: 400
response_strings:
- No such resource class NO_CLASS_14
response_json_paths:
$.errors[0].title: Bad Request
- name: post inventory duplicated resource class
desc: DISK_GB was already created above
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 2048
status: 409
response_strings:
- Update conflict
response_json_paths:
$.errors[0].title: Conflict
- name: get list of inventories
GET: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
# set microversion to 1.15 to get timestamp headers
openstack-api-version: placement 1.15
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
response_json_paths:
$.resource_provider_generation: 2
$.inventories.DISK_GB.total: 2048
$.inventories.DISK_GB.reserved: 1024
- name: delete the inventory
DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories/DISK_GB
status: 204
- name: get now empty inventories
GET: /resource_providers/$ENVIRON['RP_UUID']/inventories
response_json_paths:
$.resource_provider_generation: 3
$.inventories: {}
- name: post new disk inventory
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 1024
status: 201
- name: post new ipv4 address inventory
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: IPV4_ADDRESS
total: 255
reserved: 2
status: 201
- name: list both those inventories
GET: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
response_json_paths:
$.resource_provider_generation: 5
$.inventories.DISK_GB.total: 1024
$.inventories.IPV4_ADDRESS.total: 255
- name: post ipv4 address inventory again
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: IPV4_ADDRESS
total: 255
reserved: 2
status: 409
response_json_paths:
$.errors[0].title: Conflict
- name: delete inventory
DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories/IPV4_ADDRESS
status: 204
response_forbidden_headers:
- content-type
- name: delete inventory again
DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories/IPV4_ADDRESS
status: 404
response_strings:
- No inventory of class IPV4_ADDRESS found for delete
response_json_paths:
$.errors[0].title: Not Found
- name: get missing inventory class
GET: /resource_providers/$ENVIRON['RP_UUID']/inventories/IPV4_ADDRESS
status: 404
response_strings:
- No inventory of class IPV4_ADDRESS for $ENVIRON['RP_UUID']
response_json_paths:
$.errors[0].title: Not Found
- name: get invalid inventory class
GET: /resource_providers/$ENVIRON['RP_UUID']/inventories/HOUSE
status: 404
response_strings:
- No inventory of class HOUSE for $ENVIRON['RP_UUID']
response_json_paths:
$.errors[0].title: Not Found
- name: get missing resource provider inventory
GET: /resource_providers/2e1dda56-8b18-4fb9-8c5c-3125891b7143/inventories/VCPU
status: 404
- name: create another resource provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: disk-network
status: 201
- name: put all inventory
PUT: $LOCATION/inventories
request_headers:
content-type: application/json
# set microversion to 1.15 to get timestamp headers
openstack-api-version: placement 1.15
data:
resource_provider_generation: 0
inventories:
IPV4_ADDRESS:
total: 253
DISK_GB:
total: 1024
status: 200
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
response_json_paths:
$.resource_provider_generation: 1
$.inventories.IPV4_ADDRESS.total: 253
$.inventories.IPV4_ADDRESS.reserved: 0
$.inventories.DISK_GB.total: 1024
$.inventories.DISK_GB.allocation_ratio: 1.0
- name: check both inventory classes
GET: $LAST_URL
response_json_paths:
$.resource_provider_generation: 1
$.inventories.DISK_GB.total: 1024
$.inventories.IPV4_ADDRESS.total: 253
- name: check one inventory class
GET: $LAST_URL/DISK_GB
response_json_paths:
$.total: 1024
- name: put all inventory bad generation
PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
openstack-api-version: placement 1.23
data:
resource_provider_generation: 99
inventories:
IPV4_ADDRESS:
total: 253
status: 409
response_strings:
- resource provider generation conflict
response_json_paths:
$.errors[0].title: Conflict
$.errors[0].code: placement.concurrent_update
- name: put all inventory unknown resource class
PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_provider_generation: 6
inventories:
HOUSE:
total: 253
status: 400
response_strings:
- Unknown resource class in inventory
response_json_paths:
$.errors[0].title: Bad Request
- name: post an inventory with total exceed max limit
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 2147483648
reserved: 512
min_unit: 10
max_unit: 1024
step_size: 10
allocation_ratio: 1.0
status: 400
response_strings:
- "Failed validating 'maximum'"
response_json_paths:
$.errors[0].title: Bad Request
- name: post an inventory with reserved exceed max limit
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 1024
reserved: 2147483648
min_unit: 10
max_unit: 1024
step_size: 10
allocation_ratio: 1.0
status: 400
response_strings:
- "Failed validating 'maximum'"
response_json_paths:
$.errors[0].title: Bad Request
- name: post an inventory with min_unit exceed max limit
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 1024
reserved: 512
min_unit: 2147483648
max_unit: 1024
step_size: 10
allocation_ratio: 1.0
status: 400
response_strings:
- "Failed validating 'maximum'"
response_json_paths:
$.errors[0].title: Bad Request
- name: post an inventory with max_unit exceed max limit
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 1024
reserved: 512
min_unit: 10
max_unit: 2147483648
step_size: 10
allocation_ratio: 1.0
status: 400
response_strings:
- "Failed validating 'maximum'"
response_json_paths:
$.errors[0].title: Bad Request
- name: post an inventory with step_size exceed max limit
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 1024
reserved: 512
min_unit: 10
max_unit: 1024
step_size: 2147483648
allocation_ratio: 1.0
status: 400
response_strings:
- "Failed validating 'maximum'"
response_json_paths:
$.errors[0].title: Bad Request
- name: post an inventory with allocation_ratio exceed max limit
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 1024
reserved: 512
min_unit: 10
max_unit: 1024
step_size: 10
allocation_ratio: 3.40282e+39
status: 400
response_strings:
- "Failed validating 'maximum'"
response_json_paths:
$.errors[0].title: Bad Request
- name: modify the inventory with total exceed max limit
PUT: $LAST_URL
request_headers:
content-type: application/json
data:
resource_provider_generation: 1
inventories:
DISK_GB:
total: 2147483648
reserved: 512
status: 400
response_strings:
- "Failed validating 'maximum'"
response_json_paths:
$.errors[0].title: Bad Request
- name: modify the inventory with allocation_ratio exceed max limit
PUT: $LAST_URL
request_headers:
content-type: application/json
data:
resource_provider_generation: 1
inventories:
DISK_GB:
total: 1024
reserved: 512
allocation_ratio: 3.40282e+39
status: 400
response_strings:
- "Failed validating 'maximum'"
response_json_paths:
$.errors[0].title: Bad Request
# NOTE(cdent): The generation is 6 now, based on the activity at
# the start of this file.
- name: put all inventory bad capacity
PUT: $LAST_URL
request_headers:
content-type: application/json
data:
resource_provider_generation: 6
inventories:
IPV4_ADDRESS:
total: 253
reserved: 512
status: 400
response_strings:
- Unable to update inventory
- greater than or equal to total
response_json_paths:
$.errors[0].title: Bad Request
- name: put all inventory zero capacity old microversion
PUT: $LAST_URL
request_headers:
content-type: application/json
data:
resource_provider_generation: 6
inventories:
IPV4_ADDRESS:
total: 253
reserved: 253
status: 400
response_strings:
- Unable to update inventory
- greater than or equal to total
response_json_paths:
$.errors[0].title: Bad Request
- name: put inventory with reserved equal to total
PUT: $LAST_URL
request_headers:
content-type: application/json
openstack-api-version: placement 1.26
data:
resource_provider_generation: 6
inventories:
IPV4_ADDRESS:
total: 253
reserved: 253
status: 200
- name: put all inventory bad capacity in new microversion
PUT: $LAST_URL
request_headers:
content-type: application/json
openstack-api-version: placement 1.26
data:
resource_provider_generation: 7
inventories:
IPV4_ADDRESS:
total: 253
reserved: 512
status: 400
response_strings:
- Unable to update inventory
- greater than total
response_json_paths:
$.errors[0].title: Bad Request
- name: put one inventory zero capacity old microversion
PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories/IPV4_ADDRESS
request_headers:
content-type: application/json
data:
resource_provider_generation: 7
total: 253
reserved: 253
status: 400
response_strings:
- Unable to update inventory
- greater than or equal to total
response_json_paths:
$.errors[0].title: Bad Request
- name: put one inventory with reserved equal to total new microversion
PUT: $LAST_URL
request_headers:
content-type: application/json
openstack-api-version: placement 1.26
data:
resource_provider_generation: 7
total: 512
reserved: 512
status: 200
- name: delete all inventory bad generation
PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_provider_generation: 99
inventories:
IPV4_ADDRESS:
total: 253
status: 409
response_strings:
- resource provider generation conflict
- name: delete all inventory
DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
openstack-api-version: placement 1.5
status: 204
- name: delete empty inventories
DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
openstack-api-version: placement 1.5
status: 204
- name: get inventories after deletions
GET: /resource_providers/$ENVIRON['RP_UUID']/inventories
response_json_paths:
$.resource_provider_generation: 10
$.inventories: {}
- name: post an inventory again
POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_class: DISK_GB
total: 2048
reserved: 512
min_unit: 10
max_unit: 1024
step_size: 10
allocation_ratio: 1.0
status: 201
response_headers:
location: $SCHEME://$NETLOC/resource_providers/$ENVIRON['RP_UUID']/inventories/DISK_GB
response_json_paths:
$.resource_provider_generation: 11
$.total: 2048
$.reserved: 512
- name: delete all inventory with put
PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
content-type: application/json
openstack-api-version: placement 1.4
data:
resource_provider_generation: 11
inventories: {}
response_json_paths:
$.resource_provider_generation: 12
$.inventories: {}
status: 200
- name: get generation after deletion
GET: /resource_providers/$ENVIRON['RP_UUID']/inventories
response_json_paths:
$.resource_provider_generation: 12
$.inventories: {}
- name: delete inventories earlier version
DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
openstack-api-version: placement 1.4
status: 405

View File

@ -1,22 +0,0 @@
# Test launchpad bug https://bugs.launchpad.net/nova/+bug/1724065
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: user
tests:
# min version from start of placement time is 1.0
# Without the fix, this results in a 500 with an 'HTTP_ACCEPT'
# KeyError.
- name: no accept header and out of range microversion
GET: /resource_providers
request_headers:
openstack-api-version: placement 0.9
status: 406
response_strings:
- Unacceptable version header

View File

@ -1,90 +0,0 @@
# Tests to build microversion functionality behavior and confirm
# it is present and behaving as expected.
fixtures:
- APIFixture
defaults:
request_headers:
accept: application/json
x-auth-token: user
tests:
- name: root has microversion header
GET: /
response_headers:
vary: /openstack-api-version/
openstack-api-version: /^placement \d+\.\d+$/
- name: root has microversion info
GET: /
response_json_paths:
$.versions[0].max_version: /^\d+\.\d+$/
$.versions[0].min_version: /^\d+\.\d+$/
$.versions[0].id: v1.0
$.versions[0].status: CURRENT
$.versions[0].links[?rel = 'self'].href: ''
- name: unavailable microversion raises 406
GET: /
request_headers:
openstack-api-version: placement 0.5
status: 406
response_headers:
content-type: /application/json/
response_json_paths:
$.errors.[0].title: Not Acceptable
$.errors.[0].max_version: /^\d+\.\d+$/
$.errors.[0].min_version: /^\d+\.\d+$/
response_strings:
- "Unacceptable version header: 0.5"
- name: latest microversion is 1.30
GET: /
request_headers:
openstack-api-version: placement latest
response_headers:
vary: /openstack-api-version/
openstack-api-version: placement 1.30
- name: other accept header bad version
GET: /
request_headers:
accept: text/html
openstack-api-version: placement 0.5
status: 406
response_headers:
content-type: /text/html/
response_strings:
- "Unacceptable version header: 0.5"
- name: bad format string raises 400
GET: /
request_headers:
openstack-api-version: placement pony.horse
status: 400
response_strings:
- "invalid version string: pony.horse"
response_json_paths:
$.errors[0].title: Bad Request
- name: bad format multidot raises 400
GET: /
request_headers:
openstack-api-version: placement 1.2.3
status: 400
response_strings:
- "invalid version string: 1.2.3"
response_json_paths:
$.errors[0].title: Bad Request
- name: error in application produces microversion headers
desc: we do not want xml
POST: /
request_headers:
content-type: application/xml
status: 405
response_headers:
openstack-api-version: placement 1.0

View File

@ -1,25 +0,0 @@
# Confirm that things work as intended when CORS is not configured.
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: user
tests:
- name: options request not allowed
OPTIONS: /
request_headers:
origin: http://valid.example.com
access-control-request-method: GET
status: 405
- name: get request no cors headers
GET: /
request_headers:
origin: http://valid.example.com
access-control-request-method: GET
status: 200
response_forbidden_headers:
- access-control-allow-origin

View File

@ -1,20 +0,0 @@
# This tests POSTs to /reshaper using a non-admin user with an open policy
# configuration. The response is a 400 because of bad content, meaning we got
# past policy enforcement. If policy was being enforced we'd get a 403.
fixtures:
- OpenPolicyFixture
defaults:
request_headers:
x-auth-token: user
accept: application/json
content-type: application/json
openstack-api-version: placement latest
tests:
- name: attempt reshape
POST: /reshaper
data:
bad: content
status: 400

View File

@ -1,558 +0,0 @@
# /reshaper provides a way to atomically move inventory and allocations from
# one resource provider to another, often from a root provider to a new child.
fixtures:
- AllocationFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
content-type: application/json
openstack-api-version: placement 1.30
tests:
- name: reshaper is POST only
GET: /reshaper
status: 405
response_headers:
allow: POST
- name: reshaper requires admin not user
POST: /reshaper
request_headers:
x-auth-token: user
status: 403
- name: reshaper not there old
POST: /reshaper
request_headers:
openstack-api-version: placement 1.29
status: 404
- name: very invalid 400
POST: /reshaper
status: 400
data:
cows: moo
response_strings:
- JSON does not validate
- name: missing allocations
POST: /reshaper
data:
inventories:
$ENVIRON['RP_UUID']:
resource_provider_generation: 0
inventories:
VCPU:
total: 1
status: 400
# There are existing allocations on RP_UUID (created by the AllocationFixture).
# As the code is currently we cannot null out those allocations from reshaper
# because the allocations identify nothing (replace_all() is a no op).
- name: empty allocations inv in use
POST: /reshaper
data:
inventories:
$ENVIRON['RP_UUID']:
resource_provider_generation: 5
inventories:
VCPU:
total: 1
allocations: {}
status: 409
response_json_paths:
$.errors[0].code: placement.inventory.inuse
# Again, with the existing allocations on RP_UUID being held by CONSUMER_ID,
# not INSTANCE_ID, when we try to allocate here, we don't have room. This
# is a correctly invalid operation as to be actually reshaping here, we
# would be needing to move the CONSUMER_ID allocations in this call (and
# setting the inventory to something that could accommodate them).
- name: with allocations
POST: /reshaper
data:
inventories:
$ENVIRON['RP_UUID']:
resource_provider_generation: 5
inventories:
VCPU:
total: 1
allocations:
$ENVIRON['INSTANCE_UUID']:
allocations:
$ENVIRON['RP_UUID']:
resources:
VCPU: 1
consumer_generation: null
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
status: 409
response_strings:
- Unable to allocate inventory
- name: bad rp gen
POST: /reshaper
data:
inventories:
$ENVIRON['RP_UUID']:
resource_provider_generation: 4
inventories:
VCPU:
total: 1
allocations: {}
status: 409
response_strings:
- resource provider generation conflict
- 'actual: 5, given: 4'
- name: bad consumer gen
POST: /reshaper
data:
inventories:
$ENVIRON['RP_UUID']:
resource_provider_generation: 5
inventories:
VCPU:
total: 1
allocations:
$ENVIRON['INSTANCE_UUID']:
allocations:
$ENVIRON['RP_UUID']:
resources:
VCPU: 1
# The correct generation here is null, because INSTANCE_UUID
# represents a new consumer at this point.
consumer_generation: 99
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
status: 409
response_strings:
- consumer generation conflict
- name: create a child provider
POST: /resource_providers
data:
uuid: $ENVIRON['ALT_RP_UUID']
name: $ENVIRON['ALT_RP_NAME']
parent_provider_uuid: $ENVIRON['RP_UUID']
# This and subsequent error checking tests are modelled on the successful
# test which is at the end of this file. Using the same data, with minor
# adjustments, so that the cause of failure is clear.
- name: move to bad child 400
POST: /reshaper
data:
inventories:
$ENVIRON['RP_UUID']:
resource_provider_generation: 5
inventories:
DISK_GB:
total: 2048
step_size: 10
min_unit: 10
max_unit: 1200
# This resource provider does not exist.
'39bafc00-3fff-444d-b87a-2ead3f866e05':
resource_provider_generation: 0
inventories:
VCPU:
total: 10
max_unit: 8
# these consumer generations are all 1 because they have
# previously allocated
allocations:
$ENVIRON['CONSUMER_0']:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 1000
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 1
$ENVIRON['CONSUMER_ID']:
allocations:
$ENVIRON['ALT_RP_UUID']:
resources:
VCPU: 8
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 1
$ENVIRON['ALT_CONSUMER_ID']:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 20
$ENVIRON['ALT_RP_UUID']:
resources:
VCPU: 1
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['ALT_USER_ID']
consumer_generation: 1
status: 400
response_json_paths:
$.errors[0].code: placement.resource_provider.not_found
- name: poorly formed inventory 400
POST: /reshaper
data:
inventories:
$ENVIRON['RP_UUID']:
resource_provider_generation: 5
inventories:
DISK_GB:
total: 2048
step_size: 10
min_unit: 10
max_unit: 1200
bad_field: moo
$ENVIRON['ALT_RP_UUID']:
resource_provider_generation: 0
inventories:
VCPU:
total: 10
max_unit: 8
# these consumer generations are all 1 because they have
# previously allocated
allocations:
$ENVIRON['CONSUMER_0']:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 1000
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 1
$ENVIRON['CONSUMER_ID']:
allocations:
$ENVIRON['ALT_RP_UUID']:
resources:
VCPU: 8
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 1
$ENVIRON['ALT_CONSUMER_ID']:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 20
$ENVIRON['ALT_RP_UUID']:
resources:
VCPU: 1
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['ALT_USER_ID']
consumer_generation: 1
status: 400
response_strings:
- JSON does not validate
- "'bad_field' was unexpected"
- name: poorly formed allocation 400
POST: /reshaper
data:
inventories:
$ENVIRON['RP_UUID']:
resource_provider_generation: 5
inventories:
DISK_GB:
total: 2048
step_size: 10
min_unit: 10
max_unit: 1200
$ENVIRON['ALT_RP_UUID']:
resource_provider_generation: 0
inventories:
VCPU:
total: 10
max_unit: 8
# these consumer generations are all 1 because they have
# previously allocated
allocations:
$ENVIRON['CONSUMER_0']:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 1000
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 1
# This bad field will cause a failure in the schema.
bad_field: moo
$ENVIRON['CONSUMER_ID']:
allocations:
$ENVIRON['ALT_RP_UUID']:
resources:
VCPU: 8
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 1
$ENVIRON['ALT_CONSUMER_ID']:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 20
$ENVIRON['ALT_RP_UUID']:
resources:
VCPU: 1
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['ALT_USER_ID']
consumer_generation: 1
status: 400
response_strings:
- JSON does not validate
- "'bad_field' was unexpected"
- name: target resource class not found
POST: /reshaper
data:
inventories:
$ENVIRON['RP_UUID']:
resource_provider_generation: 5
inventories:
# not a real inventory, but valid form
DISK_OF_STEEL:
total: 2048
step_size: 10
min_unit: 10
max_unit: 1200
$ENVIRON['ALT_RP_UUID']:
resource_provider_generation: 0
inventories:
VCPU:
total: 10
max_unit: 8
# these consumer generations are all 1 because they have
# previously allocated
allocations:
$ENVIRON['CONSUMER_0']:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 1000
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 1
$ENVIRON['CONSUMER_ID']:
allocations:
$ENVIRON['ALT_RP_UUID']:
resources:
VCPU: 8
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 1
$ENVIRON['ALT_CONSUMER_ID']:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 20
$ENVIRON['ALT_RP_UUID']:
resources:
VCPU: 1
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['ALT_USER_ID']
consumer_generation: 1
status: 400
response_strings:
- No such resource class DISK_OF_STEEL
- name: move bad allocation 409
desc: max unit on disk gb inventory violated
POST: /reshaper
data:
inventories:
$ENVIRON['RP_UUID']:
resource_provider_generation: 5
inventories:
DISK_GB:
total: 2048
step_size: 10
min_unit: 10
max_unit: 600
$ENVIRON['ALT_RP_UUID']:
resource_provider_generation: 0
inventories:
VCPU:
total: 10
max_unit: 8
# these consumer generations are all 1 because they have
# previously allocated
allocations:
$ENVIRON['CONSUMER_0']:
allocations:
$ENVIRON['RP_UUID']:
resources:
# Violates max unit
DISK_GB: 1000
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 1
$ENVIRON['CONSUMER_ID']:
allocations:
$ENVIRON['ALT_RP_UUID']:
resources:
VCPU: 8
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 1
$ENVIRON['ALT_CONSUMER_ID']:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 20
$ENVIRON['ALT_RP_UUID']:
resources:
VCPU: 1
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['ALT_USER_ID']
consumer_generation: 1
status: 409
response_strings:
- Unable to allocate inventory
# This is a successful reshape using information as it was established above
# or in the AllocationFixture. A non-obvious fact of this test is that it
# confirms that resource provider and consumer generations are rolled back
# when failures occur, as in the tests above.
- name: move vcpu inventory and allocations to child
POST: /reshaper
data:
inventories:
$ENVIRON['RP_UUID']:
resource_provider_generation: 5
inventories:
DISK_GB:
total: 2048
step_size: 10
min_unit: 10
max_unit: 1200
$ENVIRON['ALT_RP_UUID']:
resource_provider_generation: 0
inventories:
VCPU:
total: 10
max_unit: 8
# these consumer generations are all 1 because they have
# previously allocated
allocations:
$ENVIRON['CONSUMER_0']:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 1000
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 1
$ENVIRON['CONSUMER_ID']:
allocations:
$ENVIRON['ALT_RP_UUID']:
resources:
VCPU: 8
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 1
$ENVIRON['ALT_CONSUMER_ID']:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 20
$ENVIRON['ALT_RP_UUID']:
resources:
VCPU: 1
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['ALT_USER_ID']
consumer_generation: 1
status: 204
- name: get usages on parent after move
GET: /resource_providers/$ENVIRON['RP_UUID']/usages
response_json_paths:
$.usages:
DISK_GB: 1020
$.resource_provider_generation: 8
- name: get usages on child after move
GET: /resource_providers/$ENVIRON['ALT_RP_UUID']/usages
response_json_paths:
$.usages:
VCPU: 9
$.resource_provider_generation: 3
# Now move some of the inventory back to the original provider, and put all
# the allocations under two new consumers. This is an artificial test to
# exercise new consumer creation.
- name: consolidate inventory and allocations
POST: /reshaper
data:
inventories:
$ENVIRON['RP_UUID']:
resource_provider_generation: 8
inventories:
DISK_GB:
total: 2048
step_size: 10
min_unit: 10
max_unit: 1200
VCPU:
total: 10
max_unit: 8
$ENVIRON['ALT_RP_UUID']:
resource_provider_generation: 3
inventories: {}
allocations:
$ENVIRON['CONSUMER_0']:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 1000
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 2
'7bd2e864-0415-445c-8fc2-328520ef7642':
allocations:
$ENVIRON['RP_UUID']:
resources:
VCPU: 8
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: null
'2dfa608c-cecb-4fe0-a1bb-950015fa731f':
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 20
VCPU: 1
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['ALT_USER_ID']
consumer_generation: null
$ENVIRON['CONSUMER_ID']:
allocations: {}
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['USER_ID']
consumer_generation: 2
$ENVIRON['ALT_CONSUMER_ID']:
allocations:
$ENVIRON['RP_UUID']:
resources:
DISK_GB: 20
project_id: $ENVIRON['PROJECT_ID']
user_id: $ENVIRON['ALT_USER_ID']
consumer_generation: 2
status: 204
- name: get usages on parent after move back
GET: /resource_providers/$ENVIRON['RP_UUID']/usages
response_json_paths:
$.usages:
VCPU: 9
DISK_GB: 1040
$.resource_provider_generation: 11
- name: get usages on child after move back
GET: /resource_providers/$ENVIRON['ALT_RP_UUID']/usages
response_json_paths:
$.usages: {}
$.resource_provider_generation: 5

View File

@ -1,80 +0,0 @@
# A sequence of tests that confirms that a resource class in use
# cannot be deleted.
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
content-type: application/json
# We need version 1.11 as the PUT /allocations below is
# using the < 1.12 data format.
openstack-api-version: placement 1.11
tests:
- name: create a resource provider
POST: /resource_providers
data:
name: an rp
status: 201
- name: get resource provider
GET: $LOCATION
status: 200
- name: create a resource class
PUT: /resource_classes/CUSTOM_GOLD
status: 201
- name: add inventory to an rp
PUT: /resource_providers/$HISTORY['get resource provider'].$RESPONSE['$.uuid']/inventories
data:
resource_provider_generation: 0
inventories:
VCPU:
total: 24
CUSTOM_GOLD:
total: 5
status: 200
- name: allocate some of it
PUT: /allocations/6d9f83db-6eb5-49f6-84b0-5d03c6aa9fc8
data:
allocations:
- resource_provider:
uuid: $HISTORY['get resource provider'].$RESPONSE['$.uuid']
resources:
VCPU: 5
CUSTOM_GOLD: 1
project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784
user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70
status: 204
- name: fail delete resource class allocations
DELETE: /resource_classes/CUSTOM_GOLD
status: 409
response_strings:
- Error in delete resource class
- Class is in use in inventory
- name: delete the allocation
DELETE: $HISTORY['allocate some of it'].$URL
status: 204
- name: fail delete resource class inventory
DELETE: /resource_classes/CUSTOM_GOLD
status: 409
response_strings:
- Error in delete resource class
- Class is in use in inventory
- name: delete the inventory
DELETE: $HISTORY['add inventory to an rp'].$URL
status: 204
- name: delete resource class
DELETE: /resource_classes/CUSTOM_GOLD
status: 204

View File

@ -1,21 +0,0 @@
# Confirm that 1.7 behavior of PUT resource classes is not in
# microversion 1.6.
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
content-type: application/json
openstack-api-version: placement 1.6
tests:
- name: bodiless put
PUT: /resource_classes/CUSTOM_COW
status: 400
response_strings:
# We don't check much of this string because it is different
# between python 2 and 3.
- "Malformed JSON:"

View File

@ -1,49 +0,0 @@
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
content-type: application/json
openstack-api-version: placement 1.7
tests:
- name: create new custom class with put
PUT: /resource_classes/CUSTOM_COW
status: 201
response_headers:
location: //resource_classes/CUSTOM_COW/
- name: verify that class with put
PUT: /resource_classes/CUSTOM_COW
status: 204
response_headers:
location: //resource_classes/CUSTOM_COW/
- name: fail to put non custom class
PUT: /resource_classes/COW
status: 400
response_strings:
- "Failed validating 'pattern'"
- name: try to put standard class
PUT: /resource_classes/VCPU
status: 400
response_strings:
- "Failed validating 'pattern'"
- name: try to put too long class
PUT: /resource_classes/CUSTOM_SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
status: 400
response_strings:
- "Failed validating 'maxLength'"
- name: post to create still works
POST: /resource_classes
data:
name: CUSTOM_SHEEP
status: 201
response_headers:
location: //resource_classes/CUSTOM_SHEEP/

View File

@ -1,117 +0,0 @@
# Confirm the behavior and presence of last-modified headers for resource
# classes across multiple microversions.
#
# We have the following routes, with associated microversion, and bodies.
#
# '/resource_classes': {
# 'GET': resource_class.list_resource_classes,
# v1.2, body
# 'POST': resource_class.create_resource_class
# v1.2, no body
# },
# '/resource_classes/{name}': {
# 'GET': resource_class.get_resource_class,
# v1.2, body
# 'PUT': resource_class.update_resource_class,
# v1.2, body, but time's arrow
# v1.7, no body
# 'DELETE': resource_class.delete_resource_class,
# v1.2, no body
# },
#
# This means that in 1.15 we only expect last-modified headers for
# the two GET requests, for the other requests we should confirm it
# is not there.
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
content-type: application/json
openstack-api-version: placement 1.15
tests:
- name: get resource classes
desc: last modified is now with standards only
GET: /resource_classes
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: create a custom class
PUT: /resource_classes/CUSTOM_MOO_MACHINE
status: 201
response_forbidden_headers:
- last-modified
- cache-control
- name: get custom class
GET: $LAST_URL
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: get standard class
GET: /resource_classes/VCPU
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: post a resource class
POST: /resource_classes
data:
name: CUSTOM_ALPHA
status: 201
response_forbidden_headers:
- last-modified
- cache-control
- name: get resource classes including custom
desc: last modified will still be now with customs because of standards
GET: /resource_classes
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: put a resource class 1.6 microversion
PUT: /resource_classes/CUSTOM_MOO_MACHINE
request_headers:
openstack-api-version: placement 1.6
data:
name: CUSTOM_BETA
status: 200
response_forbidden_headers:
- last-modified
- cache-control
- name: get resource classes 1.14 microversion
GET: /resource_classes
request_headers:
openstack-api-version: placement 1.14
response_forbidden_headers:
- last-modified
- cache-control
- name: get standard class 1.14 microversion
GET: /resource_classes/VCPU
request_headers:
openstack-api-version: placement 1.14
response_forbidden_headers:
- last-modified
- cache-control
- name: get custom class 1.14 microversion
GET: $LAST_URL
request_headers:
openstack-api-version: placement 1.14
response_forbidden_headers:
- last-modified
- cache-control

View File

@ -1,40 +0,0 @@
# This tests the individual CRUD operations on /resource_classes
# using a non-admin user with an open policy configuration. The
# response validation is intentionally minimal.
fixtures:
- OpenPolicyFixture
defaults:
request_headers:
x-auth-token: user
accept: application/json
content-type: application/json
openstack-api-version: placement latest
tests:
- name: list resource classes
GET: /resource_classes
response_json_paths:
$.resource_classes.`len`: 14 # Number of standard resource classes
- name: create resource class
POST: /resource_classes
data:
name: CUSTOM_RES_CLASS_POLICY
status: 201
response_headers:
location: //resource_classes/CUSTOM_RES_CLASS_POLICY/
- name: show resource class
GET: /resource_classes/CUSTOM_RES_CLASS_POLICY
response_json_paths:
$.name: CUSTOM_RES_CLASS_POLICY
- name: update resource class
PUT: /resource_classes/CUSTOM_NEW_CLASS_POLICY
status: 201
- name: delete resource class
DELETE: /resource_classes/CUSTOM_NEW_CLASS_POLICY
status: 204

View File

@ -1,325 +0,0 @@
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
openstack-api-version: placement latest
tests:
- name: test microversion masks resource-classes endpoint for list with 404
GET: /resource_classes
request_headers:
openstack-api-version: placement 1.1
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: test microversion masks resource-classes endpoint for create with 404
desc: we want to get a 404 even if content-type is correct
POST: /resource_classes
request_headers:
openstack-api-version: placement 1.1
content-type: application/json
data:
name: CUSTOM_NFV_BAR
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: test microversion mask when wrong content type
desc: we want to get a 404 before a 415
POST: /resource_classes
request_headers:
openstack-api-version: placement 1.1
content-type: text/plain
data: data
status: 404
- name: test wrong content type
desc: we want to get a 415 when bad content type
POST: /resource_classes
request_headers:
openstack-api-version: placement 1.2
content-type: text/plain
data: data
status: 415
- name: what is at resource classes
GET: /resource_classes
response_json_paths:
$.resource_classes.`len`: 14 # Number of standard resource classes
$.resource_classes[0].name: VCPU
- name: non admin forbidden
GET: /resource_classes
request_headers:
x-auth-token: user
accept: application/json
status: 403
response_json_paths:
$.errors[0].title: Forbidden
- name: post invalid non json
POST: /resource_classes
request_headers:
accept: text/plain
content-type: application/json
data:
name: FOO
status: 400
response_strings:
- JSON does not validate
- name: post illegal characters in name
POST: /resource_classes
request_headers:
content-type: application/json
data:
name: CUSTOM_Illegal&@!Name?
status: 400
response_strings:
- JSON does not validate
response_json_paths:
$.errors[0].title: Bad Request
- name: post new resource class
POST: /resource_classes
request_headers:
content-type: application/json
data:
name: $ENVIRON['CUSTOM_RES_CLASS']
status: 201
response_headers:
location: //resource_classes/$ENVIRON['CUSTOM_RES_CLASS']/
response_forbidden_headers:
- content-type
- name: try to create same again
POST: /resource_classes
request_headers:
content-type: application/json
data:
name: $ENVIRON['CUSTOM_RES_CLASS']
status: 409
response_strings:
- Conflicting resource class already exists
response_json_paths:
$.errors[0].title: Conflict
- name: confirm the correct post
GET: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS']
request_headers:
content-type: application/json
response_json_paths:
$.name: $ENVIRON['CUSTOM_RES_CLASS']
$.links[?rel = "self"].href: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS']
- name: test microversion masks resource-classes endpoint for show with 404
GET: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS']
request_headers:
openstack-api-version: placement 1.1
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: get resource class works with no accept
GET: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS']
request_headers:
content-type: application/json
response_headers:
content-type: /application/json/
response_json_paths:
$.name: $ENVIRON['CUSTOM_RES_CLASS']
- name: list resource classes after addition of custom res class
GET: /resource_classes
response_json_paths:
$.resource_classes.`len`: 15 # 14 standard plus 1 custom
$.resource_classes[14].name: $ENVIRON['CUSTOM_RES_CLASS']
$.resource_classes[14].links[?rel = "self"].href: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS']
- name: update standard resource class bad json
PUT: /resource_classes/VCPU
request_headers:
content-type: application/json
openstack-api-version: placement 1.6
data:
name: VCPU_ALTERNATE
status: 400
response_strings:
- JSON does not validate
response_json_paths:
$.errors[0].title: Bad Request
- name: update standard resource class to custom
desc: standard classes cannot be updated
PUT: /resource_classes/VCPU
request_headers:
content-type: application/json
openstack-api-version: placement 1.6
data:
name: $ENVIRON['CUSTOM_RES_CLASS']
status: 400
response_strings:
- Cannot update standard resource class VCPU
response_json_paths:
$.errors[0].title: Bad Request
- name: update custom resource class to standard resource class name
PUT: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS']
request_headers:
content-type: application/json
openstack-api-version: placement 1.6
data:
name: VCPU
status: 400
response_strings:
- JSON does not validate
response_json_paths:
$.errors[0].title: Bad Request
- name: post another custom resource class
POST: /resource_classes
request_headers:
content-type: application/json
data:
name: CUSTOM_NFV_FOO
status: 201
- name: update custom resource class to already existing custom resource class name
PUT: /resource_classes/CUSTOM_NFV_FOO
request_headers:
content-type: application/json
openstack-api-version: placement 1.6
data:
name: $ENVIRON['CUSTOM_RES_CLASS']
status: 409
response_strings:
- Resource class already exists
- $ENVIRON['CUSTOM_RES_CLASS']
response_json_paths:
$.errors[0].title: Conflict
- name: test microversion masks resource-classes endpoint for update with 404
PUT: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS']
request_headers:
openstack-api-version: placement 1.1
content-type: application/json
data:
name: CUSTOM_NFV_BAR
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: update custom resource class with additional properties
PUT: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS']
request_headers:
content-type: application/json
openstack-api-version: placement 1.6
data:
name: CUSTOM_NFV_BAR
additional: additional
status: 400
response_strings:
- Additional properties are not allowed
- name: update custom resource class
PUT: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS']
request_headers:
content-type: application/json
openstack-api-version: placement 1.6
data:
name: CUSTOM_NFV_BAR
status: 200
response_json_paths:
$.name: CUSTOM_NFV_BAR
$.links[?rel = "self"].href: /resource_classes/CUSTOM_NFV_BAR
- name: delete standard resource class
DELETE: /resource_classes/VCPU
status: 400
response_strings:
- Cannot delete standard resource class
response_json_paths:
$.errors[0].title: Bad Request
- name: test microversion masks resource-classes endpoint for delete with 404
DELETE: /resource_classes/CUSTOM_NFV_BAR
request_headers:
openstack-api-version: placement 1.1
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: delete custom resource class
DELETE: /resource_classes/CUSTOM_NFV_BAR
status: 204
- name: 404 on deleted resource class
DELETE: $LAST_URL
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: post malformed json as json
POST: /resource_classes
request_headers:
content-type: application/json
data: '{"foo": }'
status: 400
response_strings:
- 'Malformed JSON:'
response_json_paths:
$.errors[0].title: Bad Request
- name: post bad resource class name IRON_NFV
POST: /resource_classes
request_headers:
content-type: application/json
data:
name: IRON_NFV # Doesn't start with CUSTOM_
status: 400
response_strings:
- JSON does not validate
response_json_paths:
$.errors[0].title: Bad Request
- name: try to create resource class with name exceed max characters
POST: /resource_classes
request_headers:
content-type: application/json
data:
name: &name_exceeds_max_length_check CUSTOM_THIS_IS_A_LONG_TEXT_OF_LENGTH_256_CHARACTERSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
status: 400
response_strings:
- "Failed validating 'maxLength'"
response_json_paths:
$.errors[0].title: Bad Request
- name: try to update resource class with name exceed max characters
PUT: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS']
request_headers:
content-type: application/json
openstack-api-version: placement 1.6
data:
name: *name_exceeds_max_length_check
status: 400
response_strings:
- "Failed validating 'maxLength'"
response_json_paths:
$.errors[0].title: Bad Request
- name: try to create resource class with additional properties
POST: /resource_classes
request_headers:
content-type: application/json
data:
name: CUSTOM_NFV_BAR
additional: additional
status: 400
response_strings:
- Additional properties are not allowed

View File

@ -1,181 +0,0 @@
# Tests filtering resource providers by aggregates
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
content-type: application/json
accept: application/json
openstack-api-version: placement latest
tests:
- name: post new provider 1
POST: /resource_providers
data:
name: rp_1
uuid: 893337e9-1e55-49f0-bcfe-6a2f16fbf2f7
status: 200
- name: post new provider 2
POST: /resource_providers
data:
name: rp_2
uuid: 5202c48f-c960-4eec-bde3-89c4f22a17b9
status: 200
- name: get by aggregates no result
GET: '/resource_providers?member_of=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91'
response_json_paths:
$.resource_providers: []
- name: associate an aggregate with rp1
PUT: /resource_providers/893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/aggregates
data:
aggregates:
- 83a3d69d-8920-48e2-8914-cadfd8fa2f91
resource_provider_generation: 0
status: 200
- name: get by aggregates one result
GET: '/resource_providers?member_of=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91'
response_json_paths:
$.resource_providers[0].uuid: 893337e9-1e55-49f0-bcfe-6a2f16fbf2f7
- name: get by aggregates one result no in
GET: '/resource_providers?member_of=83a3d69d-8920-48e2-8914-cadfd8fa2f91'
response_json_paths:
$.resource_providers[0].uuid: 893337e9-1e55-49f0-bcfe-6a2f16fbf2f7
- name: get by aggregates no result not a uuid
GET: '/resource_providers?member_of=not+a+uuid'
status: 400
response_strings:
- "Expected 'member_of' parameter to contain valid UUID(s)."
response_json_paths:
$.errors[0].title: Bad Request
- name: associate an aggregate with rp2
PUT: /resource_providers/5202c48f-c960-4eec-bde3-89c4f22a17b9/aggregates
data:
aggregates:
- 83a3d69d-8920-48e2-8914-cadfd8fa2f91
resource_provider_generation: 0
status: 200
- name: get by aggregates two result
GET: '/resource_providers?member_of=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91'
response_json_paths:
$.resource_providers.`len`: 2
$.resource_providers[0].uuid: /5202c48f-c960-4eec-bde3-89c4f22a17b9|893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/
$.resource_providers[1].uuid: /5202c48f-c960-4eec-bde3-89c4f22a17b9|893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/
- name: associate another aggregate with rp2
PUT: /resource_providers/5202c48f-c960-4eec-bde3-89c4f22a17b9/aggregates
data:
aggregates:
- 99652f11-9f77-46b9-80b7-4b1989be9f8c
resource_provider_generation: 1
status: 200
- name: get by both aggregates two
GET: '/resource_providers?member_of=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91,99652f11-9f77-46b9-80b7-4b1989be9f8c'
response_json_paths:
$.resource_providers.`len`: 2
$.resource_providers[0].uuid: /5202c48f-c960-4eec-bde3-89c4f22a17b9|893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/
$.resource_providers[1].uuid: /5202c48f-c960-4eec-bde3-89c4f22a17b9|893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/
- name: clear aggregates on rp1
PUT: /resource_providers/893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/aggregates
data:
aggregates: []
resource_provider_generation: 1
status: 200
- name: get by both aggregates one
desc: only one result because we disassociated aggregates in the PUT above
GET: '/resource_providers?member_of=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91,99652f11-9f77-46b9-80b7-4b1989be9f8c'
response_json_paths:
$.resource_providers.`len`: 1
$.resource_providers[0].uuid: 5202c48f-c960-4eec-bde3-89c4f22a17b9
- name: error on old microversion
GET: '/resource_providers?member_of=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91,99652f11-9f77-46b9-80b7-4b1989be9f8c'
request_headers:
openstack-api-version: placement 1.1
status: 400
response_strings:
- 'Invalid query string parameters'
response_json_paths:
$.errors[0].title: Bad Request
- name: error on bogus query parameter
GET: '/resource_providers?assoc_with_aggregate=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91,99652f11-9f77-46b9-80b7-4b1989be9f8c'
status: 400
response_strings:
- 'Invalid query string parameters'
response_json_paths:
$.errors[0].title: Bad Request
- name: error trying multiple member_of params prior correct microversion
GET: '/resource_providers?member_of=83a3d69d-8920-48e2-8914-cadfd8fa2f91&member_of=99652f11-9f77-46b9-80b7-4b1989be9f8c'
request_headers:
openstack-api-version: placement 1.23
status: 400
response_strings:
- 'Multiple member_of parameters are not supported'
response_json_paths:
$.errors[0].title: Bad Request
- name: multiple member_of params with no results
GET: '/resource_providers?member_of=83a3d69d-8920-48e2-8914-cadfd8fa2f91&member_of=99652f11-9f77-46b9-80b7-4b1989be9f8c'
status: 200
response_json_paths:
# No provider is associated with both aggregates
resource_providers: []
- name: associate two aggregates with rp2
PUT: /resource_providers/5202c48f-c960-4eec-bde3-89c4f22a17b9/aggregates
data:
aggregates:
- 99652f11-9f77-46b9-80b7-4b1989be9f8c
- 83a3d69d-8920-48e2-8914-cadfd8fa2f91
resource_provider_generation: 2
status: 200
- name: multiple member_of params AND together to result in one provider
GET: '/resource_providers?member_of=83a3d69d-8920-48e2-8914-cadfd8fa2f91&member_of=99652f11-9f77-46b9-80b7-4b1989be9f8c'
status: 200
response_json_paths:
# One provider is now associated with both aggregates
$.resource_providers.`len`: 1
$.resource_providers[0].uuid: 5202c48f-c960-4eec-bde3-89c4f22a17b9
- name: associate two aggregates to rp1, one of which overlaps with rp2
PUT: /resource_providers/893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/aggregates
data:
aggregates:
- 282d469e-29e2-4a8a-8f2e-31b3202b696a
- 83a3d69d-8920-48e2-8914-cadfd8fa2f91
resource_provider_generation: 2
status: 200
- name: two AND'd member_ofs with one OR'd member_of
GET: '/resource_providers?member_of=83a3d69d-8920-48e2-8914-cadfd8fa2f91&member_of=in:99652f11-9f77-46b9-80b7-4b1989be9f8c,282d469e-29e2-4a8a-8f2e-31b3202b696a'
status: 200
response_json_paths:
# Both rp1 and rp2 returned because both are associated with agg 83a3d69d
# and each is associated with either agg 99652f11 or agg 282d469e
$.resource_providers.`len`: 2
$.resource_providers[0].uuid: /5202c48f-c960-4eec-bde3-89c4f22a17b9|893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/
$.resource_providers[1].uuid: /5202c48f-c960-4eec-bde3-89c4f22a17b9|893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/
- name: two AND'd member_ofs using same agg UUID
GET: '/resource_providers?member_of=282d469e-29e2-4a8a-8f2e-31b3202b696a&member_of=282d469e-29e2-4a8a-8f2e-31b3202b696a'
status: 200
response_json_paths:
# Only rp1 returned since it's the only one associated with the duplicated agg
$.resource_providers.`len`: 1
$.resource_providers[0].uuid: /893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/

View File

@ -1,123 +0,0 @@
# Test launchpad bug https://bugs.launchpad.net/nova/+bug/1779818
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
openstack-api-version: placement latest
tests:
- name: post a resource provider as alt_parent
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: alt_parent
uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
status: 200
response_json_paths:
$.uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
$.name: alt_parent
$.parent_provider_uuid: null
$.generation: 0
- name: post another resource provider as parent
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: parent
uuid: $ENVIRON['PARENT_PROVIDER_UUID']
status: 200
response_json_paths:
$.uuid: $ENVIRON['PARENT_PROVIDER_UUID']
$.name: parent
$.parent_provider_uuid: null
$.generation: 0
- name: post a child resource provider of the parent
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: child
uuid: $ENVIRON['RP_UUID']
parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
status: 200
response_json_paths:
$.uuid: $ENVIRON['RP_UUID']
$.name: child
$.parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
$.generation: 0
# Let's validate that now we have two tree structures
# * alt_parent
# * parent
# |
# +-- child
- name: list all resource providers
GET: /resource_providers
response_json_paths:
$.resource_providers.`len`: 3
$.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].parent_provider_uuid: null
$.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].parent_provider_uuid: null
$.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['RP_UUID']"].parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
# Let's re-parent the parent to the alternative parent
# so that we have only one tree.
# * alt_parent
# |
# +-- parent
# |
# +-- child
- name: update a parent of the parent
PUT: /resource_providers/$ENVIRON['PARENT_PROVIDER_UUID']
request_headers:
content-type: application/json
data:
name: parent
parent_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
status: 200
# Let's validate that we have only one root provider now
- name: list all resource providers updated
GET: /resource_providers
response_json_paths:
$.resource_providers.`len`: 3
$.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].parent_provider_uuid: null
$.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].parent_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['RP_UUID']"].parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
- name: list all resource providers in a tree with the child
GET: /resource_providers?in_tree=$ENVIRON['RP_UUID']
response_json_paths:
$.resource_providers.`len`: 3
$.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
- name: list all resource providers in a tree with the parent
GET: /resource_providers?in_tree=$ENVIRON['PARENT_PROVIDER_UUID']
response_json_paths:
$.resource_providers.`len`: 3
$.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
- name: list all resource providers in a tree with the alternative parent
GET: /resource_providers?in_tree=$ENVIRON['ALT_PARENT_PROVIDER_UUID']
response_json_paths:
$.resource_providers.`len`: 3
$.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']

View File

@ -1,48 +0,0 @@
# Verify different error messages when attempting to create a
# resource provider with a duplicated name or UUID.
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
content-type: application/json
tests:
- name: post new resource provider
POST: /resource_providers
data:
name: shared disk
uuid: $ENVIRON['RP_UUID']
status: 201
- name: same uuid different name
POST: /resource_providers
data:
name: shared disk X
uuid: $ENVIRON['RP_UUID']
status: 409
response_strings:
- "Conflicting resource provider uuid: $ENVIRON['RP_UUID']"
- name: same name different uuid
POST: /resource_providers
data:
name: shared disk
uuid: 2c2059d8-005c-4f5c-82b1-b1701b1a29b7
status: 409
response_strings:
- 'Conflicting resource provider name: shared disk'
# On this one, don't test for which field was a duplicate because
# that depends on how the database reports columns.
- name: same name same uuid
POST: /resource_providers
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 409
response_strings:
- Conflicting resource provider

View File

@ -1,106 +0,0 @@
# Confirm that the links provided when getting one or more resources
# providers are correct. They vary across different microversions.
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
tests:
- name: post new resource provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 201
- name: get rp latest
GET: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
openstack-api-version: placement latest
response_json_paths:
$.links.`len`: 6
$.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
$.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories
$.links[?rel = "aggregates"].href: /resource_providers/$ENVIRON['RP_UUID']/aggregates
$.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages
$.links[?rel = "allocations"].href: /resource_providers/$ENVIRON['RP_UUID']/allocations
$.links[?rel = "traits"].href: /resource_providers/$ENVIRON['RP_UUID']/traits
- name: get rp 1.0
GET: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
openstack-api-version: placement 1.0
response_json_paths:
$.links.`len`: 3
$.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
$.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories
$.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages
- name: get rp 1.1
desc: aggregates added in 1.1
GET: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
openstack-api-version: placement 1.1
response_json_paths:
$.links.`len`: 4
$.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
$.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories
$.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages
$.links[?rel = "aggregates"].href: /resource_providers/$ENVIRON['RP_UUID']/aggregates
- name: get rp 1.5
desc: traits added after 1.5
GET: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
openstack-api-version: placement 1.5
response_json_paths:
$.links.`len`: 4
$.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
$.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories
$.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages
$.links[?rel = "aggregates"].href: /resource_providers/$ENVIRON['RP_UUID']/aggregates
- name: get rp 1.6
desc: traits added in 1.6
GET: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
openstack-api-version: placement 1.6
response_json_paths:
$.links.`len`: 5
$.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
$.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories
$.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages
$.links[?rel = "aggregates"].href: /resource_providers/$ENVIRON['RP_UUID']/aggregates
$.links[?rel = "traits"].href: /resource_providers/$ENVIRON['RP_UUID']/traits
- name: get rp 1.7
desc: nothing new in 1.7
GET: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
openstack-api-version: placement 1.7
response_json_paths:
$.links.`len`: 5
$.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
$.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories
$.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages
$.links[?rel = "aggregates"].href: /resource_providers/$ENVIRON['RP_UUID']/aggregates
$.links[?rel = "traits"].href: /resource_providers/$ENVIRON['RP_UUID']/traits
- name: get rp allocations link added in 1.11
GET: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
openstack-api-version: placement 1.11
response_json_paths:
$.links.`len`: 6
$.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
$.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories
$.links[?rel = "aggregates"].href: /resource_providers/$ENVIRON['RP_UUID']/aggregates
$.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages
$.links[?rel = "allocations"].href: /resource_providers/$ENVIRON['RP_UUID']/allocations
$.links[?rel = "traits"].href: /resource_providers/$ENVIRON['RP_UUID']/traits

View File

@ -1,48 +0,0 @@
# This tests the individual CRUD operations on /resource_providers
# using a non-admin user with an open policy configuration. The
# response validation is intentionally minimal.
fixtures:
- OpenPolicyFixture
defaults:
request_headers:
x-auth-token: user
accept: application/json
content-type: application/json
openstack-api-version: placement latest
tests:
- name: list resource providers
GET: /resource_providers
response_json_paths:
$.resource_providers: []
- name: create resource provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 200
response_json_paths:
$.uuid: $ENVIRON['RP_UUID']
- name: show resource provider
GET: /resource_providers/$ENVIRON['RP_UUID']
response_json_paths:
$.uuid: $ENVIRON['RP_UUID']
- name: update resource provider
PUT: /resource_providers/$ENVIRON['RP_UUID']
data:
name: new name
status: 200
response_json_paths:
$.name: new name
$.uuid: $ENVIRON['RP_UUID']
- name: delete resource provider
DELETE: /resource_providers/$ENVIRON['RP_UUID']
status: 204

View File

@ -1,156 +0,0 @@
fixtures:
- AllocationFixture
defaults:
request_headers:
x-auth-token: admin
content-type: application/json
accept: application/json
openstack-api-version: placement latest
tests:
- name: what is at resource providers
GET: /resource_providers
response_json_paths:
$.resource_providers.`len`: 1
$.resource_providers[0].uuid: $ENVIRON['RP_UUID']
$.resource_providers[0].name: $ENVIRON['RP_NAME']
$.resource_providers[0].links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
$.resource_providers[0].links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories
$.resource_providers[0].links[?rel = "aggregates"].href: /resource_providers/$ENVIRON['RP_UUID']/aggregates
$.resource_providers[0].links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages
- name: post new resource provider
POST: /resource_providers
data:
name: $ENVIRON['ALT_RP_NAME']
uuid: $ENVIRON['ALT_RP_UUID']
status: 200
response_headers:
location: //resource_providers/[a-f0-9-]+/
- name: now 2 providers listed
GET: /resource_providers
response_json_paths:
$.resource_providers.`len`: 2
- name: list resource providers providing resources filter before API 1.4
GET: /resource_providers?resources=VCPU:1
request_headers:
openstack-api-version: placement 1.3
status: 400
response_strings:
- 'Invalid query string parameters'
response_json_paths:
$.errors[0].title: Bad Request
- name: list resource providers providing a badly-formatted resources filter
GET: /resource_providers?resources=VCPU
status: 400
response_strings:
- 'Badly formed resources parameter. Expected resources query string parameter in form:'
- 'Got: VCPU.'
response_json_paths:
$.errors[0].title: Bad Request
- name: list resource providers providing a resources filter with non-integer amount
GET: /resource_providers?resources=VCPU:fred
status: 400
response_strings:
- 'Requested resource VCPU expected positive integer amount.'
- 'Got: fred.'
response_json_paths:
$.errors[0].title: Bad Request
- name: list resource providers providing a resources filter with negative amount
GET: /resource_providers?resources=VCPU:-2
status: 400
response_strings:
- 'Requested resource VCPU requires amount >= 1.'
- 'Got: -2.'
response_json_paths:
$.errors[0].title: Bad Request
- name: list resource providers providing a resource class not existing
GET: /resource_providers?resources=MYMISSINGCLASS:1
status: 400
response_strings:
- 'Invalid resource class in resources parameter'
response_json_paths:
$.errors[0].title: Bad Request
- name: list resource providers providing a bad trailing comma
GET: /resource_providers?resources=DISK_GB:500,
status: 400
response_strings:
- 'Badly formed resources parameter. Expected resources query string parameter in form:'
# NOTE(mriedem): The value is empty because splitting on the trailing
# comma results in an empty string.
- 'Got: .'
response_json_paths:
$.errors[0].title: Bad Request
- name: list resource providers providing empty resources
GET: /resource_providers?resources=
status: 400
response_strings:
- Badly formed resources parameter. Expected resources query string parameter in form
- 'Got: empty string.'
- name: list resource providers providing disk resources
GET: /resource_providers?resources=DISK_GB:500
response_json_paths:
$.resource_providers.`len`: 1
$.resource_providers[0].uuid: $ENVIRON['RP_UUID']
- name: list resource providers providing disk and vcpu resources
GET: /resource_providers?resources=DISK_GB:500,VCPU:2
response_json_paths:
$.resource_providers.`len`: 1
$.resource_providers[0].uuid: $ENVIRON['RP_UUID']
- name: list resource providers providing resources (no match - less than min_unit)
GET: /resource_providers?resources=DISK_GB:1
response_json_paths:
$.resource_providers.`len`: 0
- name: list resource providers providing resources (no match - more than max_unit)
GET: /resource_providers?resources=DISK_GB:1010
response_json_paths:
$.resource_providers.`len`: 0
- name: list resource providers providing resources (no match - not enough inventory)
GET: /resource_providers?resources=DISK_GB:102400
response_json_paths:
$.resource_providers.`len`: 0
- name: list resource providers providing resources (no match - bad step size)
GET: /resource_providers?resources=DISK_GB:11
response_json_paths:
$.resource_providers.`len`: 0
- name: list resource providers providing resources (no match - no inventory of resource)
GET: /resource_providers?resources=MEMORY_MB:10240
response_json_paths:
$.resource_providers.`len`: 0
- name: list resource providers providing resources (no match - not enough VCPU)
GET: /resource_providers?resources=DISK_GB:500,VCPU:4
response_json_paths:
$.resource_providers.`len`: 0
- name: associate an aggregate with rp1
PUT: /resource_providers/$ENVIRON['RP_UUID']/aggregates
data:
aggregates:
- 83a3d69d-8920-48e2-8914-cadfd8fa2f91
resource_provider_generation: $HISTORY['list resource providers providing disk and vcpu resources'].$RESPONSE['$.resource_providers[0].generation']
status: 200
- name: get by aggregates with resources
GET: '/resource_providers?member_of=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91&resources=VCPU:2'
response_json_paths:
$.resource_providers.`len`: 1
$.resource_providers[0].uuid: $ENVIRON['RP_UUID']

View File

@ -1,775 +0,0 @@
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
accept: application/json
openstack-api-version: placement latest
tests:
- name: what is at resource providers
GET: /resource_providers
request_headers:
# microversion 1.15 for cache headers
openstack-api-version: placement 1.15
response_json_paths:
$.resource_providers: []
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: non admin forbidden
GET: /resource_providers
request_headers:
x-auth-token: user
accept: application/json
status: 403
response_json_paths:
$.errors[0].title: Forbidden
- name: route not found non json
GET: /moo
request_headers:
accept: text/plain
status: 404
response_strings:
- The resource could not be found
- name: post new resource provider - old microversion
POST: /resource_providers
request_headers:
content-type: application/json
openstack-api-version: placement 1.19
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 201
response_headers:
location: //resource_providers/[a-f0-9-]+/
response_forbidden_headers:
- content-type
- name: delete it
DELETE: $LOCATION
status: 204
- name: post new resource provider - new microversion
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 200
response_headers:
location: //resource_providers/[a-f0-9-]+/
response_json_paths:
$.uuid: $ENVIRON['RP_UUID']
$.name: $ENVIRON['RP_NAME']
$.parent_provider_uuid: null
$.generation: 0
$.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
$.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories
$.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages
# On this one, don't test for which field was a duplicate because
# that depends on how the database reports columns.
- name: try to create same all again
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 409
response_strings:
- Conflicting resource provider
response_json_paths:
$.errors[0].title: Conflict
- name: try to create same name again
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: $ENVIRON['RP_NAME']
uuid: ada30fb5-566d-4fe1-b43b-28a9e988790c
status: 409
response_strings:
- "Conflicting resource provider name: $ENVIRON['RP_NAME'] already exists"
response_json_paths:
$.errors[0].title: Conflict
$.errors[0].code: placement.duplicate_name
- name: confirm the correct post
GET: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
content-type: application/json
openstack-api-version: placement 1.15
response_headers:
content-type: application/json
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
response_json_paths:
$.uuid: $ENVIRON['RP_UUID']
$.name: $ENVIRON['RP_NAME']
$.parent_provider_uuid: null
$.generation: 0
$.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
$.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories
$.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages
- name: get resource provider works with no accept
GET: /resource_providers/$ENVIRON['RP_UUID']
response_headers:
content-type: /application/json/
response_json_paths:
$.uuid: $ENVIRON['RP_UUID']
- name: get non-existing resource provider
GET: /resource_providers/d67370b5-4dc0-470d-a4fa-85e8e89abc6c
status: 404
response_strings:
- No resource provider with uuid d67370b5-4dc0-470d-a4fa-85e8e89abc6c found
response_json_paths:
$.errors[0].title: Not Found
- name: list one resource providers
GET: /resource_providers
request_headers:
openstack-api-version: placement 1.15
response_json_paths:
$.resource_providers.`len`: 1
$.resource_providers[0].uuid: $ENVIRON['RP_UUID']
$.resource_providers[0].name: $ENVIRON['RP_NAME']
$.resource_providers[0].generation: 0
$.resource_providers[0].parent_provider_uuid: null
$.resource_providers[0].links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
$.resource_providers[0].links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories
$.resource_providers[0].links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: filter out all resource providers by name
GET: /resource_providers?name=flubblebubble
response_json_paths:
$.resource_providers.`len`: 0
- name: filter out all resource providers by uuid
GET: /resource_providers?uuid=d67370b5-4dc0-470d-a4fa-85e8e89abc6c
response_json_paths:
$.resource_providers.`len`: 0
- name: list one resource provider filtering by name
GET: /resource_providers?name=$ENVIRON['RP_NAME']
response_json_paths:
$.resource_providers.`len`: 1
$.resource_providers[0].uuid: $ENVIRON['RP_UUID']
$.resource_providers[0].name: $ENVIRON['RP_NAME']
$.resource_providers[0].links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
$.resource_providers[0].links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories
$.resource_providers[0].links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages
- name: list resource providers filtering by invalid uuid
GET: /resource_providers?uuid=spameggs
status: 400
response_strings:
- 'Invalid query string parameters'
response_json_paths:
$.errors[0].title: Bad Request
- name: list resource providers providing an invalid filter
GET: /resource_providers?spam=eggs
status: 400
response_strings:
- 'Invalid query string parameters'
response_json_paths:
$.errors[0].title: Bad Request
- name: list one resource provider filtering by uuid with allocations link
GET: /resource_providers?uuid=$ENVIRON['RP_UUID']
request_headers:
openstack-api-version: placement 1.11
response_json_paths:
$.resource_providers.`len`: 1
$.resource_providers[0].uuid: $ENVIRON['RP_UUID']
$.resource_providers[0].name: $ENVIRON['RP_NAME']
$.resource_providers[0].links.`len`: 6
$.resource_providers[0].links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
$.resource_providers[0].links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories
$.resource_providers[0].links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages
$.resource_providers[0].links[?rel = "allocations"].href: /resource_providers/$ENVIRON['RP_UUID']/allocations
- name: list one resource provider filtering by uuid no allocations link
GET: /resource_providers?uuid=$ENVIRON['RP_UUID']
request_headers:
openstack-api-version: placement 1.10
response_json_paths:
$.resource_providers.`len`: 1
$.resource_providers[0].uuid: $ENVIRON['RP_UUID']
$.resource_providers[0].name: $ENVIRON['RP_NAME']
$.resource_providers[0].links.`len`: 5
$.resource_providers[0].links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
$.resource_providers[0].links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories
$.resource_providers[0].links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages
- name: update a resource provider's name
PUT: /resource_providers/$RESPONSE['$.resource_providers[0].uuid']
request_headers:
content-type: application/json
openstack-api-version: placement 1.15
data:
name: new name
status: 200
response_headers:
content-type: /application/json/
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
response_forbidden_headers:
- location
response_json_paths:
$.generation: 0
$.name: new name
$.uuid: $ENVIRON['RP_UUID']
$.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID']
- name: check the name from that update
GET: $LAST_URL
response_json_paths:
$.name: new name
- name: update a provider poorly
PUT: $LAST_URL
request_headers:
content-type: application/json
data:
badfield: new name
status: 400
response_strings:
- 'JSON does not validate'
response_json_paths:
$.errors[0].title: Bad Request
# This section of tests validate nested resource provider relationships and
# constraints. We attempt to set the parent provider UUID for the primary
# resource provider to a UUID value of a provider we have not yet created and
# expect a failure. We then create that parent provider record and attempt to
# set the same parent provider UUID without also setting the root provider UUID
# to the same value, with an expected failure. Finally, we set the primary
# provider's root AND parent to the new provider UUID and verify success.
- name: test POST microversion limits nested providers
POST: /resource_providers
request_headers:
openstack-api-version: placement 1.13
content-type: application/json
data:
name: child
parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
status: 400
response_strings:
- 'JSON does not validate'
- name: test PUT microversion limits nested providers
PUT: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
openstack-api-version: placement 1.13
content-type: application/json
data:
name: child
parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
status: 400
response_strings:
- 'JSON does not validate'
- name: fail trying to set a root provider UUID
PUT: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
content-type: application/json
data:
root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
status: 400
response_strings:
- 'JSON does not validate'
- name: fail trying to self-parent
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: child
uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
parent_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
status: 400
response_strings:
- 'parent provider UUID cannot be same as UUID'
- 'Unable to create resource provider \"child\", $ENVIRON["ALT_PARENT_PROVIDER_UUID"]:'
- name: update a parent provider UUID to non-existing provider
PUT: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
content-type: application/json
data:
name: parent
parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
status: 400
response_strings:
- 'parent provider UUID does not exist'
- name: now create the parent provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: parent
uuid: $ENVIRON['PARENT_PROVIDER_UUID']
status: 200
response_json_paths:
$.uuid: $ENVIRON['PARENT_PROVIDER_UUID']
$.name: parent
$.parent_provider_uuid: null
$.generation: 0
- name: get provider with old microversion no root provider UUID field
GET: /resource_providers/$ENVIRON['PARENT_PROVIDER_UUID']
request_headers:
openstack-api-version: placement 1.13
content-type: application/json
response_json_paths:
$.`len`: 4
name: parent
status: 200
- name: get provider has root provider UUID field
GET: /resource_providers/$ENVIRON['PARENT_PROVIDER_UUID']
request_headers:
content-type: application/json
response_json_paths:
$.`len`: 6
name: parent
root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
parent_provider_uuid: null
status: 200
- name: update a parent
PUT: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
content-type: application/json
data:
name: child
parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
status: 200
- name: get provider has new parent and root provider UUID field
GET: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
content-type: application/json
response_json_paths:
name: child
root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
status: 200
- name: fail trying to un-parent
PUT: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
content-type: application/json
data:
name: child
parent_provider_uuid: null
status: 400
response_strings:
- 'un-parenting a provider is not currently allowed'
- name: 409 conflict while trying to delete parent with existing child
DELETE: /resource_providers/$ENVIRON['PARENT_PROVIDER_UUID']
status: 409
response_strings:
- "Unable to delete parent resource provider
$ENVIRON['PARENT_PROVIDER_UUID']: It has child resource providers."
response_json_paths:
$.errors[0].code: placement.resource_provider.cannot_delete_parent
- name: list all resource providers in a tree that does not exist
GET: /resource_providers?in_tree=$ENVIRON['ALT_PARENT_PROVIDER_UUID']
response_json_paths:
$.resource_providers.`len`: 0
- name: list all resource providers in a tree with multiple providers in tree
GET: /resource_providers?in_tree=$ENVIRON['RP_UUID']
response_json_paths:
$.resource_providers.`len`: 2
# Verify that we have both the parent and child in the list
$.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
- name: create a new parent provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: altwparent
uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
status: 200
response_headers:
location: //resource_providers/[a-f0-9-]+/
response_json_paths:
$.uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
$.name: altwparent
- name: list all resource providers in a tree
GET: /resource_providers?in_tree=$ENVIRON['ALT_PARENT_PROVIDER_UUID']
response_json_paths:
$.resource_providers.`len`: 1
$.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
- name: filter providers by traits none of them have
GET: /resource_providers?required=HW_CPU_X86_SGX,HW_CPU_X86_SHA
response_json_paths:
$.resource_providers.`len`: 0
- name: add traits to a provider
PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
content-type: application/json
data:
resource_provider_generation: 0
traits: ['HW_CPU_X86_SGX', 'STORAGE_DISK_SSD']
- name: add traits to another provider
PUT: /resource_providers/$ENVIRON['ALT_PARENT_PROVIDER_UUID']/traits
request_headers:
content-type: application/json
data:
resource_provider_generation: 0
traits: ['MISC_SHARES_VIA_AGGREGATE', 'STORAGE_DISK_SSD']
- name: filter providers with multiple traits where no provider has all of them
GET: /resource_providers?required=HW_CPU_X86_SGX,MISC_SHARES_VIA_AGGREGATE
response_json_paths:
$.resource_providers.`len`: 0
- name: filter providers with a trait some of them have
GET: /resource_providers?required=STORAGE_DISK_SSD
response_json_paths:
$.resource_providers.`len`: 2
# Don't really care about the root UUID - just validating that the providers present are the ones we expected
$.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
$.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
- name: list providers with 'required' parameter filters cumulatively with in_tree
GET: /resource_providers?required=STORAGE_DISK_SSD&in_tree=$ENVIRON['RP_UUID']
response_json_paths:
$.resource_providers.`len`: 1
# Only RP_UUID satisfies both the tree and trait constraint
$.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
- name: list providers for full count
GET: /resource_providers
response_json_paths:
$.resource_providers.`len`: 3
- name: list providers forbidden 1.22
GET: /resource_providers?required=!STORAGE_DISK_SSD
response_json_paths:
$.resource_providers.`len`: 1
- name: confirm forbidden trait not there
GET: /resource_providers/$RESPONSE['$.resource_providers[0].uuid']/traits
response_json_paths:
$.traits: []
- name: list providers forbidden 1.21
GET: /resource_providers?required=!STORAGE_DISK_SSD
request_headers:
openstack-api-version: placement 1.21
status: 400
response_strings:
- "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,CUSTOM_MAGIC. Got: !STORAGE_DISK_SSD"
- name: list providers forbidden again
GET: /resource_providers?required=!MISC_SHARES_VIA_AGGREGATE
response_json_paths:
$.resource_providers.`len`: 2
- name: mixed required and forbidden
GET: /resource_providers?required=!HW_CPU_X86_SGX,STORAGE_DISK_SSD
response_json_paths:
$.resource_providers.`len`: 1
- name: confirm mixed required and forbidden
GET: /resource_providers/$RESPONSE['$.resource_providers[0].uuid']/traits
response_json_paths:
$.traits.`sorted`: ['MISC_SHARES_VIA_AGGREGATE', 'STORAGE_DISK_SSD']
- name: multiple forbidden
GET: /resource_providers?required=!MISC_SHARES_VIA_AGGREGATE,!HW_CPU_X86_SGX
response_json_paths:
$.resource_providers.`len`: 1
- name: confirm multiple forbidden
GET: /resource_providers/$RESPONSE['$.resource_providers[0].uuid']/traits
response_json_paths:
$.traits: []
- name: forbidden no apply
GET: /resource_providers?required=!HW_CPU_X86_VMX
response_json_paths:
$.resource_providers.`len`: 3
- name: create some inventory
PUT: /resource_providers/$ENVIRON['ALT_PARENT_PROVIDER_UUID']/inventories
request_headers:
content-type: application/json
data:
resource_provider_generation: 1
inventories:
IPV4_ADDRESS:
total: 253
DISK_GB:
total: 1024
status: 200
response_json_paths:
$.resource_provider_generation: 2
$.inventories.IPV4_ADDRESS.total: 253
$.inventories.IPV4_ADDRESS.reserved: 0
$.inventories.DISK_GB.total: 1024
$.inventories.DISK_GB.allocation_ratio: 1.0
- name: list providers with 'required' parameter filters cumulatively with resources
GET: /resource_providers?required=STORAGE_DISK_SSD&resources=IPV4_ADDRESS:10
response_json_paths:
$.resource_providers.`len`: 1
# Only ALT_PARENT_PROVIDER_UUID satisfies both the trait and resource constraints
$.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
- name: invalid 'required' parameter - blank
GET: /resource_providers?required=
status: 400
response_strings:
- "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,!CUSTOM_MAGIC."
response_json_paths:
$.errors[0].title: Bad Request
- name: invalid 'required' parameter 1.21
GET: /resource_providers?required=
request_headers:
openstack-api-version: placement 1.21
status: 400
response_strings:
- "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,CUSTOM_MAGIC."
response_json_paths:
$.errors[0].title: Bad Request
- name: invalid 'required' parameter - contains an empty trait name
GET: /resource_providers?required=STORAGE_DISK_SSD,,MISC_SHARES_VIA_AGGREGATE
status: 400
response_strings:
- "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,!CUSTOM_MAGIC."
response_json_paths:
$.errors[0].title: Bad Request
- name: invalid 'required' parameter - contains a nonexistent trait
GET: /resource_providers?required=STORAGE_DISK_SSD,BOGUS_TRAIT,MISC_SHARES_VIA_AGGREGATE
status: 400
response_strings:
- "No such trait(s): BOGUS_TRAIT."
response_json_paths:
$.errors[0].title: Bad Request
- name: schema validation fails with 'required' parameter on old microversion
request_headers:
openstack-api-version: placement 1.17
GET: /resource_providers?required=HW_CPU_X86_SGX,MISC_SHARES_VIA_AGGREGATE
status: 400
response_strings:
- Additional properties are not allowed
response_json_paths:
$.errors[0].title: Bad Request
- name: fail trying to re-parent to a different provider
PUT: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
content-type: application/json
data:
name: child
parent_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID']
status: 400
response_strings:
- 're-parenting a provider is not currently allowed'
- name: create a new provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: cow
status: 200
- name: try to rename that provider to existing name
PUT: $LOCATION
request_headers:
content-type: application/json
data:
name: child
status: 409
response_json_paths:
$.errors[0].title: Conflict
$.errors[0].code: placement.duplicate_name
- name: fail to put that provider with uuid
PUT: $LAST_URL
request_headers:
content-type: application/json
data:
name: second new name
uuid: 7d4275fc-8b40-4995-85e2-74fcec2cb3b6
status: 400
response_strings:
- Additional properties are not allowed
response_json_paths:
$.errors[0].title: Bad Request
- name: delete resource provider
DELETE: $LAST_URL
status: 204
- name: 404 on deleted provider
DELETE: $LAST_URL
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: fail to get a provider
GET: /resource_providers/random_sauce
status: 404
response_json_paths:
$.errors[0].title: Not Found
- name: delete non-existing resource provider
DELETE: /resource_providers/d67370b5-4dc0-470d-a4fa-85e8e89abc6c
status: 404
response_strings:
- No resource provider with uuid d67370b5-4dc0-470d-a4fa-85e8e89abc6c found for delete
response_json_paths:
$.errors[0].title: Not Found
- name: post resource provider no uuid
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: a name
status: 200
response_headers:
location: //resource_providers/[a-f0-9-]+/
- name: post malformed json as json
POST: /resource_providers
request_headers:
content-type: application/json
data: '{"foo": }'
status: 400
response_strings:
- 'Malformed JSON:'
response_json_paths:
$.errors[0].title: Bad Request
- name: post bad uuid in resource provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: my bad rp
uuid: this is not a uuid
status: 400
response_strings:
- "Failed validating 'format'"
response_json_paths:
$.errors[0].title: Bad Request
- name: try to create resource provider with name exceed max characters
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: &name_exceeds_max_length_check This is a long text of 201 charactersssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss
status: 400
response_strings:
- "Failed validating 'maxLength'"
response_json_paths:
$.errors[0].title: Bad Request
- name: try to update resource provider with name exceed max characters
PUT: /resource_providers/$ENVIRON['RP_UUID']
request_headers:
content-type: application/json
data:
name: *name_exceeds_max_length_check
status: 400
response_strings:
- "Failed validating 'maxLength'"
response_json_paths:
$.errors[0].title: Bad Request
- name: confirm no cache-control headers before 1.15
GET: /resource_providers
request_headers:
openstack-api-version: placement 1.14
response_forbidden_headers:
- cache-control
- last-modified
- name: fail updating a parent to itself
PUT: /resource_providers/$ENVIRON['PARENT_PROVIDER_UUID']
request_headers:
content-type: application/json
data:
name: parent
parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID']
status: 400
response_strings:
- 'creating loop in the provider tree is not allowed.'
- name: fail updating the parent to point to its child
PUT: /resource_providers/$ENVIRON['PARENT_PROVIDER_UUID']
request_headers:
content-type: application/json
data:
name: parent
parent_provider_uuid: $ENVIRON['RP_UUID']
status: 400
response_strings:
- 'creating loop in the provider tree is not allowed.'
- name: create a resource provider with dashed uuid
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: rp with dashed uuid
uuid: 2290d4af-9e6e-400b-9d65-1ee01376f71a
status: 200
response_headers:
location: //resource_providers/[a-f0-9-]+/
- name: try to create with the same uuid but without dashes
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: rp with dashless uuid
uuid: 2290d4af9e6e400b9d651ee01376f71a
status: 409
response_strings:
- "Conflicting resource provider uuid: 2290d4af-9e6e-400b-9d65-1ee01376f71a already exists"
response_json_paths:
$.errors[0].title: Conflict

View File

@ -1,143 +0,0 @@
# Create a shared resource provider that shares a custom resource
# class with a compute node and confirm that it is returned when
# requesting resources.
#
# NOTE(cdent): raw uuids are used here instead of environment variables as
# there's no need to query on them or change them, but something has to be
# there.
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
content-type: application/json
accept: application/json
openstack-api-version: placement latest
tests:
- name: create compute node 1
POST: /resource_providers
data:
name: cn1
uuid: 8d830468-6395-46b0-b56a-f934a1d60bbe
status: 200
- name: cn1 inventory
PUT: /resource_providers/8d830468-6395-46b0-b56a-f934a1d60bbe/inventories
data:
resource_provider_generation: 0
inventories:
VCPU:
total: 20
MEMORY_MB:
total: 100000
status: 200
- name: create compute node 2
POST: /resource_providers
data:
name: cn2
uuid: ed6ea55d-01ce-4e11-ba97-13a4e5540b3e
status: 200
- name: cn2 inventory
PUT: /resource_providers/ed6ea55d-01ce-4e11-ba97-13a4e5540b3e/inventories
data:
resource_provider_generation: 0
inventories:
VCPU:
total: 20
MEMORY_MB:
total: 100000
DISK_GB:
total: 100000
status: 200
- name: create custom magic
PUT: /resource_classes/CUSTOM_MAGIC
status: 201
- name: create shared 1
POST: /resource_providers
data:
uuid: d450bd39-3b01-4355-9ea1-594f96594cf1
name: custom magic share
status: 200
- name: shared 1 inventory
PUT: /resource_providers/d450bd39-3b01-4355-9ea1-594f96594cf1/inventories
data:
resource_provider_generation: 0
inventories:
CUSTOM_MAGIC:
total: 5
status: 200
# no aggregate association
- name: get resources no agg
GET: /resource_providers?resources=VCPU:1,CUSTOM_MAGIC:1
response_json_paths:
$.resource_providers.`len`: 0
- name: get allocation candidates no agg
desc: this sometimes fails
GET: /allocation_candidates?resources=VCPU:1,CUSTOM_MAGIC:1
response_json_paths:
$.allocation_requests.`len`: 0
$.provider_summaries.`len`: 0
- name: aggregate shared
PUT: /resource_providers/d450bd39-3b01-4355-9ea1-594f96594cf1/aggregates
data:
aggregates:
- f3dc0f36-97d4-4daf-be0c-d71466da9c85
resource_provider_generation: 1
- name: aggregate cn1
PUT: /resource_providers/8d830468-6395-46b0-b56a-f934a1d60bbe/aggregates
data:
aggregates:
- f3dc0f36-97d4-4daf-be0c-d71466da9c85
resource_provider_generation: 1
# no shared trait
- name: get resources no shared
GET: /resource_providers?resources=VCPU:1,CUSTOM_MAGIC:1
response_json_paths:
$.resource_providers.`len`: 0
- name: get allocation candidates no shared
GET: /allocation_candidates?resources=VCPU:1,CUSTOM_MAGIC:1
response_json_paths:
$.allocation_requests.`len`: 0
$.provider_summaries.`len`: 0
- name: set trait shared
PUT: /resource_providers/d450bd39-3b01-4355-9ea1-594f96594cf1/traits
data:
resource_provider_generation: 2
traits:
- MISC_SHARES_VIA_AGGREGATE
# this should be zero because we only expect those resource providers which
# can fully satisfy the resources query themselves when making requests of
# /resource_providers. This may change in the future depending on use
# cases. This test and the next demonstrate and confirm that
# /resource_providers and /allocation_candidates have different behaviors.
- name: get resources shared
GET: /resource_providers?resources=VCPU:1,CUSTOM_MAGIC:1
response_json_paths:
$.resource_providers.`len`: 0
# this is one allocation request and two resource providers because
# at /allocation_candidates we expect those resource providers which
# can either fully satisfy the resources query or can do so with the
# assistance of a sharing provider.
- name: get allocation candidates shared
GET: /allocation_candidates?resources=VCPU:1,CUSTOM_MAGIC:1
response_json_paths:
$.allocation_requests.`len`: 1
$.provider_summaries.`len`: 2

View File

@ -1,55 +0,0 @@
# This tests the individual CRUD operations on
# /traits* and /resource_providers/{uuid}/traits using a non-admin user with an
# open policy configuration. The response validation is intentionally minimal.
fixtures:
- OpenPolicyFixture
defaults:
request_headers:
x-auth-token: user
accept: application/json
content-type: application/json
openstack-api-version: placement latest
tests:
- name: list traits
GET: /traits
status: 200
- name: create a trait
PUT: /traits/CUSTOM_TRAIT_X
status: 201
- name: show trait
GET: /traits/CUSTOM_TRAIT_X
status: 204
- name: create resource provider
POST: /resource_providers
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 200
- name: list resource provider traits
GET: /resource_providers/$ENVIRON['RP_UUID']/traits
status: 200
- name: update resource provider traits
PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
content-type: application/json
status: 200
data:
traits:
- CUSTOM_TRAIT_X
resource_provider_generation: 0
- name: delete resource provider traits
DELETE: /resource_providers/$ENVIRON['RP_UUID']/traits
status: 204
- name: delete trait
DELETE: /traits/CUSTOM_TRAIT_X
status: 204

View File

@ -1,487 +0,0 @@
fixtures:
- APIFixture
defaults:
request_headers:
x-auth-token: admin
# traits introduced in 1.6
openstack-api-version: placement 1.6
tests:
- name: create a trait without custom namespace
PUT: /traits/TRAIT_X
status: 400
response_strings:
- 'The trait is invalid. A valid trait must be no longer than 255 characters, start with the prefix \"CUSTOM_\" and use following characters: \"A\"-\"Z\", \"0\"-\"9\" and \"_\"'
- name: create a trait with invalid characters
PUT: /traits/CUSTOM_ABC:1
status: 400
response_strings:
- 'The trait is invalid. A valid trait must be no longer than 255 characters, start with the prefix \"CUSTOM_\" and use following characters: \"A\"-\"Z\", \"0\"-\"9\" and \"_\"'
- name: create a trait with name exceed max characters
PUT: /traits/CUSTOM_ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNO
status: 400
response_strings:
- 'The trait is invalid. A valid trait must be no longer than 255 characters, start with the prefix \"CUSTOM_\" and use following characters: \"A\"-\"Z\", \"0\"-\"9\" and \"_\"'
- name: create a trait earlier version
PUT: /traits/CUSTOM_TRAIT_1
request_headers:
openstack-api-version: placement 1.5
status: 404
- name: create a trait
PUT: /traits/CUSTOM_TRAIT_1
status: 201
response_headers:
location: //traits/CUSTOM_TRAIT_1/
response_forbidden_headers:
- content-type
# PUT in 1.6 version should not have cache headers
- cache-control
- last-modified
- name: create a trait which existed
PUT: /traits/CUSTOM_TRAIT_1
status: 204
response_headers:
location: //traits/CUSTOM_TRAIT_1/
response_forbidden_headers:
- content-type
- name: get a trait earlier version
GET: /traits/CUSTOM_TRAIT_1
request_headers:
openstack-api-version: placement 1.5
status: 404
- name: get a trait
GET: /traits/CUSTOM_TRAIT_1
status: 204
response_forbidden_headers:
- content-type
# In early versions cache headers should not be present
- cache-control
- last-modified
- name: get a non-existed trait
GET: /traits/NON_EXISTED
status: 404
- name: delete a trait earlier version
DELETE: /traits/CUSTOM_TRAIT_1
request_headers:
openstack-api-version: placement 1.5
status: 404
- name: delete a trait
DELETE: /traits/CUSTOM_TRAIT_1
status: 204
response_forbidden_headers:
- content-type
# DELETE in any version should not have cache headers
- cache-control
- last-modified
- name: delete a non-existed trait
DELETE: /traits/CUSTOM_NON_EXSITED
status: 404
- name: try to delete standard trait
DELETE: /traits/HW_CPU_X86_SSE
status: 400
response_strings:
- Cannot delete standard trait
- name: create CUSTOM_TRAIT_1
PUT: /traits/CUSTOM_TRAIT_1
status: 201
response_headers:
location: //traits/CUSTOM_TRAIT_1/
response_forbidden_headers:
- content-type
- name: create CUSTOM_TRAIT_2
PUT: /traits/CUSTOM_TRAIT_2
status: 201
response_headers:
location: //traits/CUSTOM_TRAIT_2/
response_forbidden_headers:
- content-type
# NOTE(cdent): This simply tests that traits we know should be
# present are in the results. We can't check length here because
# the standard traits, which will grow over time, are present.
- name: list traits
GET: /traits
status: 200
response_strings:
- CUSTOM_TRAIT_1
- CUSTOM_TRAIT_2
- MISC_SHARES_VIA_AGGREGATE
- HW_CPU_X86_SHA
- name: list traits earlier version
GET: /traits
request_headers:
openstack-api-version: placement 1.5
status: 404
- name: list traits with invalid format of name parameter
GET: /traits?name=in_abc
status: 400
response_strings:
- 'Badly formatted name parameter. Expected name query string parameter in form: ?name=[in|startswith]:[name1,name2|prefix]. Got: \"in_abc\"'
- name: list traits with name=in filter
GET: /traits?name=in:CUSTOM_TRAIT_1,CUSTOM_TRAIT_2
status: 200
response_json_paths:
$.traits.`len`: 2
response_strings:
- CUSTOM_TRAIT_1
- CUSTOM_TRAIT_2
- name: create CUSTOM_ANOTHER_TRAIT
PUT: /traits/CUSTOM_ANOTHER_TRAIT
status: 201
response_headers:
location: //traits/CUSTOM_ANOTHER_TRAIT/
response_forbidden_headers:
- content-type
- name: list traits with prefix
GET: /traits?name=startswith:CUSTOM_TRAIT
status: 200
response_json_paths:
$.traits.`len`: 2
response_strings:
- CUSTOM_TRAIT_1
- CUSTOM_TRAIT_2
- name: list traits with invalid parameters
GET: /traits?invalid=abc
status: 400
response_strings:
- "Invalid query string parameters: Additional properties are not allowed"
- name: list traits 1.14 no cache headers
GET: /traits
request_headers:
openstack-api-version: placement 1.14
response_forbidden_headers:
- cache-control
- last-modified
- name: list traits 1.15 has cache headers
GET: /traits
request_headers:
openstack-api-version: placement 1.15
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: get trait 1.14 no cache headers
GET: /traits/CUSTOM_TRAIT_1
request_headers:
openstack-api-version: placement 1.14
status: 204
response_forbidden_headers:
- cache-control
- last-modified
- name: get trait 1.15 has cache headers
GET: /traits/CUSTOM_TRAIT_1
request_headers:
openstack-api-version: placement 1.15
status: 204
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: put trait 1.14 no cache headers
PUT: /traits/CUSTOM_TRAIT_1
request_headers:
openstack-api-version: placement 1.14
status: 204
response_forbidden_headers:
- cache-control
- last-modified
- name: put trait 1.15 has cache headers
PUT: /traits/CUSTOM_TRAIT_1
request_headers:
openstack-api-version: placement 1.15
status: 204
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: post new resource provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 201
response_headers:
location: //resource_providers/[a-f0-9-]+/
response_forbidden_headers:
- content-type
- name: list traits for resource provider earlier version
GET: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
openstack-api-version: placement 1.5
status: 404
- name: list traits for resource provider without traits
GET: /resource_providers/$ENVIRON['RP_UUID']/traits
status: 200
response_json_paths:
$.resource_provider_generation: 0
$.traits.`len`: 0
response_forbidden_headers:
# In 1.6 no cache headers
- cache-control
- last-modified
- name: set traits for resource provider earlier version
PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
content-type: application/json
openstack-api-version: placement 1.5
status: 404
- name: set traits for resource provider
PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
content-type: application/json
status: 200
data:
traits:
- CUSTOM_TRAIT_1
- CUSTOM_TRAIT_2
resource_provider_generation: 0
response_json_paths:
$.resource_provider_generation: 1
$.traits.`len`: 2
response_strings:
- CUSTOM_TRAIT_1
- CUSTOM_TRAIT_2
response_forbidden_headers:
# In 1.6 no cache headers
- cache-control
- last-modified
- name: get associated traits
GET: /traits?associated=true
status: 200
response_json_paths:
$.traits.`len`: 2
response_strings:
- CUSTOM_TRAIT_1
- CUSTOM_TRAIT_2
- name: get associated traits with invalid value
GET: /traits?associated=xyz
status: 400
response_strings:
- 'The query parameter \"associated\" only accepts \"true\" or \"false\"'
- name: set traits for resource provider without resource provider generation
PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
content-type: application/json
status: 400
data:
traits:
- CUSTOM_TRAIT_1
- CUSTOM_TRAIT_2
response_strings:
- "'resource_provider_generation' is a required property"
- name: set traits for resource provider with invalid resource provider generation
PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
content-type: application/json
status: 400
data:
traits:
- CUSTOM_TRAIT_1
resource_provider_generation: invalid_generation
response_strings:
- "'invalid_generation' is not of type 'integer'"
- name: set traits for resource provider with conflict generation
PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
content-type: application/json
openstack-api-version: placement 1.23
status: 409
data:
traits:
- CUSTOM_TRAIT_1
resource_provider_generation: 5
response_strings:
- Resource provider's generation already changed. Please update the generation and try again.
response_json_paths:
$.errors[0].code: placement.concurrent_update
- name: set non existed traits for resource provider
PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
content-type: application/json
status: 400
data:
traits:
- NON_EXISTED_TRAIT1
- NON_EXISTED_TRAIT2
- CUSTOM_TRAIT_1
resource_provider_generation: 1
response_strings:
- No such trait
- NON_EXISTED_TRAIT1
- NON_EXISTED_TRAIT2
- name: set traits for resource provider with invalid type of traits
PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
content-type: application/json
status: 400
data:
traits: invalid_type
resource_provider_generation: 1
response_strings:
- "'invalid_type' is not of type 'array'"
- name: set traits for resource provider with additional properties
PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
content-type: application/json
status: 400
data:
traits:
- CUSTOM_TRAIT_1
- CUSTOM_TRAIT_2
resource_provider_generation: 1
additional: additional
response_strings:
- 'Additional properties are not allowed'
- name: set traits for non_existed resource provider
PUT: /resource_providers/non_existed/traits
request_headers:
content-type: application/json
data:
traits:
- CUSTOM_TRAIT_1
resource_provider_generation: 1
status: 404
response_strings:
- No resource provider with uuid non_existed found
- name: list traits for resource provider
GET: /resource_providers/$ENVIRON['RP_UUID']/traits
status: 200
response_json_paths:
$.resource_provider_generation: 1
$.traits.`len`: 2
response_strings:
- CUSTOM_TRAIT_1
- CUSTOM_TRAIT_2
- name: delete an in-use trait
DELETE: /traits/CUSTOM_TRAIT_1
status: 409
response_strings:
- The trait CUSTOM_TRAIT_1 is in use by a resource provider.
- name: list traits for non_existed resource provider
GET: /resource_providers/non_existed/traits
request_headers:
content-type: application/json
status: 404
response_strings:
- No resource provider with uuid non_existed found
- name: delete traits for resource provider earlier version
DELETE: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
openstack-api-version: placement 1.5
status: 404
- name: delete traits for resource provider
DELETE: /resource_providers/$ENVIRON['RP_UUID']/traits
status: 204
response_forbidden_headers:
- content-type
- name: delete traits for non_existed resource provider
DELETE: /resource_providers/non_existed/traits
status: 404
response_strings:
- No resource provider with uuid non_existed found
- name: empty traits for resource provider 1.15 has cache headers
GET: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
openstack-api-version: placement 1.15
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: update rp trait 1.14 no cache headers
PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
data:
traits:
- CUSTOM_TRAIT_1
- CUSTOM_TRAIT_2
resource_provider_generation: 2
request_headers:
openstack-api-version: placement 1.14
content-type: application/json
response_forbidden_headers:
- cache-control
- last-modified
- name: update rp trait 1.15 has cache headers
PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
data:
traits:
- CUSTOM_TRAIT_1
- CUSTOM_TRAIT_2
resource_provider_generation: 3
request_headers:
openstack-api-version: placement 1.15
content-type: application/json
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: list traits for resource provider 1.14 no cache headers
GET: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
openstack-api-version: placement 1.14
response_forbidden_headers:
- cache-control
- last-modified
- name: list traits for resource provider 1.15 has cache headers
GET: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
openstack-api-version: placement 1.15
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/

View File

@ -1,40 +0,0 @@
fixtures:
- APIFixture
defaults:
request_headers:
accept: application/json
x-auth-token: admin
tests:
- name: get an encoded snowman
desc: this should fall through to a NotFound on the resource provider object
GET: /resources_providers/%e2%98%83
status: 404
- name: post resource provider with snowman
POST: /resource_providers
request_headers:
content-type: application/json
data:
    name: ☃
uuid: $ENVIRON['RP_UUID']
status: 201
response_headers:
location: //resource_providers/[a-f0-9-]+/
- name: get that resource provider
GET: $LOCATION
  response_json_paths:
    $.name: ☃
- name: query by name
GET: /resource_providers?name=%e2%98%83
  response_json_paths:
    $.resource_providers[0].name: ☃
- name: delete that one
DELETE: /resource_providers/$ENVIRON['RP_UUID']
status: 204

View File

@ -1,33 +0,0 @@
# This tests the individual CRUD operations on
# /resource_providers/{uuid}/usages and /usages
# using a non-admin user with an open policy configuration. The
# response validation is intentionally minimal.
fixtures:
- OpenPolicyFixture
defaults:
request_headers:
x-auth-token: user
accept: application/json
openstack-api-version: placement latest
tests:
- name: create provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 200
- name: list provider usages
GET: /resource_providers/$ENVIRON['RP_UUID']/usages
response_json_paths:
usages: {}
- name: get total usages for project
  GET: /usages?project_id=$ENVIRON['PROJECT_ID']
response_json_paths:
usages: {}

View File

@ -1,120 +0,0 @@
# More interesting tests for usages are in with_allocations
fixtures:
- APIFixture
defaults:
request_headers:
accept: application/json
x-auth-token: admin
tests:
- name: fail to get usages for missing provider
GET: /resource_providers/fae14fa3-4b43-498c-a33c-4a1d00edb577/usages
status: 404
response_strings:
- No resource provider with uuid fae14fa3-4b43-498c-a33c-4a1d00edb577 found
response_json_paths:
$.errors[0].title: Not Found
- name: create provider
POST: /resource_providers
request_headers:
content-type: application/json
data:
name: a name
status: 201
- name: check provider exists
GET: $LOCATION
response_json_paths:
name: a name
- name: get empty usages
GET: $LAST_URL/usages
request_headers:
content-type: application/json
response_json_paths:
usages: {}
- name: get usages no cache headers base microversion
GET: $LAST_URL
response_forbidden_headers:
- last-modified
- cache-control
- name: get usages cache headers 1.15
GET: $LAST_URL
request_headers:
openstack-api-version: placement 1.15
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/
- name: get total usages earlier version
GET: /usages?project_id=$ENVIRON['PROJECT_ID']
request_headers:
openstack-api-version: placement 1.8
status: 404
- name: get total usages no project or user
GET: /usages
request_headers:
openstack-api-version: placement 1.9
status: 400
- name: get empty usages with project id
  GET: /usages?project_id=$ENVIRON['PROJECT_ID']
request_headers:
openstack-api-version: placement 1.9
response_json_paths:
usages: {}
- name: get empty usages with project id and user id
GET: /usages?project_id=$ENVIRON['PROJECT_ID']&user_id=78725f09-5c01-4c9e-97a5-98d75e1e32b1
request_headers:
openstack-api-version: placement 1.9
response_json_paths:
usages: {}
- name: get total usages project_id less than min length
GET: /usages?project_id=
request_headers:
openstack-api-version: placement 1.9
status: 400
response_strings:
- "Failed validating 'minLength'"
- name: get total usages user_id less than min length
GET: /usages?project_id=$ENVIRON['PROJECT_ID']&user_id=
request_headers:
openstack-api-version: placement 1.9
status: 400
response_strings:
- "Failed validating 'minLength'"
- name: get total usages project_id exceeds max length
GET: /usages?project_id=78725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b1
request_headers:
openstack-api-version: placement 1.9
status: 400
response_strings:
- "Failed validating 'maxLength'"
- name: get total usages user_id exceeds max length
GET: /usages?project_id=$ENVIRON['PROJECT_ID']&user_id=78725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b1
request_headers:
openstack-api-version: placement 1.9
status: 400
response_strings:
- "Failed validating 'maxLength'"
- name: get total usages with additional param
GET: /usages?project_id=$ENVIRON['PROJECT_ID']&user_id=78725f09-5c01-4c9e-97a5-98d75e1e32b1&dummy=1
request_headers:
openstack-api-version: placement 1.9
status: 400
response_strings:
- "Additional properties are not allowed"

View File

@ -1,159 +0,0 @@
fixtures:
- AllocationFixture
defaults:
request_headers:
x-auth-token: admin
tests:
- name: confirm inventories
GET: /resource_providers/$ENVIRON['RP_UUID']/inventories
response_json_paths:
$.inventories.DISK_GB.total: 2048
$.inventories.DISK_GB.reserved: 0
- name: get usages
GET: /resource_providers/$ENVIRON['RP_UUID']/usages
response_headers:
# use a regex here because charset, which is not only not
# required but superfluous, is present
content-type: /application/json/
response_json_paths:
$.resource_provider_generation: 5
$.usages.DISK_GB: 1020
$.usages.VCPU: 7
- name: get allocations
GET: /resource_providers/$ENVIRON['RP_UUID']/allocations
response_headers:
content-type: /application/json/
response_json_paths:
$.allocations.`len`: 3
$.allocations["$ENVIRON['CONSUMER_0']"].resources:
DISK_GB: 1000
$.allocations["$ENVIRON['CONSUMER_ID']"].resources:
VCPU: 6
$.allocations["$ENVIRON['ALT_CONSUMER_ID']"].resources:
VCPU: 1
DISK_GB: 20
$.resource_provider_generation: 5
- name: fail to delete resource provider
DELETE: /resource_providers/$ENVIRON['RP_UUID']
status: 409
response_strings:
- "Unable to delete resource provider $ENVIRON['RP_UUID']: Resource provider has allocations."
- name: fail to change inventory via put 1.23
PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
accept: application/json
content-type: application/json
openstack-api-version: placement 1.23
data:
resource_provider_generation: 5
inventories: {}
status: 409
response_json_paths:
$.errors[0].code: placement.inventory.inuse
- name: fail to delete all inventory
DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
accept: application/json
openstack-api-version: placement 1.5
status: 409
response_headers:
content-type: /application/json/
response_strings:
- "Inventory for 'VCPU, DISK_GB' on resource provider '$ENVIRON['RP_UUID']' in use"
- name: fail to delete all inventory 1.23
DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
accept: application/json
openstack-api-version: placement 1.23
status: 409
response_headers:
content-type: /application/json/
response_strings:
- "Inventory for 'VCPU, DISK_GB' on resource provider '$ENVIRON['RP_UUID']' in use"
response_json_paths:
$.errors[0].code: placement.inventory.inuse
# We can change inventory in a way that makes existing allocations exceed the
# new capacity. This is allowed.
- name: change inventory despite capacity exceeded
PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories
request_headers:
accept: application/json
content-type: application/json
data:
resource_provider_generation: 5
inventories:
DISK_GB:
total: 1019
VCPU:
total: 97
status: 200
- name: get total usages by project
GET: /usages?project_id=$ENVIRON['PROJECT_ID']
request_headers:
openstack-api-version: placement 1.9
status: 200
response_json_paths:
$.usages.DISK_GB: 1020
$.usages.VCPU: 7
- name: get total usages by project and user
GET: /usages?project_id=$ENVIRON['PROJECT_ID']&user_id=$ENVIRON['USER_ID']
request_headers:
openstack-api-version: placement 1.9
status: 200
response_json_paths:
$.usages.DISK_GB: 1000
$.usages.VCPU: 6
- name: get total usages by project and alt user
GET: /usages?project_id=$ENVIRON['PROJECT_ID']&user_id=$ENVIRON['ALT_USER_ID']
request_headers:
openstack-api-version: placement 1.9
status: 200
# In pre 1.15 microversions cache headers not present
response_forbidden_headers:
- last-modified
- cache-control
response_json_paths:
$.usages.DISK_GB: 20
$.usages.VCPU: 1
- name: get allocations without project and user
GET: /allocations/$ENVIRON['CONSUMER_ID']
request_headers:
openstack-api-version: placement 1.11
accept: application/json
response_json_paths:
# only one key in the top level object
$.`len`: 1
- name: get allocations with project and user
GET: /allocations/$ENVIRON['CONSUMER_ID']
request_headers:
openstack-api-version: placement 1.12
accept: application/json
response_json_paths:
$.project_id: $ENVIRON['PROJECT_ID']
$.user_id: $ENVIRON['USER_ID']
$.`len`: 3
- name: get total usages with cache headers
GET: /usages?project_id=$ENVIRON['PROJECT_ID']&user_id=$ENVIRON['ALT_USER_ID']
request_headers:
openstack-api-version: placement 1.15
response_headers:
cache-control: no-cache
# Does last-modified look like a legit timestamp?
last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/

View File

@ -1,77 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel
from nova.api.openstack.placement import direct
from nova.tests.functional.api.openstack.placement import base
CONF = cfg.CONF
class TestDirect(base.TestCase):
    """Exercise the placement "direct" client against a live handler.

    Each test opens a PlacementDirect context manager, which provides a
    requests-style client wired straight into the placement WSGI app.
    """

    def test_direct_is_there(self):
        # The version document at the root confirms the service is up.
        with direct.PlacementDirect(CONF) as client:
            resp = client.get('/')
            self.assertTrue(resp)
            body = resp.json()
            self.assertEqual('v1.0', body['versions'][0]['id'])

    def test_get_resource_providers(self):
        # A freshly started service has an empty provider list.
        with direct.PlacementDirect(CONF) as client:
            resp = client.get('/resource_providers')
            self.assertTrue(resp)
            body = resp.json()
            self.assertEqual([], body['resource_providers'])

    def test_create_resource_provider(self):
        payload = {'name': 'fake'}
        with direct.PlacementDirect(CONF) as client:
            create_resp = client.post('/resource_providers', json=payload)
            self.assertTrue(create_resp)
            list_resp = client.get('/resource_providers')
            self.assertTrue(list_resp)
            body = list_resp.json()
            self.assertEqual(1, len(body['resource_providers']))

    def test_json_validation_happens(self):
        # Unknown properties in the request body are rejected with a 400.
        payload = {'name': 'fake', 'cowsay': 'moo'}
        with direct.PlacementDirect(CONF) as client:
            resp = client.post('/resource_providers', json=payload)
            self.assertFalse(resp)
            self.assertEqual(400, resp.status_code)

    def test_microversion_handling(self):
        with direct.PlacementDirect(CONF) as client:
            # create parent
            parent_data = {'name': uuidsentinel.p_rp,
                           'uuid': uuidsentinel.p_rp}
            resp = client.post('/resource_providers', json=parent_data)
            self.assertTrue(resp, resp.text)
            # attempt to create child
            data = {'name': 'child', 'parent_provider_uuid': uuidsentinel.p_rp}
            # no microversion, 400
            resp = client.post('/resource_providers', json=data)
            self.assertFalse(resp)
            self.assertEqual(400, resp.status_code)
            # low microversion, 400
            resp = client.post('/resource_providers', json=data,
                               microversion='1.13')
            self.assertFalse(resp)
            self.assertEqual(400, resp.status_code)
            # 1.14 introduced parent_provider_uuid, so this succeeds
            resp = client.post('/resource_providers', json=data,
                               microversion='1.14')
            self.assertTrue(resp, resp.text)

View File

@ -1,44 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslotest import output
import wsgi_intercept
from gabbi import driver
from nova.tests.functional.api.openstack.placement.fixtures import capture
# TODO(cdent): This whitespace blight will go away post extraction.
from nova.tests.functional.api.openstack.placement.fixtures \
import gabbits as fixtures
# Check that wsgi application response headers are always
# native str.
wsgi_intercept.STRICT_RESPONSE_HEADERS = True
TESTS_DIR = 'gabbits'
def load_tests(loader, tests, pattern):
    """Provide a TestSuite to the discovery process."""
    gabbit_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
    # These inner fixtures provide per test request output and log
    # capture, for cleaner results reporting.
    per_test_fixtures = [
        output.CaptureOutput,
        capture.Logging,
    ]
    return driver.build_tests(
        gabbit_dir,
        loader,
        host=None,
        test_loader_name=__name__,
        intercept=fixtures.setup_app,
        inner_fixtures=per_test_fixtures,
        fixture_module=fixtures)

View File

@ -1,50 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.api.openstack.placement import direct
from nova.api.openstack.placement import handler
from nova.tests.functional.api.openstack.placement import base
CONF = cfg.CONF
class TestVerifyPolicy(base.TestCase):
    """Verify that all defined placement routes have a policy."""

    # Paths that don't need a policy check
    EXCEPTIONS = ['/', '']

    def _test_request_403(self, client, method, route):
        headers = {
            'x-auth-token': 'user',
            'content-type': 'application/json'
        }
        caller = getattr(client, method.lower())
        # We send an empty request body on all requests. Because
        # policy handling comes before other processing, the value
        # of the body is irrelevant.
        response = caller(route, data='', headers=headers)
        self.assertEqual(
            403, response.status_code,
            'method %s on route %s is open for user, status: %s' %
            (method, route, response.status_code))

    def test_verify_policy(self):
        # Walk every declared route/method pair and demand a 403 for
        # a plain user, except for the version document paths.
        with direct.PlacementDirect(CONF, latest_microversion=True) as client:
            for route, methods in handler.ROUTE_DECLARATIONS.items():
                if route in self.EXCEPTIONS:
                    continue
                for method in methods:
                    self._test_request_403(client, method, route)

View File

@ -11,6 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import os

View File

@ -107,8 +107,6 @@ class ApiSampleTestBaseV21(testscenarios.WithScenarios,
if not self.SUPPORTS_CELLS:
self.useFixture(fixtures.Database())
self.useFixture(fixtures.Database(database='api'))
# FIXME(cdent): Placement db already provided by IntegratedHelpers
self.useFixture(fixtures.Database(database='placement'))
self.useFixture(fixtures.DefaultFlavorsFixture())
self.useFixture(fixtures.SingleCellSimple())

View File

@ -22,7 +22,7 @@ from nova import conf
from nova import context
from nova import objects
from nova import rc_fields as fields
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.functional import test_report_client as test_base
from nova.virt import driver as virt_driver
@ -200,7 +200,7 @@ class IronicResourceTrackerTest(test_base.SchedulerReportClientTestBase):
usage for an instance, the nodes still have their unique stats and
nothing is leaked from node to node.
"""
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
# Before the resource tracker is "initialized", we shouldn't have
# any compute nodes or stats in the RT's cache...
self.assertEqual(0, len(self.rt.compute_nodes))

View File

@ -0,0 +1,150 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures solely for functional tests."""
from __future__ import absolute_import
import fixtures
from keystoneauth1 import adapter as ka
from keystoneauth1 import session as ks
from placement.tests.functional.fixtures import placement as placement_fixtures
from requests import adapters
from nova.tests.functional.api import client
class PlacementApiClient(object):
    """Minimal placement API client backed by a PlacementFixture.

    Each call delegates to the fixture's fake request methods and wraps
    the raw response in an ``APIResponse``.
    """

    def __init__(self, placement_fixture):
        # The fixture that intercepts and services the HTTP calls.
        self.fixture = placement_fixture

    def get(self, url, **kwargs):
        raw = self.fixture._fake_get(None, url, **kwargs)
        return client.APIResponse(raw)

    def put(self, url, body, **kwargs):
        raw = self.fixture._fake_put(None, url, body, **kwargs)
        return client.APIResponse(raw)

    def post(self, url, body, **kwargs):
        raw = self.fixture._fake_post(None, url, body, **kwargs)
        return client.APIResponse(raw)
class PlacementFixture(placement_fixtures.PlacementFixture):
    """A fixture providing a real, in-process placement service.

    Runs a local WSGI server bound on a free port and having the Placement
    application with NoAuth middleware.

    This fixture also prevents calling the ServiceCatalog for getting the
    endpoint.

    It's possible to ask for a specific token when running the fixtures so
    all calls would be passing this token.

    Most of the time users of this fixture will also want the placement
    database fixture to be called first, so that is done automatically. If
    that is not desired pass ``db=False`` when initializing the fixture
    and establish the database yourself with:

        self.useFixture(placement_fixtures.Database(set_config=True))
    """

    def setUp(self):
        super(PlacementFixture, self).setUp()
        # Turn off manipulation of socket_options in TCPKeepAliveAdapter
        # to keep wsgi-intercept happy. Replace it with the method
        # from its superclass.
        self.useFixture(fixtures.MonkeyPatch(
            'keystoneauth1.session.TCPKeepAliveAdapter.init_poolmanager',
            adapters.HTTPAdapter.init_poolmanager))
        self._client = ka.Adapter(ks.Session(auth=None), raise_exc=False)
        # NOTE(sbauza): We need to mock the scheduler report client because
        # we need to fake Keystone by directly calling the endpoint instead
        # of looking up the service catalog, like we did for the OSAPIFixture.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.scheduler.client.report.SchedulerReportClient.get',
            self._fake_get))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.scheduler.client.report.SchedulerReportClient.post',
            self._fake_post))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.scheduler.client.report.SchedulerReportClient.put',
            self._fake_put))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.scheduler.client.report.SchedulerReportClient.delete',
            self._fake_delete))
        # Convenience client so tests can speak to placement directly.
        self.api = PlacementApiClient(self)

    @staticmethod
    def _update_headers_with_version(headers, **kwargs):
        """Add an OpenStack-API-Version header when a microversion is asked.

        :param headers: dict of request headers, mutated in place.
        :param kwargs: request kwargs; only ``version`` is inspected.
        """
        version = kwargs.get("version")
        if version is not None:
            # TODO(mriedem): Perform some version discovery at some point.
            headers.update({
                'OpenStack-API-Version': 'placement %s' % version
            })

    def _headers(self, **kwargs):
        """Build the headers shared by every fake verb.

        Returns a dict carrying the auth token and, when a ``version``
        kwarg was supplied, the placement microversion header.
        """
        # TODO(sbauza): The current placement NoAuthMiddleware returns a 401
        # in case a token is not provided. We should change that by creating
        # a fake token so we could remove adding the header below.
        headers = {'x-auth-token': self.token}
        self._update_headers_with_version(headers, **kwargs)
        return headers

    def _fake_get(self, *args, **kwargs):
        """Replacement for SchedulerReportClient.get hitting the local
        placement endpoint directly (args[0] is the report client itself).
        """
        (url,) = args[1:]
        return self._client.get(
            url,
            endpoint_override=self.endpoint,
            headers=self._headers(**kwargs))

    def _fake_post(self, *args, **kwargs):
        """Replacement for SchedulerReportClient.post."""
        (url, data) = args[1:]
        # NOTE(sdague): using json= instead of data= sets the
        # media type to application/json for us. Placement API is
        # more sensitive to this than other APIs in the OpenStack
        # ecosystem.
        return self._client.post(
            url, json=data,
            endpoint_override=self.endpoint,
            headers=self._headers(**kwargs))

    def _fake_put(self, *args, **kwargs):
        """Replacement for SchedulerReportClient.put."""
        (url, data) = args[1:]
        # NOTE(sdague): using json= instead of data= sets the
        # media type to application/json for us. Placement API is
        # more sensitive to this than other APIs in the OpenStack
        # ecosystem.
        return self._client.put(
            url, json=data,
            endpoint_override=self.endpoint,
            headers=self._headers(**kwargs))

    def _fake_delete(self, *args, **kwargs):
        """Replacement for SchedulerReportClient.delete.

        Unlike the original, this honors a requested ``version`` kwarg
        (microversion header) for consistency with the other verbs
        instead of silently dropping it; callers that pass no version
        see identical behavior.
        """
        (url,) = args[1:]
        return self._client.delete(
            url,
            endpoint_override=self.endpoint,
            headers=self._headers(**kwargs))

View File

@ -33,6 +33,7 @@ from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client as api_client
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.unit import cast_as_call
from nova.tests.unit import fake_notifier
import nova.tests.unit.image.fake
@ -86,8 +87,7 @@ class _IntegratedTestBase(test.TestCase):
nova.tests.unit.image.fake.stub_out_image_service(self)
self.useFixture(cast_as_call.CastAsCall(self))
self.useFixture(nova_fixtures.Database(database='placement'))
placement = self.useFixture(nova_fixtures.PlacementFixture())
placement = self.useFixture(func_fixtures.PlacementFixture())
self.placement_api = placement.api
self._setup_services()
@ -373,7 +373,7 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)
placement = self.useFixture(nova_fixtures.PlacementFixture())
placement = self.useFixture(func_fixtures.PlacementFixture())
self.placement_api = placement.api
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -16,7 +16,7 @@
import fixtures
import mock
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import test_servers as base
from nova.tests.unit.virt.libvirt import fake_imagebackend
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
@ -43,7 +43,7 @@ class ServersTestBase(base.ServersTestBase):
'nova.virt.libvirt.guest.libvirt',
fakelibvirt))
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
# Mock the 'get_connection' function, as we're going to need to provide
# custom capabilities for each test

View File

@ -22,6 +22,7 @@ from oslo_utils import fixture as utils_fixture
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests import json_ref
from nova.tests.unit.api.openstack.compute import test_services
@ -83,7 +84,7 @@ class NotificationSampleTestBase(test.TestCase,
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
context_patcher = self.mock_gen_request_id = mock.patch(
'oslo_context.context.generate_request_id',

View File

@ -20,6 +20,7 @@ import mock
import nova
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.unit import cast_as_call
from nova.tests.unit import policy_fixture
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
@ -34,7 +35,7 @@ class TestSerialConsoleLiveMigrate(test.TestCase):
super(TestSerialConsoleLiveMigrate, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
# Replace libvirt with fakelibvirt

View File

@ -18,6 +18,7 @@ import nova.compute.resource_tracker
from nova import exception
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.unit import cast_as_call
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
@ -50,7 +51,7 @@ class TestRetryBetweenComputeNodeBuilds(test.TestCase):
# We need the computes reporting into placement for the filter
# scheduler to pick a host.
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -21,6 +21,7 @@ from nova.compute import api as compute_api
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.unit import cast_as_call
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
@ -54,7 +55,7 @@ class TestLocalDeleteAttachedVolumes(test.TestCase):
# The NeutronFixture is needed to stub out validate_networks in API.
self.useFixture(nova_fixtures.NeutronFixture(self))
# Use the PlacementFixture to avoid annoying warnings in the logs.
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api

View File

@ -10,8 +10,14 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from placement import conf as placement_conf
from placement.tests import fixtures as placement_db
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
@ -55,10 +61,17 @@ class TestLocalDeleteAllocations(test.TestCase,
have been cleaned up once the nova-compute service restarts.
In this scenario we conditionally use the PlacementFixture to simulate
the case that nova-api isn't configured to talk to placement.
the case that nova-api isn't configured to talk to placement, thus we
need to manage the placement database independently.
"""
config = cfg.ConfigOpts()
placement_config = self.useFixture(config_fixture.Config(config))
placement_conf.register_opts(config)
self.useFixture(placement_db.Database(placement_config,
set_config=True))
# Get allocations, make sure they are 0.
with nova_fixtures.PlacementFixture() as placement:
with func_fixtures.PlacementFixture(
conf_fixture=placement_config, db=False) as placement:
compute = self.start_service('compute')
placement_api = placement.api
resp = placement_api.get('/resource_providers')
@ -89,7 +102,8 @@ class TestLocalDeleteAllocations(test.TestCase,
self.api.delete_server(server['id'])
self._wait_until_deleted(server)
with nova_fixtures.PlacementFixture() as placement:
with func_fixtures.PlacementFixture(
conf_fixture=placement_config, db=False) as placement:
placement_api = placement.api
# Assert usages are still non-zero.
usages_during = self._get_usages(placement_api, rp_uuid)
@ -111,7 +125,7 @@ class TestLocalDeleteAllocations(test.TestCase,
"""Tests that the compute API deletes allocations when the compute
service on which the instance was running is down.
"""
placement_api = self.useFixture(nova_fixtures.PlacementFixture()).api
placement_api = self.useFixture(func_fixtures.PlacementFixture()).api
compute = self.start_service('compute')
# Get allocations, make sure they are 0.
resp = placement_api.get('/resource_providers')

View File

@ -14,6 +14,7 @@
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
@ -34,7 +35,7 @@ class ServerTagsFilteringTest(test.TestCase,
# The NeutronFixture is needed to stub out validate_networks in API.
self.useFixture(nova_fixtures.NeutronFixture(self))
# Use the PlacementFixture to avoid annoying warnings in the logs.
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api

View File

@ -13,6 +13,7 @@
from nova.scheduler import weights
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import cast_as_call
from nova.tests.unit.image import fake as image_fake
@ -58,7 +59,7 @@ class SchedulerOnlyChecksTargetTest(test.TestCase,
# We need the computes reporting into placement for the filter
# scheduler to pick a host.
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -18,6 +18,7 @@ from oslo_log import log as logging
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
from nova.tests.unit import fake_notifier
@ -46,7 +47,7 @@ class FailedEvacuateStateTests(test.TestCase,
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -14,6 +14,7 @@ import time
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
@ -46,7 +47,7 @@ class TestLiveMigrateOneOfConcurrentlyCreatedInstances(
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -16,6 +16,7 @@ from nova import objects
from nova.scheduler import weights
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
@ -50,7 +51,7 @@ class TestRequestSpecRetryReschedule(test.TestCase,
# We need the computes reporting into placement for the filter
# scheduler to pick a host.
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -13,6 +13,7 @@
from nova import exception
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
@ -46,7 +47,7 @@ class TestRescheduleWithServerGroup(test.TestCase,
# We need the computes reporting into placement for the filter
# scheduler to pick a host.
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -12,6 +12,7 @@
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
from nova.tests.unit import fake_notifier
@ -39,7 +40,7 @@ class TestParallelEvacuationWithServerGroup(
# We need the computes reporting into placement for the filter
# scheduler to pick a host.
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -12,6 +12,7 @@
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
@ -48,7 +49,7 @@ class TestResizeWithNoAllocationScheduler(
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -13,6 +13,7 @@
from nova import config
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fakes
from nova.tests.unit import policy_fixture
@ -40,7 +41,7 @@ class TestBootFromVolumeIsolatedHostsFilter(
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -13,6 +13,7 @@
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
from nova.tests.unit import fake_notifier
@ -43,7 +44,7 @@ class TestEvacuationWithSourceReturningDuringRebuild(
# We need the computes reporting into placement for the filter
# scheduler to pick a host.
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -12,6 +12,7 @@
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit import policy_fixture
@ -40,7 +41,7 @@ class TestMultiCreateServerGroupMemberOverQuota(
self.flags(server_group_members=2, group='quota')
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -14,6 +14,7 @@ from nova.scheduler import filter_scheduler
from nova.scheduler import weights
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
@ -49,7 +50,7 @@ class AntiAffinityMultiCreateRequest(test.TestCase,
super(AntiAffinityMultiCreateRequest, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -12,6 +12,7 @@
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
@ -39,7 +40,7 @@ class TestRescheduleWithVolumesAttached(
fake_network.set_stub_network_methods(self)
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -12,6 +12,7 @@
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
@ -40,7 +41,7 @@ class ColdMigrateTargetHostThenLiveMigrateTest(
super(ColdMigrateTargetHostThenLiveMigrateTest, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))

View File

@ -16,6 +16,7 @@ from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import policy_fixture
@ -50,7 +51,7 @@ class BootFromVolumeOverQuotaRaceDeleteTest(
# Use the standard fixtures.
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
self.api = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1')).api
# Use microversion 2.52 which allows creating a server with tags.

View File

@ -21,6 +21,7 @@ from nova import context as nova_context
from nova.scheduler import weights
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
@ -71,7 +72,7 @@ class AggregateRequestFiltersTest(test.TestCase,
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.AllServicesCurrent())
placement = self.useFixture(nova_fixtures.PlacementFixture())
placement = self.useFixture(func_fixtures.PlacementFixture())
self.placement_api = placement.api
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
@ -381,7 +382,7 @@ class TestAggregateMultiTenancyIsolationFilter(
# Stub out glance, placement and neutron.
nova.tests.unit.image.fake.stub_out_image_service(self)
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.useFixture(nova_fixtures.PlacementFixture())
self.useFixture(func_fixtures.PlacementFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
# Start nova services.
self.start_service('conductor')

Some files were not shown because too many files have changed in this diff Show More