author     Chris Dent <cdent@anticdent.org>  2018-11-14 14:02:25 +0100
committer  Chris Dent <cdent@anticdent.org>  2018-12-12 18:46:49 +0000
commit     787bb336065155d3923af0d2bfd397ebe857618e (patch)
tree       e5d179df99fd7d014788e77e2f517b6f3b94072b
parent     c72dafad8011198b78fd906f2fc7d1126e3f79f3 (diff)
Use external placement in functional tests
Adjust the fixtures used by the functional tests so they use the placement database and web fixtures defined by placement code. To avoid making redundant changes, the solely placement-related unit and functional tests are removed, but the placement code itself is not (yet).

openstack-placement is required by the functional tests. It is not added to test-requirements as we do not want unit tests to depend on placement in any way, and we enforce this by not having placement in the test env. The concept of tox-siblings is used to ensure that the placement requirement will be satisfied correctly if there is a depends-on. To make this happen, the functional jobs defined in .zuul.yaml are updated to require openstack/placement. tox.ini has to be updated to use an envdir with the same name as the job; otherwise the tox siblings role in Ansible cannot work.

The handling of the placement fixtures is moved out of nova/test.py into the functional tests that actually use it, because we do not want unit tests (which get the base test class from test.py) to have anything to do with placement. This requires adjusting some test files to use absolute imports. Similarly, a test of the comparison function for the api samples tests is moved into functional, because it depends on placement functionality.

TestUpgradeCheckResourceProviders in unit.cmd.test_status is moved into a new test file: nova/tests/functional/test_nova_status.py. This is done because it requires the PlacementFixture, which is only available to functional tests. A MonkeyPatch is required in the test to make sure that the right context managers are used at the right time in the command itself (otherwise some tables do not exist). In the test itself, to avoid speaking directly to the placement database, which would require manipulating the RequestContext objects, resource providers are now created over the API.

Co-Authored-By: Balazs Gibizer <balazs.gibizer@ericsson.com>
Change-Id: Idaed39629095f86d24a54334c699a26c218c6593
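For readers following along, here is a minimal sketch of what functional-test setup looks like after this change, assuming the wrapper fixture added in nova/tests/functional/fixtures.py by this commit; the test class itself is hypothetical and the attribute names are illustrative rather than verbatim:

    # A hedged sketch of post-change fixture usage; the module paths follow
    # the files touched by this commit, but this test class is hypothetical.
    from nova import test
    from nova.tests.functional import fixtures as func_fixtures


    class ServersTestExample(test.TestCase):
        def setUp(self):
            super(ServersTestExample, self).setUp()
            # Placement setup no longer happens in nova/test.py; functional
            # tests opt in explicitly, getting the database and web fixtures
            # defined by placement code via the wrapper fixture.
            self.placement = self.useFixture(func_fixtures.PlacementFixture())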
Notes (review):
    Code-Review+2: Balazs Gibizer <balazs.gibizer@ericsson.com>
    Code-Review+2: Matt Riedemann <mriedem.os@gmail.com>
    Workflow+1: Matt Riedemann <mriedem.os@gmail.com>
    Verified+2: Zuul
    Submitted-by: Zuul
    Submitted-at: Thu, 13 Dec 2018 22:18:29 +0000
    Reviewed-on: https://review.openstack.org/617941
    Project: openstack/nova
    Branch: refs/heads/master
-rw-r--r--  .zuul.yaml  6
-rw-r--r--  nova/cmd/manage.py  3
-rw-r--r--  nova/cmd/status.py  1
-rw-r--r--  nova/test.py  10
-rw-r--r--  nova/tests/fixtures.py  145
-rw-r--r--  nova/tests/functional/api/openstack/placement/__init__.py  0
-rw-r--r--  nova/tests/functional/api/openstack/placement/base.py  69
-rw-r--r--  nova/tests/functional/api/openstack/placement/db/__init__.py  0
-rw-r--r--  nova/tests/functional/api/openstack/placement/db/test_allocation_candidates.py  2800
-rw-r--r--  nova/tests/functional/api/openstack/placement/db/test_base.py  129
-rw-r--r--  nova/tests/functional/api/openstack/placement/db/test_consumer.py  329
-rw-r--r--  nova/tests/functional/api/openstack/placement/db/test_project.py  31
-rw-r--r--  nova/tests/functional/api/openstack/placement/db/test_reshape.py  359
-rw-r--r--  nova/tests/functional/api/openstack/placement/db/test_resource_class_cache.py  145
-rw-r--r--  nova/tests/functional/api/openstack/placement/db/test_resource_provider.py  2391
-rw-r--r--  nova/tests/functional/api/openstack/placement/db/test_user.py  31
-rw-r--r--  nova/tests/functional/api/openstack/placement/fixtures/__init__.py  0
-rw-r--r--  nova/tests/functional/api/openstack/placement/fixtures/capture.py  81
-rw-r--r--  nova/tests/functional/api/openstack/placement/fixtures/gabbits.py  431
-rw-r--r--  nova/tests/functional/api/openstack/placement/fixtures/placement.py  49
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/aggregate-policy.yaml  39
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/aggregate.yaml  204
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/allocation-bad-class.yaml  77
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates-member-of.yaml  141
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates-policy.yaml  18
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates.yaml  416
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/allocations-1-12.yaml  130
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/allocations-1-8.yaml  152
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/allocations-1.28.yaml  255
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1714072.yaml  97
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1778591.yaml  71
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1778743.yaml  70
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1779717.yaml  102
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/allocations-policy.yaml  76
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/allocations-post.yaml  399
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/allocations.yaml  509
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/basic-http.yaml  207
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/bug-1674694.yaml  38
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/confirm-auth.yaml  32
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/cors.yaml  47
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/ensure-consumer.yaml  41
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/granular.yaml  474
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/inventory-policy.yaml  85
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/inventory.yaml  812
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/microversion-bug-1724065.yaml  22
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/microversion.yaml  90
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/non-cors.yaml  25
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/reshaper-policy.yaml  20
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/reshaper.yaml  558
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/resource-class-in-use.yaml  80
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/resource-classes-1-6.yaml  21
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/resource-classes-1-7.yaml  49
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/resource-classes-last-modified.yaml  117
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/resource-classes-policy.yaml  40
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/resource-classes.yaml  325
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/resource-provider-aggregates.yaml  181
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/resource-provider-bug-1779818.yaml  123
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/resource-provider-duplication.yaml  48
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/resource-provider-links.yaml  106
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/resource-provider-policy.yaml  48
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/resource-provider-resources-query.yaml  156
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/resource-provider.yaml  775
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/shared-resources.yaml  143
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/traits-policy.yaml  55
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/traits.yaml  487
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/unicode.yaml  40
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/usage-policy.yaml  33
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/usage.yaml  120
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/with-allocations.yaml  159
-rw-r--r--  nova/tests/functional/api/openstack/placement/test_direct.py  77
-rw-r--r--  nova/tests/functional/api/openstack/placement/test_placement_api.py  44
-rw-r--r--  nova/tests/functional/api/openstack/placement/test_verify_policy.py  50
-rw-r--r--  nova/tests/functional/api_paste_fixture.py  1
-rw-r--r--  nova/tests/functional/api_sample_tests/api_sample_base.py  2
-rw-r--r--  nova/tests/functional/api_sample_tests/test_compare_result.py (renamed from nova/tests/unit/api_samples_test_base/test_compare_result.py)  0
-rw-r--r--  nova/tests/functional/compute/test_resource_tracker.py  4
-rw-r--r--  nova/tests/functional/fixtures.py  150
-rw-r--r--  nova/tests/functional/integrated_helpers.py  6
-rw-r--r--  nova/tests/functional/libvirt/base.py  4
-rw-r--r--  nova/tests/functional/notification_sample_tests/notification_sample_base.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1595962.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1671648.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1675570.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1679750.py  22
-rw-r--r--  nova/tests/functional/regressions/test_bug_1682693.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1702454.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1713783.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1718455.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1718512.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1719730.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1735407.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1741307.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1746483.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1764883.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1780373.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1781710.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1784353.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1797580.py  3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1806064.py  3
-rw-r--r--  nova/tests/functional/test_aggregates.py  5
-rw-r--r--  nova/tests/functional/test_list_servers_ip_filter.py  3
-rw-r--r--  nova/tests/functional/test_metadata.py  1
-rw-r--r--  nova/tests/functional/test_nova_manage.py  1
-rw-r--r--  nova/tests/functional/test_nova_status.py  289
-rw-r--r--  nova/tests/functional/test_report_client.py  13
-rw-r--r--  nova/tests/functional/test_server_group.py  5
-rw-r--r--  nova/tests/functional/wsgi/test_servers.py  5
-rw-r--r--  nova/tests/unit/api/openstack/placement/__init__.py  0
-rw-r--r--  nova/tests/unit/api/openstack/placement/handlers/__init__.py  0
-rw-r--r--  nova/tests/unit/api/openstack/placement/handlers/test_aggregate.py  37
-rw-r--r--  nova/tests/unit/api/openstack/placement/objects/__init__.py  0
-rw-r--r--  nova/tests/unit/api/openstack/placement/objects/test_resource_provider.py  330
-rw-r--r--  nova/tests/unit/api/openstack/placement/test_context.py  68
-rw-r--r--  nova/tests/unit/api/openstack/placement/test_db_api.py  52
-rw-r--r--  nova/tests/unit/api/openstack/placement/test_deploy.py  47
-rw-r--r--  nova/tests/unit/api/openstack/placement/test_fault_wrap.py  66
-rw-r--r--  nova/tests/unit/api/openstack/placement/test_handler.py  194
-rw-r--r--  nova/tests/unit/api/openstack/placement/test_microversion.py  153
-rw-r--r--  nova/tests/unit/api/openstack/placement/test_policy.py  80
-rw-r--r--  nova/tests/unit/api/openstack/placement/test_requestlog.py  72
-rw-r--r--  nova/tests/unit/api/openstack/placement/test_util.py  1119
-rw-r--r--  nova/tests/unit/api_samples_test_base/__init__.py  0
-rw-r--r--  nova/tests/unit/cmd/test_status.py  246
-rw-r--r--  nova/tests/unit/test_fixtures.py  29
-rw-r--r--  tox.ini  20
125 files changed, 562 insertions(+), 18013 deletions(-)
diff --git a/.zuul.yaml b/.zuul.yaml
index 3824954..585b1cf 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -48,6 +48,8 @@
       Run tox-based functional tests for the OpenStack Nova project with Nova
       specific irrelevant-files list. Uses tox with the ``functional``
       environment.
+    required-projects:
+      - openstack/placement
     irrelevant-files: &functional-irrelevant-files
       - ^.*\.rst$
       - ^api-.*$
@@ -56,6 +58,7 @@
       - ^releasenotes/.*$
     vars:
       tox_envlist: functional
+      tox_install_siblings: true
     timeout: 3600
 
 - job:
@@ -65,9 +68,12 @@
       Run tox-based functional tests for the OpenStack Nova project
       under cPython version 3.5. with Nova specific irrelevant-files list.
       Uses tox with the ``functional-py35`` environment.
+    required-projects:
+      - openstack/placement
     irrelevant-files: *functional-irrelevant-files
     vars:
       tox_envlist: functional-py35
+      tox_install_siblings: true
     timeout: 3600
 
 - job:
diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py
index 6e53d27..eb2ea00 100644
--- a/nova/cmd/manage.py
+++ b/nova/cmd/manage.py
@@ -45,6 +45,7 @@ import six
 import six.moves.urllib.parse as urlparse
 from sqlalchemy.engine import url as sqla_url
 
+# FIXME(cdent): This is a speedbump in the extraction process
 from nova.api.openstack.placement.objects import consumer as consumer_obj
 from nova.cmd import common as cmd_common
 from nova.compute import api as compute_api
@@ -416,6 +417,7 @@ class DbCommands(object):
         # need to be populated if it was not specified during boot time.
         instance_obj.populate_missing_availability_zones,
         # Added in Rocky
+        # FIXME(cdent): This is a factor that needs to be addressed somehow
         consumer_obj.create_incomplete_consumers,
         # Added in Rocky
         instance_mapping_obj.populate_queued_for_delete,
@@ -1987,6 +1989,7 @@ class PlacementCommands(object):
 
         return num_processed
 
+    # FIXME(cdent): This needs to be addressed as part of extraction.
     @action_description(
         _("Iterates over non-cell0 cells looking for instances which do "
           "not have allocations in the Placement service, or have incomplete "
diff --git a/nova/cmd/status.py b/nova/cmd/status.py
index 091fa39..c5e4f43 100644
--- a/nova/cmd/status.py
+++ b/nova/cmd/status.py
@@ -251,6 +251,7 @@ class UpgradeCommands(object):
         # and resource class, so we can simply count the number of inventories
         # records for the given resource class and those will uniquely identify
         # the number of resource providers we care about.
+        # FIXME(cdent): This will be a different project soon.
         meta = MetaData(bind=placement_db.get_placement_engine())
         inventories = Table('inventories', meta, autoload=True)
         return select([sqlfunc.count()]).select_from(
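For context, the check this FIXME annotates counts inventories rows to infer how many resource providers exist. Below is a minimal sketch of that pattern, assuming SQLAlchemy 1.x and an engine obtained from get_placement_engine() as shown above; the where-clause and the function name are illustrative, not the verbatim nova code:

    from sqlalchemy import MetaData, Table, select
    from sqlalchemy import func as sqlfunc


    def count_resource_providers(engine, resource_class_id=0):
        # Each provider has at most one inventories row per resource class,
        # so counting rows for one class (VCPU is id 0) counts the providers
        # of interest.
        meta = MetaData(bind=engine)
        inventories = Table('inventories', meta, autoload=True)
        query = select([sqlfunc.count()]).select_from(inventories).where(
            inventories.c.resource_class_id == resource_class_id)
        return engine.execute(query).scalar()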
diff --git a/nova/test.py b/nova/test.py
index ea7fa2d..cb6302c 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -49,7 +49,6 @@ from oslotest import moxstubout
 import six
 import testtools
 
-from nova.api.openstack.placement.objects import resource_provider
 from nova import context
 from nova.db import api as db
 from nova import exception
@@ -260,7 +259,6 @@ class TestCase(testtools.TestCase):
             # NOTE(danms): Full database setup involves a cell0, cell1,
             # and the relevant mappings.
             self.useFixture(nova_fixtures.Database(database='api'))
-            self.useFixture(nova_fixtures.Database(database='placement'))
             self._setup_cells()
             self.useFixture(nova_fixtures.DefaultFlavorsFixture())
         elif not self.USES_DB_SELF:
@@ -281,12 +279,6 @@ class TestCase(testtools.TestCase):
         # caching of that value.
         utils._IS_NEUTRON = None
 
-        # Reset the traits sync and rc cache flags
-        def _reset_traits():
-            resource_provider._TRAITS_SYNCED = False
-        _reset_traits()
-        self.addCleanup(_reset_traits)
-        resource_provider._RC_CACHE = None
         # Reset the global QEMU version flag.
         images.QEMU_VERSION = None
 
@@ -296,8 +288,6 @@ class TestCase(testtools.TestCase):
         self.addCleanup(self._clear_attrs)
         self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
         self.policy = self.useFixture(policy_fixture.PolicyFixture())
-        self.placement_policy = self.useFixture(
-            policy_fixture.PlacementPolicyFixture())
 
         self.useFixture(nova_fixtures.PoisonFunctions())
 
diff --git a/nova/tests/fixtures.py b/nova/tests/fixtures.py
index 8459a48..3f062b3 100644
--- a/nova/tests/fixtures.py
+++ b/nova/tests/fixtures.py
@@ -26,8 +26,6 @@ import random
 import warnings
 
 import fixtures
-from keystoneauth1 import adapter as ka
-from keystoneauth1 import session as ks
 import mock
 from neutronclient.common import exceptions as neutron_client_exc
 from oslo_concurrency import lockutils
@@ -41,7 +39,6 @@ from requests import adapters
 from wsgi_intercept import interceptor
 
 from nova.api.openstack.compute import tenant_networks
-from nova.api.openstack.placement import db_api as placement_db
 from nova.api.openstack import wsgi_app
 from nova.api import wsgi
 from nova.compute import rpcapi as compute_rpcapi
@@ -57,12 +54,11 @@ from nova import quota as nova_quota
 from nova import rpc
 from nova import service
 from nova.tests.functional.api import client
-from nova.tests.functional.api.openstack.placement.fixtures import placement
 
 _TRUE_VALUES = ('True', 'true', '1', 'yes')
 
 CONF = cfg.CONF
-DB_SCHEMA = {'main': "", 'api': "", 'placement': ""}
+DB_SCHEMA = {'main': "", 'api': ""}
 SESSION_CONFIGURED = False
 
 
@@ -631,7 +627,7 @@ class Database(fixtures.Fixture):
     def __init__(self, database='main', connection=None):
         """Create a database fixture.
 
-        :param database: The type of database, 'main', 'api' or 'placement'
+        :param database: The type of database, 'main', or 'api'
         :param connection: The connection string to use
         """
         super(Database, self).__init__()
@@ -640,7 +636,6 @@ class Database(fixtures.Fixture):
         global SESSION_CONFIGURED
         if not SESSION_CONFIGURED:
             session.configure(CONF)
-            placement_db.configure(CONF)
             SESSION_CONFIGURED = True
         self.database = database
         if database == 'main':
@@ -652,8 +647,6 @@ class Database(fixtures.Fixture):
             self.get_engine = session.get_engine
         elif database == 'api':
             self.get_engine = session.get_api_engine
-        elif database == 'placement':
-            self.get_engine = placement_db.get_placement_engine
 
     def _cache_schema(self):
         global DB_SCHEMA
@@ -687,7 +680,7 @@ class DatabaseAtVersion(fixtures.Fixture):
         """Create a database fixture.
 
         :param version: Max version to sync to (or None for current)
-        :param database: The type of database, 'main', 'api', 'placement'
+        :param database: The type of database, 'main', 'api'
         """
         super(DatabaseAtVersion, self).__init__()
         self.database = database
@@ -696,8 +689,6 @@ class DatabaseAtVersion(fixtures.Fixture):
             self.get_engine = session.get_engine
         elif database == 'api':
             self.get_engine = session.get_api_engine
-        elif database == 'placement':
-            self.get_engine = placement_db.get_placement_engine
 
     def cleanup(self):
         engine = self.get_engine()
@@ -1853,136 +1844,6 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
                            fake_get_all_volume_types)
 
 
-class PlacementApiClient(object):
-    def __init__(self, placement_fixture):
-        self.fixture = placement_fixture
-
-    def get(self, url, **kwargs):
-        return client.APIResponse(self.fixture._fake_get(None, url, **kwargs))
-
-    def put(self, url, body, **kwargs):
-        return client.APIResponse(
-            self.fixture._fake_put(None, url, body, **kwargs))
-
-    def post(self, url, body, **kwargs):
-        return client.APIResponse(
-            self.fixture._fake_post(None, url, body, **kwargs))
-
-
-class PlacementFixture(placement.PlacementFixture):
-    """A fixture to placement operations.
-
-    Runs a local WSGI server bound on a free port and having the Placement
-    application with NoAuth middleware.
-    This fixture also prevents calling the ServiceCatalog for getting the
-    endpoint.
-
-    It's possible to ask for a specific token when running the fixtures so
-    all calls would be passing this token.
-
-    Most of the time users of this fixture will also want the placement
-    database fixture (called first) as well:
-
-      self.useFixture(nova_fixtures.Database(database='placement'))
-
-    That is left as a manual step so tests may have fine grain control, and
-    because it is likely that these fixtures will continue to evolve as
-    the separation of nova and placement continues.
-    """
-
-    def setUp(self):
-        super(PlacementFixture, self).setUp()
-
-        # Turn off manipulation of socket_options in TCPKeepAliveAdapter
-        # to keep wsgi-intercept happy. Replace it with the method
-        # from its superclass.
-        self.useFixture(fixtures.MonkeyPatch(
-            'keystoneauth1.session.TCPKeepAliveAdapter.init_poolmanager',
-            adapters.HTTPAdapter.init_poolmanager))
-
-        self._client = ka.Adapter(ks.Session(auth=None), raise_exc=False)
-        # NOTE(sbauza): We need to mock the scheduler report client because
-        # we need to fake Keystone by directly calling the endpoint instead
-        # of looking up the service catalog, like we did for the OSAPIFixture.
-        self.useFixture(fixtures.MonkeyPatch(
-            'nova.scheduler.client.report.SchedulerReportClient.get',
-            self._fake_get))
-        self.useFixture(fixtures.MonkeyPatch(
-            'nova.scheduler.client.report.SchedulerReportClient.post',
-            self._fake_post))
-        self.useFixture(fixtures.MonkeyPatch(
-            'nova.scheduler.client.report.SchedulerReportClient.put',
-            self._fake_put))
-        self.useFixture(fixtures.MonkeyPatch(
-            'nova.scheduler.client.report.SchedulerReportClient.delete',
-            self._fake_delete))
-
-        self.api = PlacementApiClient(self)
-
-    @staticmethod
-    def _update_headers_with_version(headers, **kwargs):
-        version = kwargs.get("version")
-        if version is not None:
-            # TODO(mriedem): Perform some version discovery at some point.
-            headers.update({
-                'OpenStack-API-Version': 'placement %s' % version
-            })
-
-    def _fake_get(self, *args, **kwargs):
-        (url,) = args[1:]
-        # TODO(sbauza): The current placement NoAuthMiddleware returns a 401
-        # in case a token is not provided. We should change that by creating
-        # a fake token so we could remove adding the header below.
-        headers = {'x-auth-token': self.token}
-        self._update_headers_with_version(headers, **kwargs)
-        return self._client.get(
-            url,
-            endpoint_override=self.endpoint,
-            headers=headers)
-
-    def _fake_post(self, *args, **kwargs):
-        (url, data) = args[1:]
-        # NOTE(sdague): using json= instead of data= sets the
-        # media type to application/json for us. Placement API is
-        # more sensitive to this than other APIs in the OpenStack
-        # ecosystem.
-        # TODO(sbauza): The current placement NoAuthMiddleware returns a 401
-        # in case a token is not provided. We should change that by creating
-        # a fake token so we could remove adding the header below.
-        headers = {'x-auth-token': self.token}
-        self._update_headers_with_version(headers, **kwargs)
-        return self._client.post(
-            url, json=data,
-            endpoint_override=self.endpoint,
-            headers=headers)
-
-    def _fake_put(self, *args, **kwargs):
-        (url, data) = args[1:]
-        # NOTE(sdague): using json= instead of data= sets the
-        # media type to application/json for us. Placement API is
-        # more sensitive to this than other APIs in the OpenStack
-        # ecosystem.
-        # TODO(sbauza): The current placement NoAuthMiddleware returns a 401
-        # in case a token is not provided. We should change that by creating
-        # a fake token so we could remove adding the header below.
-        headers = {'x-auth-token': self.token}
-        self._update_headers_with_version(headers, **kwargs)
-        return self._client.put(
-            url, json=data,
-            endpoint_override=self.endpoint,
-            headers=headers)
-
-    def _fake_delete(self, *args, **kwargs):
-        (url,) = args[1:]
-        # TODO(sbauza): The current placement NoAuthMiddleware returns a 401
-        # in case a token is not provided. We should change that by creating
-        # a fake token so we could remove adding the header below.
-        return self._client.delete(
-            url,
-            endpoint_override=self.endpoint,
-            headers={'x-auth-token': self.token})
-
-
 class UnHelperfulClientChannel(privsep_daemon._ClientChannel):
     def __init__(self, context):
         raise Exception('You have attempted to start a privsep helper. '
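The class removed above is not lost: per the diffstat, equivalent logic reappears in nova/tests/functional/fixtures.py, layered on the fixture now published by openstack-placement. A hedged sketch of that shape; the external import path is an assumption about the placement package layout rather than code shown in this commit:

    import fixtures
    from placement.tests.functional.fixtures import placement as placement_fixtures


    class PlacementFixture(placement_fixtures.PlacementFixture):
        def setUp(self):
            super(PlacementFixture, self).setUp()
            # As in the removed code above: point nova's report client at the
            # intercepted endpoint instead of the service catalog. The other
            # verbs (post/put/delete) are patched the same way.
            self.useFixture(fixtures.MonkeyPatch(
                'nova.scheduler.client.report.SchedulerReportClient.get',
                self._fake_get))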
diff --git a/nova/tests/functional/api/openstack/placement/__init__.py b/nova/tests/functional/api/openstack/placement/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/nova/tests/functional/api/openstack/placement/__init__.py
+++ /dev/null
diff --git a/nova/tests/functional/api/openstack/placement/base.py b/nova/tests/functional/api/openstack/placement/base.py
deleted file mode 100644
index 14164fb..0000000
--- a/nova/tests/functional/api/openstack/placement/base.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_config import fixture as config_fixture
-from oslotest import output
-import testtools
-
-from nova.api.openstack.placement import context
-from nova.api.openstack.placement import deploy
-from nova.api.openstack.placement.objects import resource_provider
-from nova.tests import fixtures
-from nova.tests.functional.api.openstack.placement.fixtures import capture
-from nova.tests.unit import policy_fixture
-
-
-CONF = cfg.CONF
-
-
-class TestCase(testtools.TestCase):
-    """A base test case for placement functional tests.
-
-    Sets up minimum configuration for database and policy handling
-    and establishes the placement database.
-    """
-
-    def setUp(self):
-        super(TestCase, self).setUp()
-
-        # Manage required configuration
-        conf_fixture = self.useFixture(config_fixture.Config(CONF))
-        # The Database fixture will get confused if only one of the databases
-        # is configured.
-        for group in ('placement_database', 'api_database', 'database'):
-            conf_fixture.config(
-                group=group,
-                connection='sqlite://',
-                sqlite_synchronous=False)
-        CONF([], default_config_files=[])
-
-        self.useFixture(policy_fixture.PlacementPolicyFixture())
-
-        self.useFixture(capture.Logging())
-        self.useFixture(output.CaptureOutput())
-        # Filter ignorable warnings during test runs.
-        self.useFixture(capture.WarningsFixture())
-
-        self.placement_db = self.useFixture(
-            fixtures.Database(database='placement'))
-        self._reset_database()
-        self.context = context.RequestContext()
-        # Do database syncs, such as traits sync.
-        deploy.update_database()
-        self.addCleanup(self._reset_database)
-
-    @staticmethod
-    def _reset_database():
-        """Reset database sync flags to base state."""
-        resource_provider._TRAITS_SYNCED = False
-        resource_provider._RC_CACHE = None
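With this base class gone, tests that still need providers (such as the relocated TestUpgradeCheckResourceProviders) create them over the placement API instead of writing to the placement database. A minimal sketch, assuming a client with the get/post interface of the PlacementApiClient shown earlier; the helper name is hypothetical:

    import uuid


    def create_provider(placement_api, name):
        # POST /resource_providers is the standard placement call; at
        # microversion 1.20+ the created provider is returned in the body.
        body = {'name': name, 'uuid': str(uuid.uuid4())}
        return placement_api.post('/resource_providers', body, version='1.20')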
diff --git a/nova/tests/functional/api/openstack/placement/db/__init__.py b/nova/tests/functional/api/openstack/placement/db/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/nova/tests/functional/api/openstack/placement/db/__init__.py
+++ /dev/null
diff --git a/nova/tests/functional/api/openstack/placement/db/test_allocation_candidates.py b/nova/tests/functional/api/openstack/placement/db/test_allocation_candidates.py
deleted file mode 100644
index 8903fed..0000000
--- a/nova/tests/functional/api/openstack/placement/db/test_allocation_candidates.py
+++ /dev/null
@@ -1,2800 +0,0 @@
1# Licensed under the Apache License, Version 2.0 (the "License"); you may
2# not use this file except in compliance with the License. You may obtain
3# a copy of the License at
4#
5# http://www.apache.org/licenses/LICENSE-2.0
6#
7# Unless required by applicable law or agreed to in writing, software
8# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
9# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
10# License for the specific language governing permissions and limitations
11# under the License.
12import os_traits
13from oslo_config import cfg
14from oslo_utils.fixture import uuidsentinel as uuids
15import six
16import sqlalchemy as sa
17
18from nova.api.openstack.placement import exception
19from nova.api.openstack.placement import lib as placement_lib
20from nova.api.openstack.placement.objects import resource_provider as rp_obj
21from nova import rc_fields as fields
22from nova.tests.functional.api.openstack.placement.db import test_base as tb
23
24
25CONF = cfg.CONF
26
27
28class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
29
30 def test_get_provider_ids_matching(self):
31 # These RPs are named based on whether we expect them to be 'incl'uded
32 # or 'excl'uded in the result.
33
34 # No inventory records. This one should never show up in a result.
35 self._create_provider('no_inventory')
36
37 # Inventory of adequate CPU and memory, no allocations against it.
38 excl_big_cm_noalloc = self._create_provider('big_cm_noalloc')
39 tb.add_inventory(excl_big_cm_noalloc, fields.ResourceClass.VCPU, 15)
40 tb.add_inventory(excl_big_cm_noalloc, fields.ResourceClass.MEMORY_MB,
41 4096, max_unit=2048)
42
43 # Inventory of adequate memory and disk, no allocations against it.
44 excl_big_md_noalloc = self._create_provider('big_md_noalloc')
45 tb.add_inventory(excl_big_md_noalloc, fields.ResourceClass.MEMORY_MB,
46 4096, max_unit=2048)
47 tb.add_inventory(excl_big_md_noalloc, fields.ResourceClass.DISK_GB,
48 2000)
49
50 # Adequate inventory, no allocations against it.
51 incl_biginv_noalloc = self._create_provider('biginv_noalloc')
52 tb.add_inventory(incl_biginv_noalloc, fields.ResourceClass.VCPU, 15)
53 tb.add_inventory(incl_biginv_noalloc, fields.ResourceClass.MEMORY_MB,
54 4096, max_unit=2048)
55 tb.add_inventory(incl_biginv_noalloc, fields.ResourceClass.DISK_GB,
56 2000)
57
58 # No allocations, but inventory unusable. Try to hit all the possible
59 # reasons for exclusion.
60 # VCPU min_unit too high
61 excl_badinv_min_unit = self._create_provider('badinv_min_unit')
62 tb.add_inventory(excl_badinv_min_unit, fields.ResourceClass.VCPU, 12,
63 min_unit=6)
64 tb.add_inventory(excl_badinv_min_unit, fields.ResourceClass.MEMORY_MB,
65 4096, max_unit=2048)
66 tb.add_inventory(excl_badinv_min_unit, fields.ResourceClass.DISK_GB,
67 2000)
68 # MEMORY_MB max_unit too low
69 excl_badinv_max_unit = self._create_provider('badinv_max_unit')
70 tb.add_inventory(excl_badinv_max_unit, fields.ResourceClass.VCPU, 15)
71 tb.add_inventory(excl_badinv_max_unit, fields.ResourceClass.MEMORY_MB,
72 4096, max_unit=512)
73 tb.add_inventory(excl_badinv_max_unit, fields.ResourceClass.DISK_GB,
74 2000)
75 # DISK_GB unsuitable step_size
76 excl_badinv_step_size = self._create_provider('badinv_step_size')
77 tb.add_inventory(excl_badinv_step_size, fields.ResourceClass.VCPU, 15)
78 tb.add_inventory(excl_badinv_step_size, fields.ResourceClass.MEMORY_MB,
79 4096, max_unit=2048)
80 tb.add_inventory(excl_badinv_step_size, fields.ResourceClass.DISK_GB,
81 2000, step_size=7)
82 # Not enough total VCPU
83 excl_badinv_total = self._create_provider('badinv_total')
84 tb.add_inventory(excl_badinv_total, fields.ResourceClass.VCPU, 4)
85 tb.add_inventory(excl_badinv_total, fields.ResourceClass.MEMORY_MB,
86 4096, max_unit=2048)
87 tb.add_inventory(excl_badinv_total, fields.ResourceClass.DISK_GB, 2000)
88 # Too much reserved MEMORY_MB
89 excl_badinv_reserved = self._create_provider('badinv_reserved')
90 tb.add_inventory(excl_badinv_reserved, fields.ResourceClass.VCPU, 15)
91 tb.add_inventory(excl_badinv_reserved, fields.ResourceClass.MEMORY_MB,
92 4096, max_unit=2048, reserved=3500)
93 tb.add_inventory(excl_badinv_reserved, fields.ResourceClass.DISK_GB,
94 2000)
95 # DISK_GB allocation ratio blows it up
96 excl_badinv_alloc_ratio = self._create_provider('badinv_alloc_ratio')
97 tb.add_inventory(excl_badinv_alloc_ratio, fields.ResourceClass.VCPU,
98 15)
99 tb.add_inventory(excl_badinv_alloc_ratio,
100 fields.ResourceClass.MEMORY_MB, 4096, max_unit=2048)
101 tb.add_inventory(excl_badinv_alloc_ratio, fields.ResourceClass.DISK_GB,
102 2000, allocation_ratio=0.5)
103
104 # Inventory consumed in one RC, but available in the others
105 excl_1invunavail = self._create_provider('1invunavail')
106 tb.add_inventory(excl_1invunavail, fields.ResourceClass.VCPU, 10)
107 self.allocate_from_provider(
108 excl_1invunavail, fields.ResourceClass.VCPU, 7)
109 tb.add_inventory(excl_1invunavail, fields.ResourceClass.MEMORY_MB,
110 4096)
111 self.allocate_from_provider(excl_1invunavail,
112 fields.ResourceClass.MEMORY_MB, 1024)
113 tb.add_inventory(excl_1invunavail, fields.ResourceClass.DISK_GB, 2000)
114 self.allocate_from_provider(excl_1invunavail,
115 fields.ResourceClass.DISK_GB, 400)
116
117 # Inventory all consumed
118 excl_allused = self._create_provider('allused')
119 tb.add_inventory(excl_allused, fields.ResourceClass.VCPU, 10)
120 self.allocate_from_provider(excl_allused, fields.ResourceClass.VCPU, 7)
121 tb.add_inventory(excl_allused, fields.ResourceClass.MEMORY_MB, 4000)
122 self.allocate_from_provider(excl_allused,
123 fields.ResourceClass.MEMORY_MB, 1500)
124 self.allocate_from_provider(excl_allused,
125 fields.ResourceClass.MEMORY_MB, 2000)
126 tb.add_inventory(excl_allused, fields.ResourceClass.DISK_GB, 1500)
127 self.allocate_from_provider(excl_allused, fields.ResourceClass.DISK_GB,
128 1)
129
130 # Inventory available in requested classes, but unavailable in others
131 incl_extra_full = self._create_provider('extra_full')
132 tb.add_inventory(incl_extra_full, fields.ResourceClass.VCPU, 20)
133 self.allocate_from_provider(incl_extra_full, fields.ResourceClass.VCPU,
134 15)
135 tb.add_inventory(incl_extra_full, fields.ResourceClass.MEMORY_MB, 4096)
136 self.allocate_from_provider(incl_extra_full,
137 fields.ResourceClass.MEMORY_MB, 1024)
138 tb.add_inventory(incl_extra_full, fields.ResourceClass.DISK_GB, 2000)
139 self.allocate_from_provider(incl_extra_full,
140 fields.ResourceClass.DISK_GB, 400)
141 tb.add_inventory(incl_extra_full, fields.ResourceClass.PCI_DEVICE, 4)
142 self.allocate_from_provider(incl_extra_full,
143 fields.ResourceClass.PCI_DEVICE, 1)
144 self.allocate_from_provider(incl_extra_full,
145 fields.ResourceClass.PCI_DEVICE, 3)
146
147 # Inventory available in a unrequested classes, not in requested ones
148 excl_extra_avail = self._create_provider('extra_avail')
149 # Incompatible step size
150 tb.add_inventory(excl_extra_avail, fields.ResourceClass.VCPU, 10,
151 step_size=3)
152 # Not enough left after reserved + used
153 tb.add_inventory(excl_extra_avail, fields.ResourceClass.MEMORY_MB,
154 4096, max_unit=2048, reserved=2048)
155 self.allocate_from_provider(excl_extra_avail,
156 fields.ResourceClass.MEMORY_MB, 1040)
157 # Allocation ratio math
158 tb.add_inventory(excl_extra_avail, fields.ResourceClass.DISK_GB, 2000,
159 allocation_ratio=0.5)
160 tb.add_inventory(excl_extra_avail, fields.ResourceClass.IPV4_ADDRESS,
161 48)
162 custom_special = rp_obj.ResourceClass(self.ctx, name='CUSTOM_SPECIAL')
163 custom_special.create()
164 tb.add_inventory(excl_extra_avail, 'CUSTOM_SPECIAL', 100)
165 self.allocate_from_provider(excl_extra_avail, 'CUSTOM_SPECIAL', 99)
166
167 resources = {
168 fields.ResourceClass.STANDARD.index(fields.ResourceClass.VCPU): 5,
169 fields.ResourceClass.STANDARD.index(
170 fields.ResourceClass.MEMORY_MB): 1024,
171 fields.ResourceClass.STANDARD.index(
172 fields.ResourceClass.DISK_GB): 1500
173 }
174
175 # Run it!
176 res = rp_obj._get_provider_ids_matching(self.ctx, resources, {}, {})
177
178 # We should get all the incl_* RPs
179 expected = [incl_biginv_noalloc, incl_extra_full]
180
181 self.assertEqual(set((rp.id, rp.id) for rp in expected), set(res))
182
183 # Now request that the providers must have a set of required traits and
184 # that this results in no results returned, since we haven't yet
185 # associated any traits with the providers
186 avx2_t = rp_obj.Trait.get_by_name(self.ctx, os_traits.HW_CPU_X86_AVX2)
187 # _get_provider_ids_matching()'s required_traits and forbidden_traits
188 # arguments maps, keyed by trait name, of the trait internal ID
189 req_traits = {os_traits.HW_CPU_X86_AVX2: avx2_t.id}
190 res = rp_obj._get_provider_ids_matching(self.ctx, resources,
191 req_traits, {})
192
193 self.assertEqual([], res)
194
195 # Next let's set the required trait to an excl_* RPs.
196 # This should result in no results returned as well.
197 excl_big_md_noalloc.set_traits([avx2_t])
198 res = rp_obj._get_provider_ids_matching(self.ctx, resources,
199 req_traits, {})
200 self.assertEqual([], res)
201
202 # OK, now add the trait to one of the incl_* providers and verify that
203 # provider now shows up in our results
204 incl_biginv_noalloc.set_traits([avx2_t])
205 res = rp_obj._get_provider_ids_matching(self.ctx, resources,
206 req_traits, {})
207
208 rp_ids = [r[0] for r in res]
209 self.assertEqual([incl_biginv_noalloc.id], rp_ids)
210
211 def test_get_provider_ids_matching_with_multiple_forbidden(self):
212 rp1 = self._create_provider('rp1', uuids.agg1)
213 tb.add_inventory(rp1, fields.ResourceClass.VCPU, 64)
214
215 rp2 = self._create_provider('rp2', uuids.agg1)
216 trait_two, = tb.set_traits(rp2, 'CUSTOM_TWO')
217 tb.add_inventory(rp2, fields.ResourceClass.VCPU, 64)
218
219 rp3 = self._create_provider('rp3')
220 trait_three, = tb.set_traits(rp3, 'CUSTOM_THREE')
221 tb.add_inventory(rp3, fields.ResourceClass.VCPU, 64)
222
223 resources = {
224 fields.ResourceClass.STANDARD.index(fields.ResourceClass.VCPU): 4}
225 res = rp_obj._get_provider_ids_matching(
226 self.ctx, resources, {},
227 {trait_two.name: trait_two.id,
228 trait_three.name: trait_three.id}, member_of=[[uuids.agg1]])
229 self.assertEqual({(rp1.id, rp1.id)}, set(res))
230
231 def test_get_provider_ids_having_all_traits(self):
232 def run(traitnames, expected_ids):
233 tmap = {}
234 if traitnames:
235 tmap = rp_obj._trait_ids_from_names(self.ctx, traitnames)
236 obs = rp_obj._get_provider_ids_having_all_traits(self.ctx, tmap)
237 self.assertEqual(sorted(expected_ids), sorted(obs))
238
239 # No traits. This will never be returned, because it's illegal to
240 # invoke the method with no traits.
241 self._create_provider('cn1')
242
243 # One trait
244 cn2 = self._create_provider('cn2')
245 tb.set_traits(cn2, 'HW_CPU_X86_TBM')
246
247 # One the same as cn2
248 cn3 = self._create_provider('cn3')
249 tb.set_traits(cn3, 'HW_CPU_X86_TBM', 'HW_CPU_X86_TSX',
250 'HW_CPU_X86_SGX')
251
252 # Disjoint
253 cn4 = self._create_provider('cn4')
254 tb.set_traits(cn4, 'HW_CPU_X86_SSE2', 'HW_CPU_X86_SSE3', 'CUSTOM_FOO')
255
256 # Request with no traits not allowed
257 self.assertRaises(
258 ValueError,
259 rp_obj._get_provider_ids_having_all_traits, self.ctx, None)
260 self.assertRaises(
261 ValueError,
262 rp_obj._get_provider_ids_having_all_traits, self.ctx, {})
263
264 # Common trait returns both RPs having it
265 run(['HW_CPU_X86_TBM'], [cn2.id, cn3.id])
266 # Just the one
267 run(['HW_CPU_X86_TSX'], [cn3.id])
268 run(['HW_CPU_X86_TSX', 'HW_CPU_X86_SGX'], [cn3.id])
269 run(['CUSTOM_FOO'], [cn4.id])
270 # Including the common one still just gets me cn3
271 run(['HW_CPU_X86_TBM', 'HW_CPU_X86_SGX'], [cn3.id])
272 run(['HW_CPU_X86_TBM', 'HW_CPU_X86_TSX', 'HW_CPU_X86_SGX'], [cn3.id])
273 # Can't be satisfied
274 run(['HW_CPU_X86_TBM', 'HW_CPU_X86_TSX', 'CUSTOM_FOO'], [])
275 run(['HW_CPU_X86_TBM', 'HW_CPU_X86_TSX', 'HW_CPU_X86_SGX',
276 'CUSTOM_FOO'], [])
277 run(['HW_CPU_X86_SGX', 'HW_CPU_X86_SSE3'], [])
278 run(['HW_CPU_X86_TBM', 'CUSTOM_FOO'], [])
279 run(['HW_CPU_X86_BMI'], [])
280 rp_obj.Trait(self.ctx, name='CUSTOM_BAR').create()
281 run(['CUSTOM_BAR'], [])
282
283
284class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
285 """Tests a variety of scenarios with both shared and non-shared resource
286 providers that the AllocationCandidates.get_by_requests() method returns a
287 set of alternative allocation requests and provider summaries that may be
288 used by the scheduler to sort/weigh the options it has for claiming
289 resources against providers.
290 """
291
292 def setUp(self):
293 super(AllocationCandidatesTestCase, self).setUp()
294 self.requested_resources = {
295 fields.ResourceClass.VCPU: 1,
296 fields.ResourceClass.MEMORY_MB: 64,
297 fields.ResourceClass.DISK_GB: 1500,
298 }
299 # For debugging purposes, populated by _create_provider and used by
300 # _validate_allocation_requests to make failure results more readable.
301 self.rp_uuid_to_name = {}
302
303 def _get_allocation_candidates(self, requests=None, limit=None):
304 if requests is None:
305 requests = {'': placement_lib.RequestGroup(
306 use_same_provider=False,
307 resources=self.requested_resources)}
308 return rp_obj.AllocationCandidates.get_by_requests(self.ctx, requests,
309 limit)
310
311 def _validate_allocation_requests(self, expected, candidates):
312 """Assert correctness of allocation requests in allocation candidates.
313
314 This is set up to make it easy for the caller to specify the expected
315 result, to make that expected structure readable for someone looking at
316 the test case, and to make test failures readable for debugging.
317
318 :param expected: A list of lists of tuples representing the expected
319 allocation requests, of the form:
320 [
321 [(resource_provider_name, resource_class_name, resource_count),
322 ...,
323 ],
324 ...
325 ]
326 :param candidates: The result from AllocationCandidates.get_by_requests
327 """
328 # Extract/convert allocation requests from candidates
329 observed = []
330 for ar in candidates.allocation_requests:
331 rrs = []
332 for rr in ar.resource_requests:
333 rrs.append((self.rp_uuid_to_name[rr.resource_provider.uuid],
334 rr.resource_class, rr.amount))
335 rrs.sort()
336 observed.append(rrs)
337 observed.sort()
338
339 # Sort the guts of the expected structure
340 for rr in expected:
341 rr.sort()
342 expected.sort()
343
344 # Now we ought to be able to compare them
345 self.assertEqual(expected, observed)
346
347 def _validate_provider_summary_resources(self, expected, candidates):
348 """Assert correctness of the resources in provider summaries in
349 allocation candidates.
350
351 This is set up to make it easy for the caller to specify the expected
352 result, to make that expected structure readable for someone looking at
353 the test case, and to make test failures readable for debugging.
354
355 :param expected: A dict, keyed by resource provider name, of sets of
356 3-tuples containing resource class, capacity, and
357 amount used:
358 { resource_provider_name: set([
359 (resource_class, capacity, used),
360 ...,
361 ]),
362 ...,
363 }
364 :param candidates: The result from AllocationCandidates.get_by_requests
365 """
366 observed = {}
367 for psum in candidates.provider_summaries:
368 rpname = self.rp_uuid_to_name[psum.resource_provider.uuid]
369 reslist = set()
370 for res in psum.resources:
371 reslist.add((res.resource_class, res.capacity, res.used))
372 if rpname in observed:
373 self.fail("Found resource provider %s more than once in "
374 "provider_summaries!" % rpname)
375 observed[rpname] = reslist
376
377 # Now we ought to be able to compare them
378 self.assertEqual(expected, observed)
379
380 def _validate_provider_summary_traits(self, expected, candidates):
381 """Assert correctness of the traits in provider summaries in allocation
382 candidates.
383
384 This is set up to make it easy for the caller to specify the expected
385 result, to make that expected structure readable for someone looking at
386 the test case, and to make test failures readable for debugging.
387
388 :param expected: A dict, keyed by resource provider name, of sets of
389 string trait names:
390 { resource_provider_name: set([
391 trait_name, ...
392 ]),
393 ...,
394 }
395 :param candidates: The result from AllocationCandidates.get_by_requests
396 """
397 observed = {}
398 for psum in candidates.provider_summaries:
399 rpname = self.rp_uuid_to_name[psum.resource_provider.uuid]
400 observed[rpname] = set(trait.name for trait in psum.traits)
401
402 self.assertEqual(expected, observed)
403
404 def test_unknown_traits(self):
405 missing = set(['UNKNOWN_TRAIT'])
406 requests = {'': placement_lib.RequestGroup(
407 use_same_provider=False, resources=self.requested_resources,
408 required_traits=missing)}
409 self.assertRaises(exception.TraitNotFound,
410 rp_obj.AllocationCandidates.get_by_requests,
411 self.ctx, requests)
412
413 def test_allc_req_and_prov_summary(self):
414 """Simply test with one resource provider that the allocation
415 requests returned by AllocationCandidates have valid
416 allocation_requests and provider_summaries.
417 """
418 cn1 = self._create_provider('cn1')
419 tb.add_inventory(cn1, fields.ResourceClass.VCPU, 8)
420 tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
421 tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2000)
422
423 alloc_cands = self._get_allocation_candidates(
424 {'': placement_lib.RequestGroup(
425 use_same_provider=False,
426 resources={
427 fields.ResourceClass.VCPU: 1
428 }
429 )}
430 )
431
432 expected = [
433 [('cn1', fields.ResourceClass.VCPU, 1)]
434 ]
435 self._validate_allocation_requests(expected, alloc_cands)
436
437 expected = {
438 'cn1': set([
439 (fields.ResourceClass.VCPU, 8, 0),
440 (fields.ResourceClass.MEMORY_MB, 2048, 0),
441 (fields.ResourceClass.DISK_GB, 2000, 0)
442 ]),
443 }
444 self._validate_provider_summary_resources(expected, alloc_cands)
445
446 def test_get_allc_req_old_records(self):
447 """Simulate an old resource provider record in the database that has no
448 root_provider_uuid set and ensure that we still get that candidate
449 returned.
450 """
451 # Passing a non-existing resource provider UUID should return an empty
452 # list
453 rp_tbl = rp_obj._RP_TBL
454 inv_tbl = rp_obj._INV_TBL
455 alloc_tbl = rp_obj._ALLOC_TBL
456 conn = self.placement_db.get_engine().connect()
457
458 # First, set up a record for an "old-style" resource provider with no
459 # root provider UUID.
460 ins_rptbl = rp_tbl.insert().values(
461 id=1,
462 uuid=uuids.rp1,
463 name='cn1',
464 root_provider_id=None,
465 parent_provider_id=None,
466 generation=42,
467 )
468 conn.execute(ins_rptbl)
469
470 # This is needed for _validate_allocation_requests() at the end
471 self.rp_uuid_to_name[uuids.rp1] = 'cn1'
472
473 # Add VCPU(resource_class_id=0) inventory to the provider.
474 ins_invtbl = inv_tbl.insert().values(
475 id=1,
476 resource_provider_id=1,
477 resource_class_id=0,
478 total=8,
479 reserved=0,
480 min_unit=1,
481 max_unit=8,
482 step_size=1,
483 allocation_ratio=1.0,
484 )
485 conn.execute(ins_invtbl)
486
487 # Consume VCPU inventory
488 ins_alloctbl = alloc_tbl.insert().values(
489 id=1,
490 resource_provider_id=1,
491 consumer_id=uuids.consumer,
492 resource_class_id=0,
493 used=4
494 )
495 conn.execute(ins_alloctbl)
496
497 alloc_cands = self._get_allocation_candidates(
498 {'': placement_lib.RequestGroup(
499 use_same_provider=False,
500 resources={
501 fields.ResourceClass.VCPU: 1
502 }
503 )}
504 )
505
506 expected = [
507 [('cn1', fields.ResourceClass.VCPU, 1)]
508 ]
509 self._validate_allocation_requests(expected, alloc_cands)
510
511 expected = {
512 'cn1': set([
513 (fields.ResourceClass.VCPU, 8, 4)
514 ]),
515 }
516 self._validate_provider_summary_resources(expected, alloc_cands)
517
518 # NOTE(tetsuro): Getting allocation candidates goes through a
519 # different path when sharing/nested providers exist, let's test
520 # that case and the path creating a new sharing provider.
521 # We omit the direct database insertion of 'ss1' here since 'cn1',
522 # which has no root id in the database, is the actual target of the
523 # following test.
524 ss1 = self._create_provider('ss1', uuids.agg1)
525 tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
526 tb.add_inventory(ss1, fields.ResourceClass.VCPU, 8)
527
528 alloc_cands = self._get_allocation_candidates(
529 {'': placement_lib.RequestGroup(
530 use_same_provider=False,
531 resources={
532 fields.ResourceClass.VCPU: 1
533 }
534 )}
535 )
536
537 expected = [
538 [('cn1', fields.ResourceClass.VCPU, 1)],
539 [('ss1', fields.ResourceClass.VCPU, 1)]
540 ]
541 self._validate_allocation_requests(expected, alloc_cands)
542
543 expected = {
544 'cn1': set([
545 (fields.ResourceClass.VCPU, 8, 4)
546 ]),
547 'ss1': set([
548 (fields.ResourceClass.VCPU, 8, 0)
549 ]),
550 }
551 self._validate_provider_summary_resources(expected, alloc_cands)
552
553 def test_all_local(self):
554 """Create some resource providers that can satisfy the request for
555 resources with local (non-shared) resources and verify that the
556 allocation requests returned by AllocationCandidates correspond with
557 each of these resource providers.
558 """
559 # Create three compute node providers with VCPU, RAM and local disk
560 cn1, cn2, cn3 = (self._create_provider(name)
561 for name in ('cn1', 'cn2', 'cn3'))
562 for cn in (cn1, cn2, cn3):
563 tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
564 allocation_ratio=16.0)
565 tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768,
566 min_unit=64, step_size=64, allocation_ratio=1.5)
567 total_gb = 1000 if cn.name == 'cn3' else 2000
568 tb.add_inventory(cn, fields.ResourceClass.DISK_GB, total_gb,
569 reserved=100, min_unit=10, step_size=10,
570 allocation_ratio=1.0)
571
572 # Ask for the alternative placement possibilities and verify each
573 # provider is returned
574 alloc_cands = self._get_allocation_candidates()
575
576 # Verify the provider summary information indicates 0 usage and
577 # capacity calculated from above inventory numbers for the first two
578 # compute nodes. The third doesn't show up because it lacks sufficient
579 # disk capacity.
580 expected = {
581 'cn1': set([
582 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
583 (fields.ResourceClass.MEMORY_MB, 32768 * 1.5, 0),
584 (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
585 ]),
586 'cn2': set([
587 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
588 (fields.ResourceClass.MEMORY_MB, 32768 * 1.5, 0),
589 (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
590 ]),
591 }
592 self._validate_provider_summary_resources(expected, alloc_cands)
593
594 # Verify the allocation requests that are returned. There should be 2
595 # allocation requests, one for each compute node, containing 3
596 # resources in each allocation request, one each for VCPU, RAM, and
597 # disk. The amounts of the requests should correspond to the requested
598 # resource amounts in the filter:resources dict passed to
599 # AllocationCandidates.get_by_requests().
600 expected = [
601 [('cn1', fields.ResourceClass.VCPU, 1),
602 ('cn1', fields.ResourceClass.MEMORY_MB, 64),
603 ('cn1', fields.ResourceClass.DISK_GB, 1500)],
604 [('cn2', fields.ResourceClass.VCPU, 1),
605 ('cn2', fields.ResourceClass.MEMORY_MB, 64),
606 ('cn2', fields.ResourceClass.DISK_GB, 1500)],
607 ]
608 self._validate_allocation_requests(expected, alloc_cands)
609
610 # Now let's add traits into the mix. Currently, none of the compute
611 # nodes has the AVX2 trait associated with it, so we should get 0
612 # results if we required AVX2
613 alloc_cands = self._get_allocation_candidates(
614 {'': placement_lib.RequestGroup(
615 use_same_provider=False,
616 resources=self.requested_resources,
617 required_traits=set([os_traits.HW_CPU_X86_AVX2])
618 )},
619 )
620 self._validate_allocation_requests([], alloc_cands)
621
622 # If we then associate the AVX2 trait to just compute node 2, we should
623 # get back just that compute node in the provider summaries
624 tb.set_traits(cn2, 'HW_CPU_X86_AVX2')
625
626 alloc_cands = self._get_allocation_candidates(
627 {'': placement_lib.RequestGroup(
628 use_same_provider=False,
629 resources=self.requested_resources,
630 required_traits=set([os_traits.HW_CPU_X86_AVX2])
631 )},
632 )
633 # Only cn2 should be in our allocation requests now since that's the
634 # only one with the required trait
635 expected = [
636 [('cn2', fields.ResourceClass.VCPU, 1),
637 ('cn2', fields.ResourceClass.MEMORY_MB, 64),
638 ('cn2', fields.ResourceClass.DISK_GB, 1500)],
639 ]
640 self._validate_allocation_requests(expected, alloc_cands)
641 p_sums = alloc_cands.provider_summaries
642 self.assertEqual(1, len(p_sums))
643
644 expected = {
645 'cn2': set([
646 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
647 (fields.ResourceClass.MEMORY_MB, 32768 * 1.5, 0),
648 (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
649 ]),
650 }
651 self._validate_provider_summary_resources(expected, alloc_cands)
652
653 expected = {
654 'cn2': set(['HW_CPU_X86_AVX2'])
655 }
656 self._validate_provider_summary_traits(expected, alloc_cands)
657
658         # Confirm that forbidden traits change the results so that we get cn1.
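             # (Over the REST API, from microversion 1.22 a forbidden trait is
             # expressed by prefixing it with '!', e.g. required=!HW_CPU_X86_AVX2.)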
659 alloc_cands = self._get_allocation_candidates(
660 {'': placement_lib.RequestGroup(
661 use_same_provider=False,
662 resources=self.requested_resources,
663 forbidden_traits=set([os_traits.HW_CPU_X86_AVX2])
664 )},
665 )
666 expected = [
667 [('cn1', fields.ResourceClass.VCPU, 1),
668 ('cn1', fields.ResourceClass.MEMORY_MB, 64),
669 ('cn1', fields.ResourceClass.DISK_GB, 1500)],
670 ]
671 self._validate_allocation_requests(expected, alloc_cands)
672
673 def test_all_local_limit(self):
674 """Create some resource providers that can satisfy the request for
675 resources with local (non-shared) resources, limit them, and verify
676 that the allocation requests returned by AllocationCandidates
677 correspond with each of these resource providers.
678 """
679 # Create three compute node providers with VCPU, RAM and local disk
680 for name in ('cn1', 'cn2', 'cn3'):
681 cn = self._create_provider(name)
682 tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
683 allocation_ratio=16.0)
684 tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768,
685 min_unit=64, step_size=64, allocation_ratio=1.5)
686 total_gb = 1000 if name == 'cn3' else 2000
687 tb.add_inventory(cn, fields.ResourceClass.DISK_GB, total_gb,
688 reserved=100, min_unit=10, step_size=10,
689 allocation_ratio=1.0)
690
691 # Ask for just one candidate.
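             # (Roughly GET /allocation_candidates?limit=1 in REST API terms;
             # the limit parameter was added in microversion 1.16.)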
692 limit = 1
693 alloc_cands = self._get_allocation_candidates(limit=limit)
694 allocation_requests = alloc_cands.allocation_requests
695 self.assertEqual(limit, len(allocation_requests))
696
697 # provider summaries should have only one rp
698 self.assertEqual(limit, len(alloc_cands.provider_summaries))
699
700 # Do it again, with conf set to randomize. We can't confirm the
701         # randomness but we can be sure the code path doesn't explode.
702 CONF.set_override('randomize_allocation_candidates', True,
703 group='placement')
704
705 # Ask for two candidates.
706 limit = 2
707 alloc_cands = self._get_allocation_candidates(limit=limit)
708 allocation_requests = alloc_cands.allocation_requests
709 self.assertEqual(limit, len(allocation_requests))
710
711 # provider summaries should have two rps
712 self.assertEqual(limit, len(alloc_cands.provider_summaries))
713
714 # Do it again, asking for more than are available.
715 limit = 5
716         # We still only expect 2 because cn3 cannot satisfy the default disk request.
717 expected_length = 2
718 alloc_cands = self._get_allocation_candidates(limit=limit)
719 allocation_requests = alloc_cands.allocation_requests
720 self.assertEqual(expected_length, len(allocation_requests))
721
722 # provider summaries should have two rps
723 self.assertEqual(expected_length, len(alloc_cands.provider_summaries))
724
725 def test_local_with_shared_disk(self):
726 """Create some resource providers that can satisfy the request for
727 resources with local VCPU and MEMORY_MB but rely on a shared storage
728 pool to satisfy DISK_GB and verify that the allocation requests
729 returned by AllocationCandidates have DISK_GB served up by the shared
730 storage pool resource provider and VCPU/MEMORY_MB by the compute node
731 providers
732 """
733 # Create two compute node providers with VCPU, RAM and NO local disk,
734 # associated with the aggregate.
735 cn1, cn2 = (self._create_provider(name, uuids.agg)
736 for name in ('cn1', 'cn2'))
737 for cn in (cn1, cn2):
738 tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
739 allocation_ratio=16.0)
740 tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 1024,
741 min_unit=64, allocation_ratio=1.5)
742
743         # Create the shared storage pool, associated with the same aggregate
744 ss = self._create_provider('shared storage', uuids.agg)
745
746 # Give the shared storage pool some inventory of DISK_GB
747 tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000, reserved=100,
748 min_unit=10)
749
750 # Mark the shared storage pool as having inventory shared among any
751 # provider associated via aggregate
752 tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE")
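             # (MISC_SHARES_VIA_AGGREGATE is the standard trait the allocation
             # candidate logic keys on: a provider with this trait offers its
             # inventory to every provider it shares an aggregate with.)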
753
754 # Ask for the alternative placement possibilities and verify each
755 # compute node provider is listed in the allocation requests as well as
756 # the shared storage pool provider
757 alloc_cands = self._get_allocation_candidates()
758
759 # Verify the provider summary information indicates 0 usage and
760 # capacity calculated from above inventory numbers for both compute
761 # nodes and the shared provider.
762 expected = {
763 'cn1': set([
764 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
765 (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
766 ]),
767 'cn2': set([
768 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
769 (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
770 ]),
771 'shared storage': set([
772 (fields.ResourceClass.DISK_GB, 2000 - 100, 0)
773 ]),
774 }
775 self._validate_provider_summary_resources(expected, alloc_cands)
776
777 # Verify the allocation requests that are returned. There should be 2
778 # allocation requests, one for each compute node, containing 3
779 # resources in each allocation request, one each for VCPU, RAM, and
780 # disk. The amounts of the requests should correspond to the requested
781 # resource amounts in the filter:resources dict passed to
782 # AllocationCandidates.get_by_requests(). The providers for VCPU and
783 # MEMORY_MB should be the compute nodes while the provider for the
784 # DISK_GB should be the shared storage pool
785 expected = [
786 [('cn1', fields.ResourceClass.VCPU, 1),
787 ('cn1', fields.ResourceClass.MEMORY_MB, 64),
788 ('shared storage', fields.ResourceClass.DISK_GB, 1500)],
789 [('cn2', fields.ResourceClass.VCPU, 1),
790 ('cn2', fields.ResourceClass.MEMORY_MB, 64),
791 ('shared storage', fields.ResourceClass.DISK_GB, 1500)],
792 ]
793 self._validate_allocation_requests(expected, alloc_cands)
794
795 # Test for bug #1705071. We query for allocation candidates with a
796 # request for ONLY the DISK_GB (the resource that is shared with
797 # compute nodes) and no VCPU/MEMORY_MB. Before the fix for bug
798 # #1705071, this resulted in a KeyError
799
800 alloc_cands = self._get_allocation_candidates(
801 requests={'': placement_lib.RequestGroup(
802 use_same_provider=False,
803 resources={
804 'DISK_GB': 10,
805 }
806 )}
807 )
808
809 # We should only have provider summary information for the sharing
810 # storage provider, since that's the only provider that can be
811 # allocated against for this request. In the future, we may look into
812 # returning the shared-with providers in the provider summaries, but
813 # that's a distant possibility.
814 expected = {
815 'shared storage': set([
816 (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
817 ]),
818 }
819 self._validate_provider_summary_resources(expected, alloc_cands)
820
821 # The allocation_requests will only include the shared storage
822 # provider because the only thing we're requesting to allocate is
823 # against the provider of DISK_GB, which happens to be the shared
824 # storage provider.
825 expected = [[('shared storage', fields.ResourceClass.DISK_GB, 10)]]
826 self._validate_allocation_requests(expected, alloc_cands)
827
828 # Now we're going to add a set of required traits into the request mix.
829 # To start off, let's request a required trait that we know has not
830 # been associated yet with any provider, and ensure we get no results
831 alloc_cands = self._get_allocation_candidates(
832 {'': placement_lib.RequestGroup(
833 use_same_provider=False,
834 resources=self.requested_resources,
835 required_traits=set([os_traits.HW_CPU_X86_AVX2]),
836 )}
837 )
838
839 # We have not yet associated the AVX2 trait to any provider, so we
840 # should get zero allocation candidates
841 p_sums = alloc_cands.provider_summaries
842 self.assertEqual(0, len(p_sums))
843
844         # If we now associate the required trait with both of our compute
845 # nodes, we should get back both compute nodes since they both now
846 # satisfy the required traits as well as the resource request
847 avx2_t = rp_obj.Trait.get_by_name(self.ctx, os_traits.HW_CPU_X86_AVX2)
848 cn1.set_traits([avx2_t])
849 cn2.set_traits([avx2_t])
850
851 alloc_cands = self._get_allocation_candidates(
852 {'': placement_lib.RequestGroup(
853 use_same_provider=False,
854 resources=self.requested_resources,
855 required_traits=set([os_traits.HW_CPU_X86_AVX2]),
856 )}
857 )
858
859 # There should be 2 compute node providers and 1 shared storage
860 # provider in the summaries.
861 expected = {
862 'cn1': set([
863 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
864 (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
865 ]),
866 'cn2': set([
867 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
868 (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
869 ]),
870 'shared storage': set([
871 (fields.ResourceClass.DISK_GB, 2000 - 100, 0)
872 ]),
873 }
874 self._validate_provider_summary_resources(expected, alloc_cands)
875
876 # Let's check that the traits listed for the compute nodes include the
877 # AVX2 trait, and the shared storage provider in the provider summaries
878 # does NOT have the AVX2 trait.
879 expected = {
880 'cn1': set(['HW_CPU_X86_AVX2']),
881 'cn2': set(['HW_CPU_X86_AVX2']),
882 'shared storage': set(['MISC_SHARES_VIA_AGGREGATE']),
883 }
884 self._validate_provider_summary_traits(expected, alloc_cands)
885
886 # Forbid the AVX2 trait
887 alloc_cands = self._get_allocation_candidates(
888 {'': placement_lib.RequestGroup(
889 use_same_provider=False,
890 resources=self.requested_resources,
891 forbidden_traits=set([os_traits.HW_CPU_X86_AVX2]),
892 )}
893 )
894 # Should be no results as both cn1 and cn2 have the trait.
895 expected = []
896 self._validate_allocation_requests(expected, alloc_cands)
897
898 # Require the AVX2 trait but forbid CUSTOM_EXTRA_FASTER, which is
899 # added to cn2
900 tb.set_traits(cn2, 'CUSTOM_EXTRA_FASTER')
901 alloc_cands = self._get_allocation_candidates(
902 {'': placement_lib.RequestGroup(
903 use_same_provider=False,
904 resources=self.requested_resources,
905 required_traits=set([os_traits.HW_CPU_X86_AVX2]),
906 forbidden_traits=set(['CUSTOM_EXTRA_FASTER']),
907 )}
908 )
909 expected = [
910 [('cn1', fields.ResourceClass.VCPU, 1),
911 ('cn1', fields.ResourceClass.MEMORY_MB, 64),
912 ('shared storage', fields.ResourceClass.DISK_GB, 1500)],
913 ]
914 self._validate_allocation_requests(expected, alloc_cands)
915
916 # Add disk to cn1, forbid sharing, and require the AVX2 trait.
917 # This should result in getting only cn1.
918 tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2048,
919 allocation_ratio=1.5)
920 alloc_cands = self._get_allocation_candidates(
921 {'': placement_lib.RequestGroup(
922 use_same_provider=False,
923 resources=self.requested_resources,
924 required_traits=set([os_traits.HW_CPU_X86_AVX2]),
925 forbidden_traits=set(['MISC_SHARES_VIA_AGGREGATE']),
926 )}
927 )
928 expected = [
929 [('cn1', fields.ResourceClass.VCPU, 1),
930 ('cn1', fields.ResourceClass.MEMORY_MB, 64),
931 ('cn1', fields.ResourceClass.DISK_GB, 1500)],
932 ]
933 self._validate_allocation_requests(expected, alloc_cands)
934
935 def test_local_with_shared_custom_resource(self):
936 """Create some resource providers that can satisfy the request for
937 resources with local VCPU and MEMORY_MB but rely on a shared resource
938 provider to satisfy a custom resource requirement and verify that the
939 allocation requests returned by AllocationCandidates have the custom
940 resource served up by the shared custom resource provider and
941 VCPU/MEMORY_MB by the compute node providers
942 """
943 # The aggregate that will be associated to everything...
944 agg_uuid = uuids.agg
945
946 # Create two compute node providers with VCPU, RAM and NO local
947 # CUSTOM_MAGIC resources, associated with the aggregate.
948 for name in ('cn1', 'cn2'):
949 cn = self._create_provider(name, agg_uuid)
950 tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
951 allocation_ratio=16.0)
952 tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 1024,
953 min_unit=64, allocation_ratio=1.5)
954
955 # Create a custom resource called MAGIC
956 magic_rc = rp_obj.ResourceClass(
957 self.ctx,
958 name='CUSTOM_MAGIC',
959 )
960 magic_rc.create()
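             # (Custom resource class names must carry the CUSTOM_ prefix;
             # standard classes such as VCPU come from fields.ResourceClass.)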
961
962 # Create the shared provider that serves CUSTOM_MAGIC, associated with
963 # the same aggregate
964 magic_p = self._create_provider('shared custom resource provider',
965 agg_uuid)
966 tb.add_inventory(magic_p, magic_rc.name, 2048, reserved=1024,
967 min_unit=10)
968
969 # Mark the magic provider as having inventory shared among any provider
970 # associated via aggregate
971 tb.set_traits(magic_p, "MISC_SHARES_VIA_AGGREGATE")
972
973 # The resources we will request
974 requested_resources = {
975 fields.ResourceClass.VCPU: 1,
976 fields.ResourceClass.MEMORY_MB: 64,
977 magic_rc.name: 512,
978 }
979
980 alloc_cands = self._get_allocation_candidates(
981 requests={'': placement_lib.RequestGroup(
982 use_same_provider=False, resources=requested_resources)})
983
984 # Verify the allocation requests that are returned. There should be 2
985 # allocation requests, one for each compute node, containing 3
986 # resources in each allocation request, one each for VCPU, RAM, and
987 # MAGIC. The amounts of the requests should correspond to the requested
988 # resource amounts in the filter:resources dict passed to
989 # AllocationCandidates.get_by_requests(). The providers for VCPU and
990 # MEMORY_MB should be the compute nodes while the provider for the
991 # MAGIC should be the shared custom resource provider.
992 expected = [
993 [('cn1', fields.ResourceClass.VCPU, 1),
994 ('cn1', fields.ResourceClass.MEMORY_MB, 64),
995 ('shared custom resource provider', magic_rc.name, 512)],
996 [('cn2', fields.ResourceClass.VCPU, 1),
997 ('cn2', fields.ResourceClass.MEMORY_MB, 64),
998 ('shared custom resource provider', magic_rc.name, 512)],
999 ]
1000 self._validate_allocation_requests(expected, alloc_cands)
1001
1002 expected = {
1003 'cn1': set([
1004 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
1005 (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
1006 ]),
1007 'cn2': set([
1008 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
1009 (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
1010 ]),
1011 'shared custom resource provider': set([
1012 (magic_rc.name, 1024, 0)
1013 ]),
1014 }
1015 self._validate_provider_summary_resources(expected, alloc_cands)
1016
1017 def test_mix_local_and_shared(self):
1018 # Create three compute node providers with VCPU and RAM, but only
1019 # the third compute node has DISK. The first two computes will
1020 # share the storage from the shared storage pool.
1021 cn1, cn2 = (self._create_provider(name, uuids.agg)
1022 for name in ('cn1', 'cn2'))
1023 # cn3 is not associated with the aggregate
1024 cn3 = self._create_provider('cn3')
1025 for cn in (cn1, cn2, cn3):
1026 tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
1027 allocation_ratio=16.0)
1028 tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 1024,
1029 min_unit=64, allocation_ratio=1.5)
1030 # Only cn3 has disk
1031 tb.add_inventory(cn3, fields.ResourceClass.DISK_GB, 2000,
1032 reserved=100, min_unit=10)
1033
1034 # Create the shared storage pool in the same aggregate as the first two
1035 # compute nodes
1036 ss = self._create_provider('shared storage', uuids.agg)
1037
1038 # Give the shared storage pool some inventory of DISK_GB
1039 tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000, reserved=100,
1040 min_unit=10)
1041
1042 tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE")
1043
1044 alloc_cands = self._get_allocation_candidates()
1045
1046 # Expect cn1, cn2, cn3 and ss in the summaries
1047 expected = {
1048 'cn1': set([
1049 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
1050 (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
1051 ]),
1052 'cn2': set([
1053 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
1054 (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
1055 ]),
1056 'cn3': set([
1057 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
1058 (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
1059 (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
1060 ]),
1061 'shared storage': set([
1062 (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
1063 ]),
1064 }
1065 self._validate_provider_summary_resources(expected, alloc_cands)
1066
1067 # Expect three allocation requests: (cn1, ss), (cn2, ss), (cn3)
1068 expected = [
1069 [('cn1', fields.ResourceClass.VCPU, 1),
1070 ('cn1', fields.ResourceClass.MEMORY_MB, 64),
1071 ('shared storage', fields.ResourceClass.DISK_GB, 1500)],
1072 [('cn2', fields.ResourceClass.VCPU, 1),
1073 ('cn2', fields.ResourceClass.MEMORY_MB, 64),
1074 ('shared storage', fields.ResourceClass.DISK_GB, 1500)],
1075 [('cn3', fields.ResourceClass.VCPU, 1),
1076 ('cn3', fields.ResourceClass.MEMORY_MB, 64),
1077 ('cn3', fields.ResourceClass.DISK_GB, 1500)],
1078 ]
1079 self._validate_allocation_requests(expected, alloc_cands)
1080
1081 # Now we're going to add a set of required traits into the request mix.
1082 # To start off, let's request a required trait that we know has not
1083 # been associated yet with any provider, and ensure we get no results
1084 alloc_cands = self._get_allocation_candidates(
1085 {'': placement_lib.RequestGroup(
1086 use_same_provider=False,
1087 resources=self.requested_resources,
1088 required_traits=set([os_traits.HW_CPU_X86_AVX2]),
1089 )}
1090 )
1091
1092 # We have not yet associated the AVX2 trait to any provider, so we
1093 # should get zero allocation candidates
1094 p_sums = alloc_cands.provider_summaries
1095 self.assertEqual(0, len(p_sums))
1096 a_reqs = alloc_cands.allocation_requests
1097 self.assertEqual(0, len(a_reqs))
1098
1099         # If we now associate the required trait with all of our compute
1100 # nodes, we should get back all compute nodes since they all now
1101 # satisfy the required traits as well as the resource request
1102 for cn in (cn1, cn2, cn3):
1103 tb.set_traits(cn, os_traits.HW_CPU_X86_AVX2)
1104
1105 alloc_cands = self._get_allocation_candidates(requests={'':
1106 placement_lib.RequestGroup(
1107 use_same_provider=False,
1108 resources=self.requested_resources,
1109 required_traits=set([os_traits.HW_CPU_X86_AVX2]),
1110 )}
1111 )
1112
1113 # There should be 3 compute node providers and 1 shared storage
1114 # provider in the summaries.
1115 expected = {
1116 'cn1': set([
1117 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
1118 (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
1119 ]),
1120 'cn2': set([
1121 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
1122 (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
1123 ]),
1124 'cn3': set([
1125 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
1126 (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
1127 (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
1128 ]),
1129 'shared storage': set([
1130 (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
1131 ]),
1132 }
1133 self._validate_provider_summary_resources(expected, alloc_cands)
1134
1135 # Let's check that the traits listed for the compute nodes include the
1136 # AVX2 trait, and the shared storage provider in the provider summaries
1137 # does NOT have the AVX2 trait
1138 expected = {
1139 'cn1': set(['HW_CPU_X86_AVX2']),
1140 'cn2': set(['HW_CPU_X86_AVX2']),
1141 'cn3': set(['HW_CPU_X86_AVX2']),
1142 'shared storage': set(['MISC_SHARES_VIA_AGGREGATE']),
1143 }
1144 self._validate_provider_summary_traits(expected, alloc_cands)
1145
1146 # Now, let's add a new wrinkle to the equation and add a required trait
1147 # that will ONLY be satisfied by a compute node with local disk that
1148 # has SSD drives. Set this trait only on the compute node with local
1149 # disk (cn3)
1150 tb.set_traits(cn3, os_traits.HW_CPU_X86_AVX2,
1151 os_traits.STORAGE_DISK_SSD)
1152
1153 alloc_cands = self._get_allocation_candidates(
1154 {'':
1155 placement_lib.RequestGroup(
1156 use_same_provider=False,
1157 resources=self.requested_resources,
1158 required_traits=set([
1159 os_traits.HW_CPU_X86_AVX2, os_traits.STORAGE_DISK_SSD
1160 ]),
1161 )}
1162 )
1163
1164 # There should be only cn3 in the returned allocation candidates
1165 expected = [
1166 [('cn3', fields.ResourceClass.VCPU, 1),
1167 ('cn3', fields.ResourceClass.MEMORY_MB, 64),
1168 ('cn3', fields.ResourceClass.DISK_GB, 1500)],
1169 ]
1170 self._validate_allocation_requests(expected, alloc_cands)
1171
1172 expected = {
1173 'cn3': set([
1174 (fields.ResourceClass.VCPU, 24 * 16.0, 0),
1175 (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
1176 (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
1177 ]),
1178 }
1179 self._validate_provider_summary_resources(expected, alloc_cands)
1180
1181 expected = {
1182 'cn3': set(['HW_CPU_X86_AVX2', 'STORAGE_DISK_SSD'])
1183 }
1184 self._validate_provider_summary_traits(expected, alloc_cands)
1185
1186 def test_common_rc(self):
1187 """Candidates when cn and shared have inventory in the same class."""
1188 cn = self._create_provider('cn', uuids.agg1)
1189 tb.add_inventory(cn, fields.ResourceClass.VCPU, 24)
1190 tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 2048)
1191 tb.add_inventory(cn, fields.ResourceClass.DISK_GB, 1600)
1192
1193 ss = self._create_provider('ss', uuids.agg1)
1194 tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE")
1195 tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000)
1196
1197 alloc_cands = self._get_allocation_candidates()
1198
1199 # One allocation_request should have cn + ss; the other should have
1200 # just the cn.
1201 expected = [
1202 [('cn', fields.ResourceClass.VCPU, 1),
1203 ('cn', fields.ResourceClass.MEMORY_MB, 64),
1204 ('cn', fields.ResourceClass.DISK_GB, 1500)],
1205 [('cn', fields.ResourceClass.VCPU, 1),
1206 ('cn', fields.ResourceClass.MEMORY_MB, 64),
1207 ('ss', fields.ResourceClass.DISK_GB, 1500)],
1208 ]
1209
1210 self._validate_allocation_requests(expected, alloc_cands)
1211
1212 expected = {
1213 'cn': set([
1214 (fields.ResourceClass.VCPU, 24, 0),
1215 (fields.ResourceClass.MEMORY_MB, 2048, 0),
1216 (fields.ResourceClass.DISK_GB, 1600, 0),
1217 ]),
1218 'ss': set([
1219 (fields.ResourceClass.DISK_GB, 2000, 0),
1220 ]),
1221 }
1222 self._validate_provider_summary_resources(expected, alloc_cands)
1223
1224         # Next let's increase the requested DISK_GB past cn's 1600 capacity
1225 requested_resources = {
1226 fields.ResourceClass.VCPU: 1,
1227 fields.ResourceClass.MEMORY_MB: 64,
1228 fields.ResourceClass.DISK_GB: 1800,
1229 }
1230 alloc_cands = self._get_allocation_candidates(
1231 {'': placement_lib.RequestGroup(
1232 use_same_provider=False,
1233 resources=requested_resources,
1234 )}
1235 )
1236
1237 expected = [
1238 [('cn', fields.ResourceClass.VCPU, 1),
1239 ('cn', fields.ResourceClass.MEMORY_MB, 64),
1240 ('ss', fields.ResourceClass.DISK_GB, 1800)],
1241 ]
1242
1243 self._validate_allocation_requests(expected, alloc_cands)
1244
1245 expected = {
1246 'cn': set([
1247 (fields.ResourceClass.VCPU, 24, 0),
1248 (fields.ResourceClass.MEMORY_MB, 2048, 0),
1249 (fields.ResourceClass.DISK_GB, 1600, 0),
1250 ]),
1251 'ss': set([
1252 (fields.ResourceClass.DISK_GB, 2000, 0),
1253 ]),
1254 }
1255 self._validate_provider_summary_resources(expected, alloc_cands)
1256
1257 def test_common_rc_traits_split(self):
1258 """Validate filters when traits are split across cn and shared RPs."""
1259 # NOTE(efried): This test case only applies to the scenario where we're
1260 # requesting resources via the RequestGroup where
1261 # use_same_provider=False
1262
1263 cn = self._create_provider('cn', uuids.agg1)
1264 tb.add_inventory(cn, fields.ResourceClass.VCPU, 24)
1265 tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 2048)
1266 tb.add_inventory(cn, fields.ResourceClass.DISK_GB, 1600)
1267 # The compute node's disk is SSD
1268 tb.set_traits(cn, 'HW_CPU_X86_SSE', 'STORAGE_DISK_SSD')
1269
1270 ss = self._create_provider('ss', uuids.agg1)
1271 tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 1600)
1272 # The shared storage's disk is RAID
1273 tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_RAID')
1274
1275 alloc_cands = self._get_allocation_candidates(
1276 {'': placement_lib.RequestGroup(
1277 use_same_provider=False,
1278 resources=self.requested_resources,
1279 required_traits=set(['HW_CPU_X86_SSE', 'STORAGE_DISK_SSD',
1280 'CUSTOM_RAID'])
1281 )}
1282 )
1283
1284 # TODO(efried): Bug #1724633: we'd *like* to get no candidates, because
1285 # there's no single DISK_GB resource with both STORAGE_DISK_SSD and
1286 # CUSTOM_RAID traits.
1287 # expected = []
1288 expected = [
1289 [('cn', fields.ResourceClass.VCPU, 1),
1290 ('cn', fields.ResourceClass.MEMORY_MB, 64),
1291 ('ss', fields.ResourceClass.DISK_GB, 1500)],
1292 ]
1293 self._validate_allocation_requests(expected, alloc_cands)
1294
1295 # expected = {}
1296 expected = {
1297 'cn': set([
1298 (fields.ResourceClass.VCPU, 24, 0),
1299 (fields.ResourceClass.MEMORY_MB, 2048, 0),
1300 (fields.ResourceClass.DISK_GB, 1600, 0),
1301 ]),
1302 'ss': set([
1303 (fields.ResourceClass.DISK_GB, 1600, 0),
1304 ]),
1305 }
1306 self._validate_provider_summary_resources(expected, alloc_cands)
1307
1308 def test_only_one_sharing_provider(self):
1309 ss1 = self._create_provider('ss1', uuids.agg1)
1310 tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
1311 tb.add_inventory(ss1, fields.ResourceClass.IPV4_ADDRESS, 24)
1312 tb.add_inventory(ss1, fields.ResourceClass.SRIOV_NET_VF, 16)
1313 tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600)
1314
1315 alloc_cands = self._get_allocation_candidates(
1316 {'': placement_lib.RequestGroup(
1317 use_same_provider=False,
1318 resources={
1319 'IPV4_ADDRESS': 2,
1320 'SRIOV_NET_VF': 1,
1321 'DISK_GB': 1500,
1322 }
1323 )}
1324 )
1325
1326 expected = [
1327 [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2),
1328 ('ss1', fields.ResourceClass.SRIOV_NET_VF, 1),
1329 ('ss1', fields.ResourceClass.DISK_GB, 1500)]
1330 ]
1331 self._validate_allocation_requests(expected, alloc_cands)
1332
1333 expected = {
1334 'ss1': set([
1335 (fields.ResourceClass.IPV4_ADDRESS, 24, 0),
1336 (fields.ResourceClass.SRIOV_NET_VF, 16, 0),
1337 (fields.ResourceClass.DISK_GB, 1600, 0),
1338 ]),
1339 }
1340 self._validate_provider_summary_resources(expected, alloc_cands)
1341
1342 def test_all_sharing_providers_no_rc_overlap(self):
1343 ss1 = self._create_provider('ss1', uuids.agg1)
1344 tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
1345 tb.add_inventory(ss1, fields.ResourceClass.IPV4_ADDRESS, 24)
1346
1347 ss2 = self._create_provider('ss2', uuids.agg1)
1348 tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
1349 tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600)
1350
1351 alloc_cands = self._get_allocation_candidates(
1352 {'': placement_lib.RequestGroup(
1353 use_same_provider=False,
1354 resources={
1355 'IPV4_ADDRESS': 2,
1356 'DISK_GB': 1500,
1357 }
1358 )}
1359 )
1360
1361 expected = [
1362 [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2),
1363 ('ss2', fields.ResourceClass.DISK_GB, 1500)],
1364 ]
1365 self._validate_allocation_requests(expected, alloc_cands)
1366
1367 expected = {
1368 'ss1': set([
1369 (fields.ResourceClass.IPV4_ADDRESS, 24, 0),
1370 ]),
1371 'ss2': set([
1372 (fields.ResourceClass.DISK_GB, 1600, 0),
1373 ]),
1374 }
1375 self._validate_provider_summary_resources(expected, alloc_cands)
1376
1377 def test_all_sharing_providers_no_rc_overlap_more_classes(self):
1378 ss1 = self._create_provider('ss1', uuids.agg1)
1379 tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
1380 tb.add_inventory(ss1, fields.ResourceClass.IPV4_ADDRESS, 24)
1381 tb.add_inventory(ss1, fields.ResourceClass.SRIOV_NET_VF, 16)
1382
1383 ss2 = self._create_provider('ss2', uuids.agg1)
1384 tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
1385 tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600)
1386
1387 alloc_cands = self._get_allocation_candidates(
1388 {'': placement_lib.RequestGroup(
1389 use_same_provider=False,
1390 resources={
1391 'IPV4_ADDRESS': 2,
1392 'SRIOV_NET_VF': 1,
1393 'DISK_GB': 1500,
1394 }
1395 )}
1396 )
1397
1398 expected = [
1399 [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2),
1400 ('ss1', fields.ResourceClass.SRIOV_NET_VF, 1),
1401 ('ss2', fields.ResourceClass.DISK_GB, 1500)]
1402 ]
1403 self._validate_allocation_requests(expected, alloc_cands)
1404
1405 expected = {
1406 'ss1': set([
1407 (fields.ResourceClass.IPV4_ADDRESS, 24, 0),
1408 (fields.ResourceClass.SRIOV_NET_VF, 16, 0)
1409 ]),
1410 'ss2': set([
1411 (fields.ResourceClass.DISK_GB, 1600, 0),
1412 ]),
1413 }
1414 self._validate_provider_summary_resources(expected, alloc_cands)
1415
1416 def test_all_sharing_providers(self):
1417 ss1 = self._create_provider('ss1', uuids.agg1)
1418 tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
1419 tb.add_inventory(ss1, fields.ResourceClass.IPV4_ADDRESS, 24)
1420 tb.add_inventory(ss1, fields.ResourceClass.SRIOV_NET_VF, 16)
1421 tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600)
1422
1423 ss2 = self._create_provider('ss2', uuids.agg1)
1424 tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
1425 tb.add_inventory(ss2, fields.ResourceClass.SRIOV_NET_VF, 16)
1426 tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600)
1427
1428 alloc_cands = self._get_allocation_candidates(requests={
1429 '': placement_lib.RequestGroup(
1430 use_same_provider=False,
1431 resources={
1432 'IPV4_ADDRESS': 2,
1433 'SRIOV_NET_VF': 1,
1434 'DISK_GB': 1500,
1435 }
1436 )}
1437 )
1438
1439         # We expect four candidates:
1440         # - one that gets all the resources from ss1,
1441         # - one that gets the SRIOV_NET_VF from ss2 and the rest from ss1,
1442         # - one that gets the DISK_GB from ss2 and the rest from ss1,
1443         # - one that gets both SRIOV_NET_VF and DISK_GB from ss2 and the rest from ss1
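             # (Four because IPV4_ADDRESS can only come from ss1, while SRIOV_NET_VF
             # and DISK_GB can each come from either provider: 1 * 2 * 2 = 4.)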
1444 expected = [
1445 [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2),
1446 ('ss1', fields.ResourceClass.SRIOV_NET_VF, 1),
1447 ('ss1', fields.ResourceClass.DISK_GB, 1500)],
1448 [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2),
1449 ('ss1', fields.ResourceClass.SRIOV_NET_VF, 1),
1450 ('ss2', fields.ResourceClass.DISK_GB, 1500)],
1451 [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2),
1452 ('ss2', fields.ResourceClass.SRIOV_NET_VF, 1),
1453 ('ss1', fields.ResourceClass.DISK_GB, 1500)],
1454 [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2),
1455 ('ss2', fields.ResourceClass.SRIOV_NET_VF, 1),
1456 ('ss2', fields.ResourceClass.DISK_GB, 1500)],
1457 ]
1458 self._validate_allocation_requests(expected, alloc_cands)
1459
1460 expected = {
1461 'ss1': set([
1462 (fields.ResourceClass.IPV4_ADDRESS, 24, 0),
1463 (fields.ResourceClass.SRIOV_NET_VF, 16, 0),
1464 (fields.ResourceClass.DISK_GB, 1600, 0)
1465 ]),
1466 'ss2': set([
1467 (fields.ResourceClass.SRIOV_NET_VF, 16, 0),
1468 (fields.ResourceClass.DISK_GB, 1600, 0),
1469 ]),
1470 }
1471 self._validate_provider_summary_resources(expected, alloc_cands)
1472
1473 def test_two_non_sharing_connect_to_one_sharing_different_aggregate(self):
1474 # Covering the following setup:
1475 #
1476 # CN1 (VCPU) CN2 (VCPU)
1477 # \ agg1 / agg2
1478 # SS1 (DISK_GB)
1479 #
1480 # It is different from test_mix_local_and_shared as it uses two
1481         # different aggregates to connect the two CNs to the sharing RP
1482 cn1 = self._create_provider('cn1', uuids.agg1)
1483 tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24)
1484 tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
1485
1486 cn2 = self._create_provider('cn2', uuids.agg2)
1487 tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24)
1488 tb.add_inventory(cn2, fields.ResourceClass.MEMORY_MB, 2048)
1489
1490 ss1 = self._create_provider('ss1', uuids.agg1, uuids.agg2)
1491 tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
1492 tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600)
1493
1494 alloc_cands = self._get_allocation_candidates(
1495 {'': placement_lib.RequestGroup(
1496 use_same_provider=False,
1497 resources={
1498 'VCPU': 2,
1499 'DISK_GB': 1500,
1500 }
1501 )}
1502 )
1503 expected = [
1504 [('cn1', fields.ResourceClass.VCPU, 2),
1505 ('ss1', fields.ResourceClass.DISK_GB, 1500)],
1506 [('cn2', fields.ResourceClass.VCPU, 2),
1507 ('ss1', fields.ResourceClass.DISK_GB, 1500)],
1508 ]
1509 self._validate_allocation_requests(expected, alloc_cands)
1510
1511 expected = {
1512 'cn1': set([
1513 (fields.ResourceClass.VCPU, 24, 0),
1514 (fields.ResourceClass.MEMORY_MB, 2048, 0),
1515 ]),
1516 'cn2': set([
1517 (fields.ResourceClass.VCPU, 24, 0),
1518 (fields.ResourceClass.MEMORY_MB, 2048, 0),
1519 ]),
1520 'ss1': set([
1521 (fields.ResourceClass.DISK_GB, 1600, 0),
1522 ]),
1523 }
1524 self._validate_provider_summary_resources(expected, alloc_cands)
1525
1526 def test_two_non_sharing_one_common_and_two_unique_sharing(self):
1527 # Covering the following setup:
1528 #
1529 # CN1 (VCPU) CN2 (VCPU)
1530 # / agg3 \ agg1 / agg1 \ agg2
1531 # SS3 (IPV4) SS1 (DISK_GB) SS2 (IPV4)
1532 cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg3)
1533 tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24)
1534 tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
1535
1536 cn2 = self._create_provider('cn2', uuids.agg1, uuids.agg2)
1537 tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24)
1538 tb.add_inventory(cn2, fields.ResourceClass.MEMORY_MB, 2048)
1539
1540 # ss1 is connected to both cn1 and cn2
1541 ss1 = self._create_provider('ss1', uuids.agg1)
1542 tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
1543 tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600)
1544
1545         # ss2 is only connected to cn2
1546 ss2 = self._create_provider('ss2', uuids.agg2)
1547 tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
1548 tb.add_inventory(ss2, fields.ResourceClass.IPV4_ADDRESS, 24)
1549
1550         # ss3 is only connected to cn1
1551 ss3 = self._create_provider('ss3', uuids.agg3)
1552 tb.set_traits(ss3, "MISC_SHARES_VIA_AGGREGATE")
1553 tb.add_inventory(ss3, fields.ResourceClass.IPV4_ADDRESS, 24)
1554
1555 alloc_cands = self._get_allocation_candidates(
1556 {'': placement_lib.RequestGroup(
1557 use_same_provider=False,
1558 resources={
1559 'VCPU': 2,
1560 'DISK_GB': 1500,
1561 'IPV4_ADDRESS': 2,
1562 }
1563 )}
1564 )
1565
1566 expected = [
1567 [('cn1', fields.ResourceClass.VCPU, 2),
1568 ('ss1', fields.ResourceClass.DISK_GB, 1500),
1569 ('ss3', fields.ResourceClass.IPV4_ADDRESS, 2)],
1570 [('cn2', fields.ResourceClass.VCPU, 2),
1571 ('ss1', fields.ResourceClass.DISK_GB, 1500),
1572 ('ss2', fields.ResourceClass.IPV4_ADDRESS, 2)],
1573 ]
1574 self._validate_allocation_requests(expected, alloc_cands)
1575
1576 expected = {
1577 'cn1': set([
1578 (fields.ResourceClass.VCPU, 24, 0),
1579 (fields.ResourceClass.MEMORY_MB, 2048, 0),
1580 ]),
1581 'cn2': set([
1582 (fields.ResourceClass.VCPU, 24, 0),
1583 (fields.ResourceClass.MEMORY_MB, 2048, 0),
1584 ]),
1585 'ss1': set([
1586 (fields.ResourceClass.DISK_GB, 1600, 0),
1587 ]),
1588 'ss2': set([
1589 (fields.ResourceClass.IPV4_ADDRESS, 24, 0),
1590 ]),
1591 'ss3': set([
1592 (fields.ResourceClass.IPV4_ADDRESS, 24, 0),
1593 ]),
1594 }
1595 self._validate_provider_summary_resources(expected, alloc_cands)
1596
1597 def test_rc_not_split_between_sharing_and_non_sharing(self):
1598 # cn1(VCPU,MEM) Non-sharing RP with some of the resources
1599 # | agg1 aggregated with
1600 # ss1(DISK) sharing RP that has the rest of the resources
1601 #
1602 # cn2(VCPU) Non-sharing with one of the resources;
1603 # / agg2 \ aggregated with multiple sharing providers
1604 # ss2_1(MEM) ss2_2(DISK) with different resources.
1605
1606 cn1 = self._create_provider('cn1', uuids.agg1)
1607 tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24)
1608 tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
1609 ss1 = self._create_provider('ss1', uuids.agg1)
1610 tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 2000)
1611 tb.set_traits(ss1, 'MISC_SHARES_VIA_AGGREGATE')
1612
1613 cn2 = self._create_provider('cn2', uuids.agg2)
1614 tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24)
1615 ss2_1 = self._create_provider('ss2_1', uuids.agg2)
1616 tb.add_inventory(ss2_1, fields.ResourceClass.MEMORY_MB, 2048)
1617 tb.set_traits(ss2_1, 'MISC_SHARES_VIA_AGGREGATE')
1618 ss2_2 = self._create_provider('ss2_2', uuids.agg2)
1619 tb.add_inventory(ss2_2, fields.ResourceClass.DISK_GB, 2000)
1620 tb.set_traits(ss2_2, 'MISC_SHARES_VIA_AGGREGATE')
1621
1622 alloc_cands = self._get_allocation_candidates()
1623 expected = [
1624 [('cn1', fields.ResourceClass.VCPU, 1),
1625 ('cn1', fields.ResourceClass.MEMORY_MB, 64),
1626 ('ss1', fields.ResourceClass.DISK_GB, 1500)],
1627 [('cn2', fields.ResourceClass.VCPU, 1),
1628 ('ss2_1', fields.ResourceClass.MEMORY_MB, 64),
1629 ('ss2_2', fields.ResourceClass.DISK_GB, 1500)],
1630 ]
1631
1632 self._validate_allocation_requests(expected, alloc_cands)
1633
1634 expected = {
1635 'cn1': set([
1636 (fields.ResourceClass.VCPU, 24, 0),
1637 (fields.ResourceClass.MEMORY_MB, 2048, 0),
1638 ]),
1639 'ss1': set([
1640 (fields.ResourceClass.DISK_GB, 2000, 0),
1641 ]),
1642 'cn2': set([
1643 (fields.ResourceClass.VCPU, 24, 0),
1644 ]),
1645 'ss2_1': set([
1646 (fields.ResourceClass.MEMORY_MB, 2048, 0),
1647 ]),
1648 'ss2_2': set([
1649 (fields.ResourceClass.DISK_GB, 2000, 0),
1650 ]),
1651 }
1652 self._validate_provider_summary_resources(expected, alloc_cands)
1653
1654 def test_multiple_sharing_providers_with_same_rc(self):
1655 # cn1(VCPU,MEM) Non-sharing with some of the resources;
1656 # / agg1 \ aggregated with multiple sharing providers
1657 # ss1_1(DISK) ss1_2(DISK) with the same resource.
1658 #
1659 # cn2(VCPU) Non-sharing with one of the resources;
1660 # / agg2 \ aggregated with multiple sharing providers
1661 # ss2_1(MEM) ss2_2(DISK) with different resources.
1662
1663 cn1 = self._create_provider('cn1', uuids.agg1)
1664 tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24)
1665 tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
1666 ss1_1 = self._create_provider('ss1_1', uuids.agg1)
1667 tb.add_inventory(ss1_1, fields.ResourceClass.DISK_GB, 2000)
1668 tb.set_traits(ss1_1, 'MISC_SHARES_VIA_AGGREGATE')
1669 ss1_2 = self._create_provider('ss1_2', uuids.agg1)
1670 tb.add_inventory(ss1_2, fields.ResourceClass.DISK_GB, 2000)
1671 tb.set_traits(ss1_2, 'MISC_SHARES_VIA_AGGREGATE')
1672
1673 cn2 = self._create_provider('cn2', uuids.agg2)
1674 tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24)
1675 ss2_1 = self._create_provider('ss2_1', uuids.agg2)
1676 tb.add_inventory(ss2_1, fields.ResourceClass.MEMORY_MB, 2048)
1677 tb.set_traits(ss2_1, 'MISC_SHARES_VIA_AGGREGATE')
1678 ss2_2 = self._create_provider('ss2_2', uuids.agg2)
1679 tb.add_inventory(ss2_2, fields.ResourceClass.DISK_GB, 2000)
1680 tb.set_traits(ss2_2, 'MISC_SHARES_VIA_AGGREGATE')
1681
1682 alloc_cands = self._get_allocation_candidates()
1683 expected = [
1684 [('cn1', fields.ResourceClass.VCPU, 1),
1685 ('cn1', fields.ResourceClass.MEMORY_MB, 64),
1686 ('ss1_1', fields.ResourceClass.DISK_GB, 1500)],
1687 [('cn1', fields.ResourceClass.VCPU, 1),
1688 ('cn1', fields.ResourceClass.MEMORY_MB, 64),
1689 ('ss1_2', fields.ResourceClass.DISK_GB, 1500)],
1690 [('cn2', fields.ResourceClass.VCPU, 1),
1691 ('ss2_1', fields.ResourceClass.MEMORY_MB, 64),
1692 ('ss2_2', fields.ResourceClass.DISK_GB, 1500)],
1693 ]
1694 self._validate_allocation_requests(expected, alloc_cands)
1695
1696 expected = {
1697 'cn1': set([
1698 (fields.ResourceClass.VCPU, 24, 0),
1699 (fields.ResourceClass.MEMORY_MB, 2048, 0),
1700 ]),
1701 'ss1_1': set([
1702 (fields.ResourceClass.DISK_GB, 2000, 0),
1703 ]),
1704 'ss1_2': set([
1705 (fields.ResourceClass.DISK_GB, 2000, 0),
1706 ]),
1707 'cn2': set([
1708 (fields.ResourceClass.VCPU, 24, 0),
1709 ]),
1710 'ss2_1': set([
1711 (fields.ResourceClass.MEMORY_MB, 2048, 0),
1712 ]),
1713 'ss2_2': set([
1714 (fields.ResourceClass.DISK_GB, 2000, 0),
1715 ]),
1716 }
1717 self._validate_provider_summary_resources(expected, alloc_cands)
1718
1719 def test_sharing_providers_member_of(self):
1720 # Covering the following setup:
1721 #
1722 # CN1 (VCPU, DISK_GB) CN2 (VCPU, DISK_GB)
1723 # / agg1 \ agg2 / agg2 \ agg3
1724 # SS1 (DISK_GB) SS2 (DISK_GB) SS3 (DISK_GB)
1725 cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg2)
1726 tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24)
1727 tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 1600)
1728
1729 cn2 = self._create_provider('cn2', uuids.agg2, uuids.agg3)
1730 tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24)
1731 tb.add_inventory(cn2, fields.ResourceClass.DISK_GB, 1600)
1732
1733 # ss1 is connected to cn1
1734 ss1 = self._create_provider('ss1', uuids.agg1)
1735 tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
1736 tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600)
1737
1738 # ss2 is connected to both cn1 and cn2
1739 ss2 = self._create_provider('ss2', uuids.agg2)
1740 tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
1741 tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600)
1742
1743 # ss3 is connected to cn2
1744 ss3 = self._create_provider('ss3', uuids.agg3)
1745 tb.set_traits(ss3, "MISC_SHARES_VIA_AGGREGATE")
1746 tb.add_inventory(ss3, fields.ResourceClass.DISK_GB, 1600)
1747
1748 # Let's get allocation candidates from agg1
1749 alloc_cands = self._get_allocation_candidates(
1750 {'': placement_lib.RequestGroup(
1751 use_same_provider=False,
1752 resources={
1753 'VCPU': 2,
1754 'DISK_GB': 1500,
1755 },
1756 member_of=[[uuids.agg1]]
1757 )}
1758 )
1759
1760 expected = [
1761 [('cn1', fields.ResourceClass.VCPU, 2),
1762 ('cn1', fields.ResourceClass.DISK_GB, 1500)],
1763 [('cn1', fields.ResourceClass.VCPU, 2),
1764 ('ss1', fields.ResourceClass.DISK_GB, 1500)],
1765 ]
1766 self._validate_allocation_requests(expected, alloc_cands)
1767
1768 expected = {
1769 'cn1': set([
1770 (fields.ResourceClass.VCPU, 24, 0),
1771 (fields.ResourceClass.DISK_GB, 1600, 0),
1772 ]),
1773 'ss1': set([
1774 (fields.ResourceClass.DISK_GB, 1600, 0),
1775 ]),
1776 }
1777 self._validate_provider_summary_resources(expected, alloc_cands)
1778
1779 # Let's get allocation candidates from agg2
1780 alloc_cands = self._get_allocation_candidates(
1781 {'': placement_lib.RequestGroup(
1782 use_same_provider=False,
1783 resources={
1784 'VCPU': 2,
1785 'DISK_GB': 1500,
1786 },
1787 member_of=[[uuids.agg2]]
1788 )}
1789 )
1790
1791 expected = [
1792 [('cn1', fields.ResourceClass.VCPU, 2),
1793 ('cn1', fields.ResourceClass.DISK_GB, 1500)],
1794 [('cn1', fields.ResourceClass.VCPU, 2),
1795 ('ss2', fields.ResourceClass.DISK_GB, 1500)],
1796 [('cn2', fields.ResourceClass.VCPU, 2),
1797 ('cn2', fields.ResourceClass.DISK_GB, 1500)],
1798 [('cn2', fields.ResourceClass.VCPU, 2),
1799 ('ss2', fields.ResourceClass.DISK_GB, 1500)],
1800 ]
1801 self._validate_allocation_requests(expected, alloc_cands)
1802
1803 expected = {
1804 'cn1': set([
1805 (fields.ResourceClass.VCPU, 24, 0),
1806 (fields.ResourceClass.DISK_GB, 1600, 0),
1807 ]),
1808 'cn2': set([
1809 (fields.ResourceClass.VCPU, 24, 0),
1810 (fields.ResourceClass.DISK_GB, 1600, 0),
1811 ]),
1812 'ss2': set([
1813 (fields.ResourceClass.DISK_GB, 1600, 0),
1814 ]),
1815 }
1816 self._validate_provider_summary_resources(expected, alloc_cands)
1817
1818 # Let's move to validate multiple member_of scenario
1819 # The request from agg1 *AND* agg2 would provide only
1820 # resources from cn1 with its local DISK
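             # (In REST API terms, repeating the member_of parameter ANDs the
             # groups together, while member_of=in:<agg1>,<agg2> ORs the listed
             # aggregates; multiple member_of parameters require microversion 1.24.)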
1821 alloc_cands = self._get_allocation_candidates(
1822 {'': placement_lib.RequestGroup(
1823 use_same_provider=False,
1824 resources={
1825 'VCPU': 2,
1826 'DISK_GB': 1500,
1827 },
1828 member_of=[[uuids.agg1], [uuids.agg2]]
1829 )}
1830 )
1831
1832 expected = [
1833 [('cn1', fields.ResourceClass.VCPU, 2),
1834 ('cn1', fields.ResourceClass.DISK_GB, 1500)],
1835 ]
1836 self._validate_allocation_requests(expected, alloc_cands)
1837
1838 expected = {
1839 'cn1': set([
1840 (fields.ResourceClass.VCPU, 24, 0),
1841 (fields.ResourceClass.DISK_GB, 1600, 0),
1842 ]),
1843 }
1844 self._validate_provider_summary_resources(expected, alloc_cands)
1845
1846 # The request from agg1 *OR* agg2 would provide five candidates
1847 alloc_cands = self._get_allocation_candidates(
1848 {'': placement_lib.RequestGroup(
1849 use_same_provider=False,
1850 resources={
1851 'VCPU': 2,
1852 'DISK_GB': 1500,
1853 },
1854 member_of=[[uuids.agg1, uuids.agg2]]
1855 )}
1856 )
1857
1858 expected = [
1859 [('cn1', fields.ResourceClass.VCPU, 2),
1860 ('cn1', fields.ResourceClass.DISK_GB, 1500)],
1861 [('cn1', fields.ResourceClass.VCPU, 2),
1862 ('ss1', fields.ResourceClass.DISK_GB, 1500)],
1863 [('cn1', fields.ResourceClass.VCPU, 2),
1864 ('ss2', fields.ResourceClass.DISK_GB, 1500)],
1865 [('cn2', fields.ResourceClass.VCPU, 2),
1866 ('cn2', fields.ResourceClass.DISK_GB, 1500)],
1867 [('cn2', fields.ResourceClass.VCPU, 2),
1868 ('ss2', fields.ResourceClass.DISK_GB, 1500)],
1869 ]
1870 self._validate_allocation_requests(expected, alloc_cands)
1871
1872 expected = {
1873 'cn1': set([
1874 (fields.ResourceClass.VCPU, 24, 0),
1875 (fields.ResourceClass.DISK_GB, 1600, 0),
1876 ]),
1877 'cn2': set([
1878 (fields.ResourceClass.VCPU, 24, 0),
1879 (fields.ResourceClass.DISK_GB, 1600, 0),
1880 ]),
1881 'ss1': set([
1882 (fields.ResourceClass.DISK_GB, 1600, 0),
1883 ]),
1884 'ss2': set([
1885 (fields.ResourceClass.DISK_GB, 1600, 0),
1886 ]),
1887 }
1888 self._validate_provider_summary_resources(expected, alloc_cands)
1889
1890 def test_two_sharing_indirectly_connected_connecting_not_give_resource(
1891 self):
1892 # This covers the following setup
1893 # CN1 (VCPU, MEMORY_MB)
1894 # / \
1895 # /agg1 \agg2
1896 # / \
1897 # SS1 ( SS2 (
1898 # DISK_GB) IPV4_ADDRESS
1899 # SRIOV_NET_VF)
1900         # The request is then made for resources from the sharing RPs only.
1901
1902 ss1 = self._create_provider('ss1', uuids.agg1)
1903 tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
1904 tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600)
1905
1906 cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg2)
1907 tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24)
1908 tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
1909
1910 ss2 = self._create_provider('ss2', uuids.agg2)
1911 tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
1912 tb.add_inventory(ss2, fields.ResourceClass.IPV4_ADDRESS, 24)
1913 tb.add_inventory(ss2, fields.ResourceClass.SRIOV_NET_VF, 16)
1914
1915 alloc_cands = self._get_allocation_candidates(
1916 {'': placement_lib.RequestGroup(
1917 use_same_provider=False,
1918 resources={
1919 'IPV4_ADDRESS': 2,
1920 'SRIOV_NET_VF': 1,
1921 'DISK_GB': 1500,
1922 }
1923 )}
1924 )
1925
1926 expected = [
1927 [('ss1', fields.ResourceClass.DISK_GB, 1500),
1928 ('ss2', fields.ResourceClass.IPV4_ADDRESS, 2),
1929 ('ss2', fields.ResourceClass.SRIOV_NET_VF, 1)],
1930 ]
1931 self._validate_allocation_requests(expected, alloc_cands)
1932
1933 expected = {
1934 'ss1': set([
1935 (fields.ResourceClass.DISK_GB, 1600, 0),
1936 ]),
1937 'ss2': set([
1938 (fields.ResourceClass.IPV4_ADDRESS, 24, 0),
1939 (fields.ResourceClass.SRIOV_NET_VF, 16, 0),
1940 ]),
1941 }
1942 self._validate_provider_summary_resources(expected, alloc_cands)
1943
1944 def test_two_sharing_indirectly_connected_connecting_gives_resource(self):
1945 # This covers the following setup
1946 # CN1 (VCPU, MEMORY_MB)
1947 # / \
1948 # /agg1 \agg2
1949 # / \
1950 # SS1 ( SS2 (
1951 # DISK_GB) IPV4_ADDRESS
1952 # SRIOV_NET_VF)
1953         # The request is then made for resources from all three RPs.
1954
1955 ss1 = self._create_provider('ss1', uuids.agg1)
1956 tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
1957 tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600)
1958
1959 cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg2)
1960 tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24)
1961 tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
1962
1963 ss2 = self._create_provider('ss2', uuids.agg2)
1964 tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
1965 tb.add_inventory(ss2, fields.ResourceClass.IPV4_ADDRESS, 24)
1966 tb.add_inventory(ss2, fields.ResourceClass.SRIOV_NET_VF, 16)
1967
1968 alloc_cands = self._get_allocation_candidates(
1969 {'': placement_lib.RequestGroup(
1970 use_same_provider=False,
1971 resources={
1972 'VCPU': 2,
1973 'IPV4_ADDRESS': 2,
1974 'SRIOV_NET_VF': 1,
1975 'DISK_GB': 1500,
1976 }
1977 )}
1978 )
1979
1980 expected = [
1981 [('cn1', fields.ResourceClass.VCPU, 2),
1982 ('ss1', fields.ResourceClass.DISK_GB, 1500),
1983 ('ss2', fields.ResourceClass.IPV4_ADDRESS, 2),
1984 ('ss2', fields.ResourceClass.SRIOV_NET_VF, 1)],
1985 ]
1986 self._validate_allocation_requests(expected, alloc_cands)
1987
1988 expected = {
1989 'cn1': set([
1990 (fields.ResourceClass.VCPU, 24, 0),
1991 (fields.ResourceClass.MEMORY_MB, 2048, 0),
1992 ]),
1993 'ss1': set([
1994 (fields.ResourceClass.DISK_GB, 1600, 0),
1995 ]),
1996 'ss2': set([
1997 (fields.ResourceClass.IPV4_ADDRESS, 24, 0),
1998 (fields.ResourceClass.SRIOV_NET_VF, 16, 0),
1999 ]),
2000 }
2001 self._validate_provider_summary_resources(expected, alloc_cands)
2002
2003 def test_simple_tree_of_providers(self):
2004 """Tests that we properly winnow allocation requests when including
2005 traits in the request group and that the traits appear in the provider
2006 summaries of the returned allocation candidates
2007 """
2008 # We are setting up a single tree that looks like this:
2009 #
2010 # compute node (cn)
2011 # / \
2012 # / \
2013 # numa cell 0 numa cell 1
2014 # | |
2015 # | |
2016 # pf 0 pf 1
2017 #
2018 # The second physical function will be associated with the
2019 # HW_NIC_OFFLOAD_GENEVE trait, but not the first physical function.
2020 #
2021 # We will issue a request to _get_allocation_candidates() for VCPU,
2022 # MEMORY_MB and SRIOV_NET_VF **without** required traits, then include
2023 # a request that includes HW_NIC_OFFLOAD_GENEVE. In the latter case,
2024 # the compute node tree should be returned but the allocation requests
2025 # should only include the second physical function since the required
2026 # trait is only associated with that PF.
2027 #
2028 # Subsequently, we will consume all the SRIOV_NET_VF resources from the
2029 # second PF's inventory and attempt the same request of resources and
2030 # HW_NIC_OFFLOAD_GENEVE. We should get 0 returned results because now
2031 # the only PF that has the required trait has no inventory left.
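             # Note that the NUMA cell providers carry no inventory of their own;
             # they merely link the PFs into the tree, which is why they appear in
             # the provider summaries below with empty resource sets.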
2032 cn = self._create_provider('cn')
2033
2034 tb.add_inventory(cn, fields.ResourceClass.VCPU, 16)
2035 tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768)
2036
2037 numa_cell0 = self._create_provider('cn_numa0', parent=cn.uuid)
2038 numa_cell1 = self._create_provider('cn_numa1', parent=cn.uuid)
2039
2040 pf0 = self._create_provider('cn_numa0_pf0', parent=numa_cell0.uuid)
2041 tb.add_inventory(pf0, fields.ResourceClass.SRIOV_NET_VF, 8)
2042 pf1 = self._create_provider('cn_numa1_pf1', parent=numa_cell1.uuid)
2043 tb.add_inventory(pf1, fields.ResourceClass.SRIOV_NET_VF, 8)
2044 tb.set_traits(pf1, os_traits.HW_NIC_OFFLOAD_GENEVE)
2045
2046 alloc_cands = self._get_allocation_candidates(
2047 {'': placement_lib.RequestGroup(
2048 use_same_provider=False,
2049 resources={
2050 fields.ResourceClass.VCPU: 2,
2051 fields.ResourceClass.MEMORY_MB: 256,
2052 fields.ResourceClass.SRIOV_NET_VF: 1,
2053 }
2054 )}
2055 )
2056
2057 expected = [
2058 [('cn', fields.ResourceClass.VCPU, 2),
2059 ('cn', fields.ResourceClass.MEMORY_MB, 256),
2060 ('cn_numa0_pf0', fields.ResourceClass.SRIOV_NET_VF, 1)],
2061 [('cn', fields.ResourceClass.VCPU, 2),
2062 ('cn', fields.ResourceClass.MEMORY_MB, 256),
2063 ('cn_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1)],
2064 ]
2065 self._validate_allocation_requests(expected, alloc_cands)
2066
2067 expected = {
2068 'cn': set([
2069 (fields.ResourceClass.VCPU, 16, 0),
2070 (fields.ResourceClass.MEMORY_MB, 32768, 0),
2071 ]),
2072 'cn_numa0': set([]),
2073 'cn_numa1': set([]),
2074 'cn_numa0_pf0': set([
2075 (fields.ResourceClass.SRIOV_NET_VF, 8, 0),
2076 ]),
2077 'cn_numa1_pf1': set([
2078 (fields.ResourceClass.SRIOV_NET_VF, 8, 0),
2079 ]),
2080 }
2081 self._validate_provider_summary_resources(expected, alloc_cands)
2082
2083 expected = {
2084 'cn': set([]),
2085 'cn_numa0': set([]),
2086 'cn_numa1': set([]),
2087 'cn_numa0_pf0': set([]),
2088 'cn_numa1_pf1': set([os_traits.HW_NIC_OFFLOAD_GENEVE]),
2089 }
2090 self._validate_provider_summary_traits(expected, alloc_cands)
2091
2092         # Now add the required trait to the mix and verify that only the second
2093         # physical function remains in the allocation requests (we haven't yet
2094         # consumed its inventory of SRIOV_NET_VF).
2095 alloc_cands = self._get_allocation_candidates(
2096 {'': placement_lib.RequestGroup(
2097 use_same_provider=False,
2098 resources={
2099 fields.ResourceClass.VCPU: 2,
2100 fields.ResourceClass.MEMORY_MB: 256,
2101 fields.ResourceClass.SRIOV_NET_VF: 1,
2102 },
2103 required_traits=set([os_traits.HW_NIC_OFFLOAD_GENEVE]),
2104 )}
2105 )
2106
2107 expected = [
2108 [('cn', fields.ResourceClass.VCPU, 2),
2109 ('cn', fields.ResourceClass.MEMORY_MB, 256),
2110 ('cn_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1)],
2111 ]
2112 self._validate_allocation_requests(expected, alloc_cands)
2113
2114 expected = {
2115 'cn': set([
2116 (fields.ResourceClass.VCPU, 16, 0),
2117 (fields.ResourceClass.MEMORY_MB, 32768, 0),
2118 ]),
2119 'cn_numa0': set([]),
2120 'cn_numa1': set([]),
2121 'cn_numa0_pf0': set([
2122 (fields.ResourceClass.SRIOV_NET_VF, 8, 0),
2123 ]),
2124 'cn_numa1_pf1': set([
2125 (fields.ResourceClass.SRIOV_NET_VF, 8, 0),
2126 ]),
2127 }
2128 self._validate_provider_summary_resources(expected, alloc_cands)
2129
2130 expected = {
2131 'cn': set([]),
2132 'cn_numa0': set([]),
2133 'cn_numa1': set([]),
2134 'cn_numa0_pf0': set([]),
2135 'cn_numa1_pf1': set([os_traits.HW_NIC_OFFLOAD_GENEVE]),
2136 }
2137 self._validate_provider_summary_traits(expected, alloc_cands)
2138
2139         # Next we test a request for a resource that lives only on non-root
2140         # providers, so no root provider needs to contribute resources
2141 alloc_cands = self._get_allocation_candidates(
2142 {'': placement_lib.RequestGroup(
2143 use_same_provider=False,
2144 resources={
2145 fields.ResourceClass.SRIOV_NET_VF: 1,
2146 },
2147 )}
2148 )
2149
2150 expected = [
2151 [('cn_numa0_pf0', fields.ResourceClass.SRIOV_NET_VF, 1)],
2152 [('cn_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1)],
2153 ]
2154 self._validate_allocation_requests(expected, alloc_cands)
2155
2156 expected = {
2157 'cn': set([
2158 (fields.ResourceClass.VCPU, 16, 0),
2159 (fields.ResourceClass.MEMORY_MB, 32768, 0),
2160 ]),
2161 'cn_numa0': set([]),
2162 'cn_numa1': set([]),
2163 'cn_numa0_pf0': set([
2164 (fields.ResourceClass.SRIOV_NET_VF, 8, 0),
2165 ]),
2166 'cn_numa1_pf1': set([
2167 (fields.ResourceClass.SRIOV_NET_VF, 8, 0),
2168 ]),
2169 }
2170 self._validate_provider_summary_resources(expected, alloc_cands)
2171
2172 expected = {
2173 'cn': set([]),
2174 'cn_numa0': set([]),
2175 'cn_numa1': set([]),
2176 'cn_numa0_pf0': set([]),
2177 'cn_numa1_pf1': set([os_traits.HW_NIC_OFFLOAD_GENEVE]),
2178 }
2179 self._validate_provider_summary_traits(expected, alloc_cands)
2180
2181 # Same, but with the request in a granular group, which hits a
2182 # different code path.
2183 alloc_cands = self._get_allocation_candidates(
2184 {'': placement_lib.RequestGroup(
2185 use_same_provider=True,
2186 resources={
2187 fields.ResourceClass.SRIOV_NET_VF: 1,
2188 },
2189 )}
2190 )
2191
2192 expected = [
2193 [('cn_numa0_pf0', fields.ResourceClass.SRIOV_NET_VF, 1)],
2194 [('cn_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1)],
2195 ]
2196 self._validate_allocation_requests(expected, alloc_cands)
2197
2198 expected = {
2199 'cn': set([
2200 (fields.ResourceClass.VCPU, 16, 0),
2201 (fields.ResourceClass.MEMORY_MB, 32768, 0),
2202 ]),
2203 'cn_numa0': set([]),
2204 'cn_numa1': set([]),
2205 'cn_numa0_pf0': set([
2206 (fields.ResourceClass.SRIOV_NET_VF, 8, 0),
2207 ]),
2208 'cn_numa1_pf1': set([
2209 (fields.ResourceClass.SRIOV_NET_VF, 8, 0),
2210 ]),
2211 }
2212 self._validate_provider_summary_resources(expected, alloc_cands)
2213
2214 expected = {
2215 'cn': set([]),
2216 'cn_numa0': set([]),
2217 'cn_numa1': set([]),
2218 'cn_numa0_pf0': set([]),
2219 'cn_numa1_pf1': set([os_traits.HW_NIC_OFFLOAD_GENEVE]),
2220 }
2221 self._validate_provider_summary_traits(expected, alloc_cands)
2222
2223 # Now consume all the inventory of SRIOV_NET_VF on the second physical
2224 # function (the one with HW_NIC_OFFLOAD_GENEVE associated with it) and
2225 # verify that the same request still results in 0 results since the
2226 # function with the required trait no longer has any inventory.
2227 self.allocate_from_provider(pf1, fields.ResourceClass.SRIOV_NET_VF, 8)
2228
2229 alloc_cands = self._get_allocation_candidates(
2230 {'':
2231 placement_lib.RequestGroup(
2232 use_same_provider=False,
2233 resources={
2234 fields.ResourceClass.VCPU: 2,
2235 fields.ResourceClass.MEMORY_MB: 256,
2236 fields.ResourceClass.SRIOV_NET_VF: 1,
2237 },
2238 required_traits=set([os_traits.HW_NIC_OFFLOAD_GENEVE]),
2239 )}
2240 )
2241
2242 self._validate_allocation_requests([], alloc_cands)
2243 self._validate_provider_summary_resources({}, alloc_cands)
2244 self._validate_provider_summary_traits({}, alloc_cands)
2245
2246 def _get_rp_ids_matching_names(self, names):
2247 """Utility function to look up resource provider IDs from a set of
2248 supplied provider names directly from the API DB.
2249 """
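        # Hypothetical usage: _get_rp_ids_matching_names(['cn1', 'cn2'])
        # returns the set of internal IDs for those two providers, e.g.
        # {1, 2} (the IDs here are illustrative only).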
2250 names = map(six.text_type, names)
2251 sel = sa.select([rp_obj._RP_TBL.c.id])
2252 sel = sel.where(rp_obj._RP_TBL.c.name.in_(names))
2253 with self.placement_db.get_engine().connect() as conn:
2254 rp_ids = set([r[0] for r in conn.execute(sel)])
2255 return rp_ids
2256
2257 def test_trees_matching_all(self):
2258 """Creates a few provider trees having different inventories and
2259         allocations and tests the _get_trees_matching_all() utility
2260         function to ensure that only the providers and root provider IDs of
2261         matching provider trees are returned.
2262 """
2263 # NOTE(jaypipes): _get_trees_matching_all() expects a dict of resource
2264 # class internal identifiers, not string names
2265 resources = {
2266 fields.ResourceClass.STANDARD.index(
2267 fields.ResourceClass.VCPU): 2,
2268 fields.ResourceClass.STANDARD.index(
2269 fields.ResourceClass.MEMORY_MB): 256,
2270 fields.ResourceClass.STANDARD.index(
2271 fields.ResourceClass.SRIOV_NET_VF): 1,
2272 }
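        # (The .index() calls convert each standard resource class name to
        # its internal integer ID, so this dict is keyed on small integers
        # rather than on string names.)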
2273 req_traits = {}
2274 forbidden_traits = {}
2275 member_of = []
2276 sharing = {}
2277
2278         # Before we even set up any providers, verify that the short-circuit
2279         # logic returns an empty list
2280 trees = rp_obj._get_trees_matching_all(self.ctx,
2281 resources, req_traits, forbidden_traits, sharing, member_of)
2282 self.assertEqual([], trees)
2283
2284 # We are setting up 3 trees of providers that look like this:
2285 #
2286 # compute node (cn)
2287 # / \
2288 # / \
2289 # numa cell 0 numa cell 1
2290 # | |
2291 # | |
2292 # pf 0 pf 1
2293 cn_names = []
2294 for x in ('1', '2', '3'):
2295 name = 'cn' + x
2296 cn_name = name
2297 cn_names.append(cn_name)
2298 cn = self._create_provider(name)
2299
2300 tb.add_inventory(cn, fields.ResourceClass.VCPU, 16)
2301 tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768)
2302
2303 name = 'cn' + x + '_numa0'
2304 numa_cell0 = self._create_provider(name, parent=cn.uuid)
2305 name = 'cn' + x + '_numa1'
2306 numa_cell1 = self._create_provider(name, parent=cn.uuid)
2307
2308 name = 'cn' + x + '_numa0_pf0'
2309 pf0 = self._create_provider(name, parent=numa_cell0.uuid)
2310 tb.add_inventory(pf0, fields.ResourceClass.SRIOV_NET_VF, 8)
2311 name = 'cn' + x + '_numa1_pf1'
2312 pf1 = self._create_provider(name, parent=numa_cell1.uuid)
2313 tb.add_inventory(pf1, fields.ResourceClass.SRIOV_NET_VF, 8)
2314 # Mark only the second PF on the third compute node as having
2315 # GENEVE offload enabled
2316 if x == '3':
2317 tb.set_traits(pf1, os_traits.HW_NIC_OFFLOAD_GENEVE)
2318 # Doesn't really make a whole lot of logical sense, but allows
2319 # us to test situations where the same trait is associated with
2320 # multiple providers in the same tree and one of the providers
2321 # has inventory we will use...
2322 tb.set_traits(cn, os_traits.HW_NIC_OFFLOAD_GENEVE)
2323
2324 trees = rp_obj._get_trees_matching_all(self.ctx,
2325 resources, req_traits, forbidden_traits, sharing, member_of)
2326 # trees is a list of two-tuples of (provider ID, root provider ID)
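        # e.g. trees might look like [(cn1_id, cn1_id),
        # (cn1_numa0_pf0_id, cn1_id), ...]: each provider that can supply
        # requested resources, paired with the ID of its tree's root.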
2327 tree_root_ids = set(p[1] for p in trees)
2328 expect_root_ids = self._get_rp_ids_matching_names(cn_names)
2329 self.assertEqual(expect_root_ids, tree_root_ids)
2330
2331         # let's validate the providers in each tree as well
2332 provider_ids = set(p[0] for p in trees)
2333 provider_names = cn_names + ['cn1_numa0_pf0', 'cn1_numa1_pf1',
2334 'cn2_numa0_pf0', 'cn2_numa1_pf1',
2335 'cn3_numa0_pf0', 'cn3_numa1_pf1']
2336 expect_provider_ids = self._get_rp_ids_matching_names(provider_names)
2337 self.assertEqual(expect_provider_ids, provider_ids)
2338
2339         # OK, now consume all the VFs in the second compute node and verify
2340         # that only the first and third compute nodes are returned as root
2341         # providers by _get_trees_matching_all()
2342 cn2_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
2343 uuids.cn2_numa0_pf0)
2344 self.allocate_from_provider(cn2_pf0, fields.ResourceClass.SRIOV_NET_VF,
2345 8)
2346
2347 cn2_pf1 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
2348 uuids.cn2_numa1_pf1)
2349 self.allocate_from_provider(cn2_pf1, fields.ResourceClass.SRIOV_NET_VF,
2350 8)
2351
2352 trees = rp_obj._get_trees_matching_all(self.ctx,
2353 resources, req_traits, forbidden_traits, sharing, member_of)
2354 tree_root_ids = set(p[1] for p in trees)
2355 self.assertEqual(2, len(tree_root_ids))
2356
2357 # cn2 had all its VFs consumed, so we should only get cn1 and cn3's IDs
2358 # as the root provider IDs.
2359 cn_names = ['cn1', 'cn3']
2360 expect_root_ids = self._get_rp_ids_matching_names(cn_names)
2361         self.assertEqual(expect_root_ids, tree_root_ids)
2362
2363         # let's validate the providers in each tree as well
2364 provider_ids = set(p[0] for p in trees)
2365 provider_names = cn_names + ['cn1_numa0_pf0', 'cn1_numa1_pf1',
2366 'cn3_numa0_pf0', 'cn3_numa1_pf1']
2367 expect_provider_ids = self._get_rp_ids_matching_names(provider_names)
2368 self.assertEqual(expect_provider_ids, provider_ids)
2369
2370         # OK, now we're going to add a required trait to the mix. The only
2371         # providers decorated with the HW_NIC_OFFLOAD_GENEVE trait are the
2372         # third compute node and its second physical function. So we should
2373         # only get the third compute node back if we require that trait.
2374
2375 geneve_t = rp_obj.Trait.get_by_name(
2376 self.ctx, os_traits.HW_NIC_OFFLOAD_GENEVE)
2377 # required_traits parameter is a dict of trait name to internal ID
2378 req_traits = {
2379 geneve_t.name: geneve_t.id,
2380 }
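        # (At this stage the trait filter is applied per tree rather than
        # per provider -- see the NOTE about bug #1771707 below.)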
2381 trees = rp_obj._get_trees_matching_all(self.ctx,
2382 resources, req_traits, forbidden_traits, sharing, member_of)
2383 tree_root_ids = set(p[1] for p in trees)
2384 self.assertEqual(1, len(tree_root_ids))
2385
2386 cn_names = ['cn3']
2387 expect_root_ids = self._get_rp_ids_matching_names(cn_names)
2388         self.assertEqual(expect_root_ids, tree_root_ids)
2389
2390         # let's validate the providers in each tree as well
2391 provider_ids = set(p[0] for p in trees)
2392         # NOTE(tetsuro): We actually also get providers without the required
2393         # trait here. This is reported as bug #1771707; from the user's point
2394         # of view the bug is fixed outside of _get_trees_matching_all(), by
2395         # re-checking traits later in _check_traits_for_alloc_request().
2396         # Ideally, though, we'd return only pf1 from cn3 here via the SQL
2397         # query in _get_trees_matching_all() as an optimization.
2398 # provider_names = cn_names + ['cn3_numa1_pf1']
2399 provider_names = cn_names + ['cn3_numa0_pf0', 'cn3_numa1_pf1']
2400 expect_provider_ids = self._get_rp_ids_matching_names(provider_names)
2401 self.assertEqual(expect_provider_ids, provider_ids)
2402
2403         # Add a required trait that no provider has associated with it and
2404         # verify that no allocation candidates are returned
2405 avx2_t = rp_obj.Trait.get_by_name(
2406 self.ctx, os_traits.HW_CPU_X86_AVX2)
2407 # required_traits parameter is a dict of trait name to internal ID
2408 req_traits = {
2409 geneve_t.name: geneve_t.id,
2410 avx2_t.name: avx2_t.id,
2411 }
2412 trees = rp_obj._get_trees_matching_all(self.ctx,
2413 resources, req_traits, forbidden_traits, sharing, member_of)
2414 tree_root_ids = set(p[1] for p in trees)
2415 self.assertEqual(0, len(tree_root_ids))
2416
2417 # If we add the AVX2 trait as forbidden, not required, then we
2418 # should get back the original cn3
2419 req_traits = {
2420 geneve_t.name: geneve_t.id,
2421 }
2422 forbidden_traits = {
2423 avx2_t.name: avx2_t.id,
2424 }
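        # (Since no provider in this fixture has HW_CPU_X86_AVX2, forbidding
        # it excludes nothing and the GENEVE requirement again selects cn3.)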
2425 trees = rp_obj._get_trees_matching_all(self.ctx,
2426 resources, req_traits, forbidden_traits, sharing, member_of)
2427 tree_root_ids = set(p[1] for p in trees)
2428 self.assertEqual(1, len(tree_root_ids))
2429
2430 cn_names = ['cn3']
2431 expect_root_ids = self._get_rp_ids_matching_names(cn_names)
2432         self.assertEqual(expect_root_ids, tree_root_ids)
2433
2434         # let's validate the providers in each tree as well
2435 provider_ids = set(p[0] for p in trees)
2436         # NOTE(tetsuro): We actually also get providers without the required
2437         # trait here. This is reported as bug #1771707; from the user's point
2438         # of view the bug is fixed outside of _get_trees_matching_all(), by
2439         # re-checking traits later in _check_traits_for_alloc_request().
2440         # Ideally, though, we'd return only pf1 from cn3 here via the SQL
2441         # query in _get_trees_matching_all() as an optimization.
2442 # provider_names = cn_names + ['cn3_numa1_pf1']
2443 provider_names = cn_names + ['cn3_numa0_pf0', 'cn3_numa1_pf1']
2444 expect_provider_ids = self._get_rp_ids_matching_names(provider_names)
2445 self.assertEqual(expect_provider_ids, provider_ids)
2446
2447         # Consume all the VFs in the first and third compute nodes and
2448         # verify that no more providers are returned
2449 cn1_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
2450 uuids.cn1_numa0_pf0)
2451 self.allocate_from_provider(
2452 cn1_pf0, fields.ResourceClass.SRIOV_NET_VF, 8)
2453
2454 cn1_pf1 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
2455 uuids.cn1_numa1_pf1)
2456 self.allocate_from_provider(
2457 cn1_pf1, fields.ResourceClass.SRIOV_NET_VF, 8)
2458 cn3_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
2459 uuids.cn3_numa0_pf0)
2460 self.allocate_from_provider(
2461 cn3_pf0, fields.ResourceClass.SRIOV_NET_VF, 8)
2462
2463 cn3_pf1 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
2464 uuids.cn3_numa1_pf1)
2465 self.allocate_from_provider(
2466 cn3_pf1, fields.ResourceClass.SRIOV_NET_VF, 8)
2467
2468 trees = rp_obj._get_trees_matching_all(self.ctx,
2469 resources, req_traits, forbidden_traits, sharing, member_of)
2470 self.assertEqual([], trees)
2471
2472 def test_simple_tree_with_shared_provider(self):
2473 """Tests that we properly winnow allocation requests when including
2474 shared and nested providers
2475 """
2476         # We are setting up 2 compute node trees, each with its own shared
2477         # storage provider, that look like this:
2478 #
2479 # compute node (cn1) ----- shared storage (ss1)
2480 # / \ agg1 with 2000 DISK_GB
2481 # / \
2482 # numa cell 1_0 numa cell 1_1
2483 # | |
2484 # | |
2485 # pf 1_0 pf 1_1(HW_NIC_OFFLOAD_GENEVE)
2486 #
2487 # compute node (cn2) ----- shared storage (ss2)
2488 # / \ agg2 with 1000 DISK_GB
2489 # / \
2490 # numa cell 2_0 numa cell 2_1
2491 # | |
2492 # | |
2493 # pf 2_0 pf 2_1(HW_NIC_OFFLOAD_GENEVE)
2494 #
2495         # The second physical function in both trees (pf1_1, pf2_1) will be
2496 # associated with the HW_NIC_OFFLOAD_GENEVE trait, but not the first
2497 # physical function.
2498 #
2499 # We will issue a request to _get_allocation_candidates() for VCPU,
2500         # SRIOV_NET_VF and DISK_GB **without** required traits, then issue
2501         # a request that requires HW_NIC_OFFLOAD_GENEVE. In the latter case,
2502 # the compute node tree should be returned but the allocation requests
2503 # should only include the second physical function since the required
2504 # trait is only associated with that PF.
2505
2506 cn1 = self._create_provider('cn1', uuids.agg1)
2507 cn2 = self._create_provider('cn2', uuids.agg2)
2508 tb.add_inventory(cn1, fields.ResourceClass.VCPU, 16)
2509 tb.add_inventory(cn2, fields.ResourceClass.VCPU, 16)
2510
2511 numa1_0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
2512 numa1_1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
2513 numa2_0 = self._create_provider('cn2_numa0', parent=cn2.uuid)
2514 numa2_1 = self._create_provider('cn2_numa1', parent=cn2.uuid)
2515
2516 pf1_0 = self._create_provider('cn1_numa0_pf0', parent=numa1_0.uuid)
2517 pf1_1 = self._create_provider('cn1_numa1_pf1', parent=numa1_1.uuid)
2518 pf2_0 = self._create_provider('cn2_numa0_pf0', parent=numa2_0.uuid)
2519 pf2_1 = self._create_provider('cn2_numa1_pf1', parent=numa2_1.uuid)
2520
2521 tb.add_inventory(pf1_0, fields.ResourceClass.SRIOV_NET_VF, 8)
2522 tb.add_inventory(pf1_1, fields.ResourceClass.SRIOV_NET_VF, 8)
2523 tb.add_inventory(pf2_0, fields.ResourceClass.SRIOV_NET_VF, 8)
2524 tb.add_inventory(pf2_1, fields.ResourceClass.SRIOV_NET_VF, 8)
2525 tb.set_traits(pf2_1, os_traits.HW_NIC_OFFLOAD_GENEVE)
2526 tb.set_traits(pf1_1, os_traits.HW_NIC_OFFLOAD_GENEVE)
2527
2528 ss1 = self._create_provider('ss1', uuids.agg1)
2529 ss2 = self._create_provider('ss2', uuids.agg2)
2530 tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 2000)
2531 tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1000)
2532 tb.set_traits(ss1, 'MISC_SHARES_VIA_AGGREGATE')
2533 tb.set_traits(ss2, 'MISC_SHARES_VIA_AGGREGATE')
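        # MISC_SHARES_VIA_AGGREGATE is the trait placement uses to mark a
        # provider as sharing its inventory with other providers in the same
        # aggregate; it is what ties ss1/ss2 to cn1/cn2 via agg1/agg2 here.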
2534
2535 alloc_cands = self._get_allocation_candidates(
2536 {'': placement_lib.RequestGroup(
2537 use_same_provider=False,
2538 resources={
2539 fields.ResourceClass.VCPU: 2,
2540 fields.ResourceClass.SRIOV_NET_VF: 1,
2541 fields.ResourceClass.DISK_GB: 1500,
2542 })
2543 }
2544 )
2545
2546         # cn2 is not in the allocation candidates because its shared storage
2547         # provider (ss2) doesn't have enough DISK_GB (1000 < requested 1500).
2548 expected = [
2549 [('cn1', fields.ResourceClass.VCPU, 2),
2550 ('cn1_numa0_pf0', fields.ResourceClass.SRIOV_NET_VF, 1),
2551 ('ss1', fields.ResourceClass.DISK_GB, 1500)],
2552 [('cn1', fields.ResourceClass.VCPU, 2),
2553 ('cn1_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1),
2554 ('ss1', fields.ResourceClass.DISK_GB, 1500)]
2555 ]
2556
2557 self._validate_allocation_requests(expected, alloc_cands)
2558
2559 expected = {
2560 'cn1': set([
2561 (fields.ResourceClass.VCPU, 16, 0)
2562 ]),
2563 'cn1_numa0': set([]),
2564 'cn1_numa1': set([]),
2565 'cn1_numa0_pf0': set([
2566 (fields.ResourceClass.SRIOV_NET_VF, 8, 0)
2567 ]),
2568 'cn1_numa1_pf1': set([
2569 (fields.ResourceClass.SRIOV_NET_VF, 8, 0)
2570 ]),
2571 'ss1': set([
2572 (fields.ResourceClass.DISK_GB, 2000, 0)
2573 ]),
2574 }
2575 self._validate_provider_summary_resources(expected, alloc_cands)
2576
2577 # Now add required traits to the mix and verify we still get the
2578 # inventory of SRIOV_NET_VF.
2579 alloc_cands = self._get_allocation_candidates(
2580 {'': placement_lib.RequestGroup(
2581 use_same_provider=False,
2582 resources={
2583 fields.ResourceClass.VCPU: 2,