From 7866a016f604c527e97662484c61856e9804dbfa Mon Sep 17 00:00:00 2001
From: Eric Fried
Date: Tue, 30 Jan 2018 15:10:03 -0600
Subject: [PATCH] Make generation optional in ProviderTree

Since ProviderTree methods are expected to be used in
ComputeDriver.update_provider_tree [1], wherein the generation should
not be specified (it will be ignored by
SchedulerReportClient.update_from_provider_tree [2] anyway), this
change makes the generation argument optional for ProviderTree methods
new_root and update_inventory.  (It was already optional for all the
other methods.)
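
For illustration, a virt driver's update_provider_tree may now call
these methods without tracking generations at all.  A hypothetical
sketch (the provider name 'ssp', SSP_UUID, and the inventory values
below are placeholders, not part of this change):

    def update_provider_tree(self, provider_tree, nodename):
        # Omitting the generation argument leaves the provider's
        # cached generation untouched.
        provider_tree.update_inventory(nodename, {
            'VCPU': {'total': 8, 'allocation_ratio': 16.0},
        })
        # A new root provider likewise needs no generation.
        provider_tree.new_root('ssp', SSP_UUID)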

TODOs are added to deal with generation properly in some places in the
report client - see the related bug.

[1] https://review.openstack.org/#/c/521187/30/nova/virt/driver.py
[2] https://review.openstack.org/#/c/533821/15/nova/scheduler/client/report.py@1454

Change-Id: I0d979802865f22195f53d461ab0dd1df6334a066
Related-Bug: #1746075
blueprint: update-provider-tree
---
 nova/compute/provider_tree.py                      | 17 ++++++--
 nova/scheduler/client/report.py                    | 14 +++++--
 .../openstack/placement/test_report_client.py      | 14 +++----
 nova/tests/functional/test_servers.py              | 10 ++---
 nova/tests/unit/compute/test_provider_tree.py      | 23 ++++++-----
 .../unit/scheduler/client/test_report.py           | 40 +++++++++----------
 6 files changed, 69 insertions(+), 49 deletions(-)

diff --git a/nova/compute/provider_tree.py b/nova/compute/provider_tree.py
index 0d6744738f5e..d68857778918 100644
--- a/nova/compute/provider_tree.py
+++ b/nova/compute/provider_tree.py
@@ -346,8 +346,16 @@ class ProviderTree(object):
         with self.lock:
             self._remove_with_lock(name_or_uuid)

-    def new_root(self, name, uuid, generation):
-        """Adds a new root provider to the tree, returning its UUID."""
+    def new_root(self, name, uuid, generation=None):
+        """Adds a new root provider to the tree, returning its UUID.
+
+        :param name: The name of the new root provider
+        :param uuid: The UUID of the new root provider
+        :param generation: Generation to set for the new root provider
+        :returns: the UUID of the new provider
+        :raises: ValueError if a provider with the specified uuid already
+                 exists in the tree.
+        """

         with self.lock:
             exists = True
@@ -448,7 +456,7 @@ class ProviderTree(object):
             provider = self._find_with_lock(name_or_uuid)
             return provider.has_inventory_changed(inventory)

-    def update_inventory(self, name_or_uuid, inventory, generation):
+    def update_inventory(self, name_or_uuid, inventory, generation=None):
         """Given a name or UUID of a provider and a dict of inventory resource
         records, update the provider's inventory and set the provider's
         generation.
@@ -464,7 +472,8 @@ class ProviderTree(object):
             update inventory for.
         :param inventory: dict, keyed by resource class, of inventory
                           information.
-        :param generation: The resource provider generation to set
+        :param generation: The resource provider generation to set.  If not
+                           specified, the provider's generation is not changed.
         """
         with self.lock:
             provider = self._find_with_lock(name_or_uuid)
diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py
index 8d5ae986eee8..17eeb77c05ce 100644
--- a/nova/scheduler/client/report.py
+++ b/nova/scheduler/client/report.py
@@ -797,9 +797,15 @@ class SchedulerReportClient(object):
             return None

         cur_gen = curr['resource_provider_generation']
+        # TODO(efried): This condition banks on the generation for a new RP
+        # starting at zero, which isn't part of the API.  It also is only
+        # useful as an optimization on a freshly-created RP to which nothing
+        # has ever been done.  And it's not much of an optimization, because
+        # updating the cache is super cheap.  We should remove the condition.
         if cur_gen:
             curr_inv = curr['inventories']
-            self._provider_tree.update_inventory(rp_uuid, curr_inv, cur_gen)
+            self._provider_tree.update_inventory(rp_uuid, curr_inv,
+                                                 generation=cur_gen)
         return curr

     def _refresh_associations(self, context, rp_uuid, generation=None,
@@ -861,7 +867,8 @@ class SchedulerReportClient(object):
                         # context of this compute's RP, it doesn't matter if a
                         # sharing RP is part of a tree.
                         self._provider_tree.new_root(
-                            rp['name'], rp['uuid'], rp['generation'])
+                            rp['name'], rp['uuid'],
+                            generation=rp['generation'])
                         # Now we have to (populate or) refresh that guy's traits
                         # and aggregates (but not *his* aggregate-associated
                         # providers).  No need to override force=True for newly-
@@ -991,7 +998,8 @@ class SchedulerReportClient(object):

         updated_inventories_result = result.json()
         new_gen = updated_inventories_result['resource_provider_generation']
-        self._provider_tree.update_inventory(rp_uuid, inv_data, new_gen)
+        self._provider_tree.update_inventory(rp_uuid, inv_data,
+                                             generation=new_gen)
         LOG.debug('Updated inventory for %s at generation %i',
                   rp_uuid, new_gen)
         return True
diff --git a/nova/tests/functional/api/openstack/placement/test_report_client.py b/nova/tests/functional/api/openstack/placement/test_report_client.py
index a89cede6a0cf..48c9b576e2b8 100644
--- a/nova/tests/functional/api/openstack/placement/test_report_client.py
+++ b/nova/tests/functional/api/openstack/placement/test_report_client.py
@@ -724,7 +724,7 @@ class SchedulerReportClientTests(test.TestCase):

         with self._interceptor():
             # Populate with a provider with no inventories, aggregates, traits
-            new_tree.new_root('root', uuids.root, None)
+            new_tree.new_root('root', uuids.root)
             self.client.update_from_provider_tree(self.context, new_tree)
             assert_ptrees_equal()

@@ -734,7 +734,7 @@ class SchedulerReportClientTests(test.TestCase):
             new_tree.update_aggregates('child1', [uuids.agg1, uuids.agg2])
             new_tree.new_child('grandchild1_1', uuids.child1, uuid=uuids.gc1_1)
             new_tree.update_traits(uuids.gc1_1, ['CUSTOM_PHYSNET_2'])
-            new_tree.new_root('ssp', uuids.ssp, None)
+            new_tree.new_root('ssp', uuids.ssp)
             new_tree.update_inventory('ssp', {
                 fields.ResourceClass.DISK_GB: {
                     'total': 100,
@@ -744,7 +744,7 @@ class SchedulerReportClientTests(test.TestCase):
                     'step_size': 2,
                     'allocation_ratio': 10.0,
                 },
-            }, None)
+            })
             self.client.update_from_provider_tree(self.context, new_tree)
             assert_ptrees_equal()

@@ -767,7 +767,7 @@ class SchedulerReportClientTests(test.TestCase):
                     'step_size': 1024,
                     'allocation_ratio': 1.0,
                 },
-            }, None)
+            })
             new_tree.update_aggregates(uuids.root, [uuids.agg1])
             new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX',
                                                 'HW_CPU_X86_AVX2'])
@@ -784,7 +784,7 @@ class SchedulerReportClientTests(test.TestCase):
                     'allocation_ratio': 1.0,
                 },
             }
-            new_tree.update_inventory('grandchild1_1', ipv4_inv, None)
+            new_tree.update_inventory('grandchild1_1', ipv4_inv)
             # Shared storage provider gets traits
             new_tree.update_traits('ssp', set(['MISC_SHARES_VIA_AGGREGATE',
                                                'STORAGE_DISK_SSD']))
@@ -803,7 +803,7 @@ class SchedulerReportClientTests(test.TestCase):
                     'max_unit': 250000,
                     'step_size': 5000,
                     'allocation_ratio': 8.0,
-                }), None)
+                }))
             self.assertRaises(
                 exception.ResourceProviderSyncFailed,
                 self.client.update_from_provider_tree, self.context, new_tree)
@@ -825,7 +825,7 @@ class SchedulerReportClientTests(test.TestCase):
                     'max_unit': 250000,
                     'step_size': 5000,
                     'allocation_ratio': 8.0,
-                }), None)
+                }))
             # Add a bogus trait
             new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX',
diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py
index bc0b37fa1542..a1aa4303f80e 100644
--- a/nova/tests/functional/test_servers.py
+++ b/nova/tests/functional/test_servers.py
@@ -1694,7 +1694,7 @@ class ProviderTreeTests(ProviderUsageBaseTestCase):
         aggs = set([uuids.agg1, uuids.agg2])

         def update_provider_tree(prov_tree, nodename):
-            prov_tree.update_inventory(self.compute.host, inv, None)
+            prov_tree.update_inventory(self.compute.host, inv)
             prov_tree.update_traits(self.compute.host, traits)
             prov_tree.update_aggregates(self.compute.host, aggs)
         self.mock_upt.side_effect = update_provider_tree
@@ -1719,7 +1719,7 @@ class ProviderTreeTests(ProviderUsageBaseTestCase):
         """
         def update_provider_tree(prov_tree, nodename):
             # Create a shared storage provider as a root
-            prov_tree.new_root('ssp', uuids.ssp, None)
+            prov_tree.new_root('ssp', uuids.ssp)
             prov_tree.update_traits(
                 'ssp', ['MISC_SHARES_VIA_AGGREGATE', 'STORAGE_DISK_SSD'])
             prov_tree.update_aggregates('ssp', [uuids.agg])
@@ -1750,7 +1750,7 @@ class ProviderTreeTests(ProviderUsageBaseTestCase):
                         'allocation_ratio': 1.0,
                     },
                 }
-                prov_tree.update_inventory('numa%d' % n, inv, None)
+                prov_tree.update_inventory('numa%d' % n, inv)
             # Each NUMA node has two PFs providing VF inventory on one of two
             # networks
             for n in (1, 2):
@@ -1771,7 +1771,7 @@ class ProviderTreeTests(ProviderUsageBaseTestCase):
                         'allocation_ratio': 1.0,
                     },
                 }
-                prov_tree.update_inventory(name, inv, None)
+                prov_tree.update_inventory(name, inv)
         self.mock_upt.side_effect = update_provider_tree

         self._run_periodics()
@@ -1858,7 +1858,7 @@ class ProviderTreeTests(ProviderUsageBaseTestCase):

     def test_update_provider_tree_bogus_resource_class(self):
         def update_provider_tree(prov_tree, nodename):
-            prov_tree.update_inventory(self.compute.host, {'FOO': {}}, None)
+            prov_tree.update_inventory(self.compute.host, {'FOO': {}})
         self.mock_upt.side_effect = update_provider_tree

         rcs = self._get_all_resource_classes()
diff --git a/nova/tests/unit/compute/test_provider_tree.py b/nova/tests/unit/compute/test_provider_tree.py
index 9da4d6ca9f69..cc0b8a238b4d 100644
--- a/nova/tests/unit/compute/test_provider_tree.py
+++ b/nova/tests/unit/compute/test_provider_tree.py
@@ -48,7 +48,6 @@ class TestProviderTree(test.NoDBTestCase):
             pt.new_root,
             cn1.hypervisor_hostname,
             cn1.uuid,
-            1,
         )
         self.assertTrue(pt.exists(cn1.uuid))

@@ -117,7 +116,7 @@ class TestProviderTree(test.NoDBTestCase):
         )
         self.assertFalse(pt.exists(cn3.uuid))
         self.assertFalse(pt.exists(cn3.hypervisor_hostname))
-        pt.new_root(cn3.hypervisor_hostname, cn3.uuid, 1)
+        pt.new_root(cn3.hypervisor_hostname, cn3.uuid)
         self.assertTrue(pt.exists(cn3.uuid))
         self.assertTrue(pt.exists(cn3.hypervisor_hostname))

@@ -127,7 +126,6 @@ class TestProviderTree(test.NoDBTestCase):
             pt.new_root,
             cn3.hypervisor_hostname,
             cn3.uuid,
-            1,
         )

         self.assertRaises(
@@ -432,7 +430,6 @@ class TestProviderTree(test.NoDBTestCase):
             pt.update_inventory,
             uuids.non_existing_rp,
             {},
-            1,
         )

     def test_has_inventory_changed(self):
@@ -467,11 +464,13 @@ class TestProviderTree(test.NoDBTestCase):
             },
         }
         self.assertTrue(pt.has_inventory_changed(cn.uuid, cn_inv))
-        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv, rp_gen))
+        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv,
+                                            generation=rp_gen))

         # Updating with the same inventory info should return False
         self.assertFalse(pt.has_inventory_changed(cn.uuid, cn_inv))
-        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv, rp_gen))
+        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv,
+                                             generation=rp_gen))

         # A data-grab's inventory should be "equal" to the original
         cndata = pt.data(cn.uuid)
@@ -479,23 +478,27 @@ class TestProviderTree(test.NoDBTestCase):

         cn_inv['VCPU']['total'] = 6
         self.assertTrue(pt.has_inventory_changed(cn.uuid, cn_inv))
-        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv, rp_gen))
+        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv,
+                                            generation=rp_gen))

         # The data() result was not affected; now the tree's copy is different
         self.assertTrue(pt.has_inventory_changed(cn.uuid, cndata.inventory))
         self.assertFalse(pt.has_inventory_changed(cn.uuid, cn_inv))
-        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv, rp_gen))
+        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv,
+                                             generation=rp_gen))

         # Deleting a key in the new record should NOT result in changes being
         # recorded...
         del cn_inv['VCPU']['allocation_ratio']
         self.assertFalse(pt.has_inventory_changed(cn.uuid, cn_inv))
-        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv, rp_gen))
+        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv,
+                                             generation=rp_gen))

         del cn_inv['MEMORY_MB']
         self.assertTrue(pt.has_inventory_changed(cn.uuid, cn_inv))
-        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv, rp_gen))
+        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv,
+                                            generation=rp_gen))

     def test_have_traits_changed_no_existing_rp(self):
         pt = self._pt_with_cns()
diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py
index 445051843ac8..836d91bd7ac0 100644
--- a/nova/tests/unit/scheduler/client/test_report.py
+++ b/nova/tests/unit/scheduler/client/test_report.py
@@ -232,10 +232,9 @@ class SchedulerReportClientTestCase(test.NoDBTestCase):
         rp_uuid = self.client._provider_tree.new_root(
             cn.hypervisor_hostname,
             cn.uuid,
-            generation,
+            generation=generation,
         )
-        self.client._provider_tree.update_inventory(rp_uuid, resources,
-                                                    generation)
+        self.client._provider_tree.update_inventory(rp_uuid, resources)

     def _validate_provider(self, name_or_uuid, **kwargs):
         """Validates existence and values of a provider in this client's
@@ -1923,7 +1922,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         for status_code in (204, 404):
             delete_mock.status_code = status_code
             # Seed the caches
-            self.client._provider_tree.new_root('compute', uuids.root, 0)
+            self.client._provider_tree.new_root('compute', uuids.root,
+                                                generation=0)
             self.client._association_refresh_time[uuids.root] = 1234

             self.client._delete_provider(uuids.root, global_request_id='gri')
@@ -1961,7 +1961,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         self.ks_adap_mock.put.return_value = resp_mock

         # Prime the provider tree cache
-        self.client._provider_tree.new_root('rp', uuids.rp, 0)
+        self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
         self.assertEqual(set(),
                         self.client._provider_tree.data(uuids.rp).aggregates)

@@ -1978,7 +1978,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
     def test_set_aggregates_for_provider_fail(self):
         self.ks_adap_mock.put.return_value = mock.Mock(status_code=503)
         # Prime the provider tree cache
-        self.client._provider_tree.new_root('rp', uuids.rp, 0)
+        self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
         self.assertRaises(
             exception.ResourceProviderUpdateFailed,
             self.client.set_aggregates_for_provider,
@@ -2169,7 +2169,7 @@ class TestTraits(SchedulerReportClientTestCase):
         self.ks_adap_mock.get.return_value = get_mock

         # Prime the provider tree cache
-        self.client._provider_tree.new_root('rp', uuids.rp, 0)
+        self.client._provider_tree.new_root('rp', uuids.rp, generation=0)

         # Mock the /rp/{u}/traits PUT to succeed
         put_mock = mock.Mock(status_code=200)
@@ -2204,7 +2204,7 @@ class TestTraits(SchedulerReportClientTestCase):
         self.ks_adap_mock.get.return_value = get_mock

         # Prime the provider tree cache
-        self.client._provider_tree.new_root('rp', uuids.rp, 0)
+        self.client._provider_tree.new_root('rp', uuids.rp, generation=0)

         # _ensure_traits exception bubbles up
         get_mock.status_code = 400
@@ -2244,7 +2244,7 @@ class TestAssociations(SchedulerReportClientTestCase):
         """Test that associations are refreshed when stale."""
         uuid = uuids.compute_node
         # Seed the provider tree so _refresh_associations finds the provider
-        self.client._provider_tree.new_root('compute', uuid, 1)
+        self.client._provider_tree.new_root('compute', uuid, generation=1)
         mock_agg_get.return_value = set([uuids.agg1])
         mock_trait_get.return_value = set(['CUSTOM_GOLD'])
         self.client._refresh_associations(self.context, uuid)
@@ -2274,7 +2274,7 @@ class TestAssociations(SchedulerReportClientTestCase):
         """Test refresh_sharing=False."""
         uuid = uuids.compute_node
         # Seed the provider tree so _refresh_associations finds the provider
-        self.client._provider_tree.new_root('compute', uuid, 1)
+        self.client._provider_tree.new_root('compute', uuid, generation=1)
         mock_agg_get.return_value = set([uuids.agg1])
         mock_trait_get.return_value = set(['CUSTOM_GOLD'])
         self.client._refresh_associations(self.context, uuid,
@@ -2325,7 +2325,7 @@ class TestAssociations(SchedulerReportClientTestCase):
         """Test that refresh associations is called when the map is stale."""
         uuid = uuids.compute_node
         # Seed the provider tree so _refresh_associations finds the provider
-        self.client._provider_tree.new_root('compute', uuid, 1)
+        self.client._provider_tree.new_root('compute', uuid, generation=1)
         mock_agg_get.return_value = set([])
         mock_trait_get.return_value = set([])
         mock_shr_get.return_value = []
@@ -2714,7 +2714,7 @@ class TestInventory(SchedulerReportClientTestCase):
         self.client._provider_tree.new_root(
             compute_node.hypervisor_hostname,
             compute_node.uuid,
-            42,
+            generation=42,
         )

         mock_get.return_value = {
@@ -2753,7 +2753,7 @@ class TestInventory(SchedulerReportClientTestCase):
         self.client._provider_tree.new_root(
             compute_node.hypervisor_hostname,
             compute_node.uuid,
-            42,
+            generation=42,
         )

         mock_get.return_value = {
@@ -2793,7 +2793,7 @@ class TestInventory(SchedulerReportClientTestCase):
         self.client._provider_tree.new_root(
             compute_node.hypervisor_hostname,
             compute_node.uuid,
-            42,
+            generation=42,
         )

         mock_get.return_value = {
@@ -2829,7 +2829,7 @@ class TestInventory(SchedulerReportClientTestCase):
         self.client._provider_tree.new_root(
             compute_node.hypervisor_hostname,
             compute_node.uuid,
-            42,
+            generation=42,
         )

         mock_get.return_value = {
@@ -2874,7 +2874,7 @@ class TestInventory(SchedulerReportClientTestCase):
         self.client._provider_tree.new_root(
             cn.hypervisor_hostname,
             cn.uuid,
-            42,
+            generation=42,
         )
         result = self.client._update_inventory(
             self.context, cn.uuid, mock.sentinel.inv_data
         )
@@ -2899,7 +2899,7 @@ class TestInventory(SchedulerReportClientTestCase):
         self.client._provider_tree.new_root(
             cn.hypervisor_hostname,
             cn.uuid,
-            42,
+            generation=42,
         )
         result = self.client._update_inventory(
             self.context, cn.uuid, mock.sentinel.inv_data
         )
@@ -3306,7 +3306,7 @@ class TestAllocations(SchedulerReportClientTestCase):
     @mock.patch("nova.objects.InstanceList.get_by_host_and_node")
     def test_delete_resource_provider_cascade(self, mock_by_host,
             mock_del_alloc, mock_delete):
-        self.client._provider_tree.new_root(uuids.cn, uuids.cn, 1)
+        self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
         cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
                                  hypervisor_hostname="fake_hostname", )
         inst1 = objects.Instance(uuid=uuids.inst1)
@@ -3329,7 +3329,7 @@ class TestAllocations(SchedulerReportClientTestCase):
     @mock.patch("nova.objects.InstanceList.get_by_host_and_node")
     def test_delete_resource_provider_no_cascade(self, mock_by_host,
             mock_del_alloc, mock_delete):
-        self.client._provider_tree.new_root(uuids.cn, uuids.cn, 1)
+        self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
         self.client._association_refresh_time[uuids.cn] = mock.Mock()
         cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
                                  hypervisor_hostname="fake_hostname", )
@@ -3351,7 +3351,7 @@ class TestAllocations(SchedulerReportClientTestCase):
     @mock.patch('nova.scheduler.client.report.LOG')
     def test_delete_resource_provider_log_calls(self, mock_log, mock_delete):
         # First, check a successful call
-        self.client._provider_tree.new_root(uuids.cn, uuids.cn, 1)
+        self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
         cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
                                  hypervisor_hostname="fake_hostname", )
         resp_mock = mock.MagicMock(status_code=204)