Merge "Make generation optional in ProviderTree"

Zuul 2018-04-05 21:03:32 +00:00 committed by Gerrit Code Review
commit 5b30526919
6 changed files with 69 additions and 49 deletions

View File

@@ -346,8 +346,16 @@ class ProviderTree(object):
         with self.lock:
             self._remove_with_lock(name_or_uuid)
 
-    def new_root(self, name, uuid, generation):
-        """Adds a new root provider to the tree, returning its UUID."""
+    def new_root(self, name, uuid, generation=None):
+        """Adds a new root provider to the tree, returning its UUID.
+
+        :param name: The name of the new root provider
+        :param uuid: The UUID of the new root provider
+        :param generation: Generation to set for the new root provider
+        :returns: the UUID of the new provider
+        :raises: ValueError if a provider with the specified uuid already
+                 exists in the tree.
+        """
         with self.lock:
             exists = True
@@ -448,7 +456,7 @@ class ProviderTree(object):
             provider = self._find_with_lock(name_or_uuid)
             return provider.has_inventory_changed(inventory)
 
-    def update_inventory(self, name_or_uuid, inventory, generation):
+    def update_inventory(self, name_or_uuid, inventory, generation=None):
         """Given a name or UUID of a provider and a dict of inventory resource
         records, update the provider's inventory and set the provider's
         generation.
@@ -464,7 +472,8 @@ class ProviderTree(object):
                              update inventory for.
         :param inventory: dict, keyed by resource class, of inventory
                           information.
-        :param generation: The resource provider generation to set
+        :param generation: The resource provider generation to set. If not
+                           specified, the provider's generation is not changed.
         """
         with self.lock:
             provider = self._find_with_lock(name_or_uuid)
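
For reference, a minimal usage sketch of the changed ProviderTree calls (the import path and the no-argument ProviderTree() constructor are assumed here; the provider name, UUID, and inventory values are purely illustrative):

from nova.compute import provider_tree

# Assumed: an empty tree can be constructed with no arguments.
tree = provider_tree.ProviderTree()

# The generation argument may now be omitted when adding a root provider.
tree.new_root('compute-host-1', '6f6c22eb-59f9-4046-9ca2-d6b1e4761ccf')

inv = {
    'VCPU': {
        'total': 8,
        'reserved': 0,
        'min_unit': 1,
        'max_unit': 8,
        'step_size': 1,
        'allocation_ratio': 16.0,
    },
}
# Omitting generation leaves the provider's generation unchanged...
tree.update_inventory('compute-host-1', inv)
# ...while callers that do know the new generation pass it as a keyword.
tree.update_inventory('compute-host-1', inv, generation=3)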

View File

@@ -797,9 +797,15 @@ class SchedulerReportClient(object):
             return None
 
         cur_gen = curr['resource_provider_generation']
+        # TODO(efried): This condition banks on the generation for a new RP
+        # starting at zero, which isn't part of the API. It also is only
+        # useful as an optimization on a freshly-created RP to which nothing
+        # has ever been done. And it's not much of an optimization, because
+        # updating the cache is super cheap. We should remove the condition.
         if cur_gen:
             curr_inv = curr['inventories']
-            self._provider_tree.update_inventory(rp_uuid, curr_inv, cur_gen)
+            self._provider_tree.update_inventory(rp_uuid, curr_inv,
+                                                 generation=cur_gen)
         return curr
 
     def _refresh_associations(self, context, rp_uuid, generation=None,
@@ -861,7 +867,8 @@ class SchedulerReportClient(object):
                         # context of this compute's RP, it doesn't matter if a
                         # sharing RP is part of a tree.
                         self._provider_tree.new_root(
-                            rp['name'], rp['uuid'], rp['generation'])
+                            rp['name'], rp['uuid'],
+                            generation=rp['generation'])
                         # Now we have to (populate or) refresh that guy's traits
                         # and aggregates (but not *his* aggregate-associated
                         # providers). No need to override force=True for newly-
@@ -991,7 +998,8 @@ class SchedulerReportClient(object):
         updated_inventories_result = result.json()
         new_gen = updated_inventories_result['resource_provider_generation']
-        self._provider_tree.update_inventory(rp_uuid, inv_data, new_gen)
+        self._provider_tree.update_inventory(rp_uuid, inv_data,
+                                             generation=new_gen)
         LOG.debug('Updated inventory for %s at generation %i',
                   rp_uuid, new_gen)
         return True
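
A small sketch of the case the TODO above describes (the response dict is hypothetical): a freshly created resource provider reports generation 0, so the truthiness check only ever skips the cache update for that one untouched provider.

# Hypothetical placement response for a provider nothing has been done to yet.
curr = {
    'resource_provider_generation': 0,
    'inventories': {},
}

cur_gen = curr['resource_provider_generation']
if cur_gen:
    # Never reached while the generation is still 0, so only a brand-new
    # provider skips the (cheap) cache update -- hence the suggestion to
    # drop the condition entirely.
    pass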

View File

@@ -724,7 +724,7 @@ class SchedulerReportClientTests(test.TestCase):
         with self._interceptor():
             # Populate with a provider with no inventories, aggregates, traits
-            new_tree.new_root('root', uuids.root, None)
+            new_tree.new_root('root', uuids.root)
             self.client.update_from_provider_tree(self.context, new_tree)
             assert_ptrees_equal()
@@ -734,7 +734,7 @@ class SchedulerReportClientTests(test.TestCase):
             new_tree.update_aggregates('child1', [uuids.agg1, uuids.agg2])
             new_tree.new_child('grandchild1_1', uuids.child1, uuid=uuids.gc1_1)
             new_tree.update_traits(uuids.gc1_1, ['CUSTOM_PHYSNET_2'])
-            new_tree.new_root('ssp', uuids.ssp, None)
+            new_tree.new_root('ssp', uuids.ssp)
             new_tree.update_inventory('ssp', {
                 fields.ResourceClass.DISK_GB: {
                     'total': 100,
@@ -744,7 +744,7 @@ class SchedulerReportClientTests(test.TestCase):
                     'step_size': 2,
                     'allocation_ratio': 10.0,
                 },
-            }, None)
+            })
             self.client.update_from_provider_tree(self.context, new_tree)
             assert_ptrees_equal()
@@ -767,7 +767,7 @@ class SchedulerReportClientTests(test.TestCase):
                     'step_size': 1024,
                     'allocation_ratio': 1.0,
                 },
-            }, None)
+            })
             new_tree.update_aggregates(uuids.root, [uuids.agg1])
             new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX',
                                                 'HW_CPU_X86_AVX2'])
@@ -784,7 +784,7 @@ class SchedulerReportClientTests(test.TestCase):
                     'allocation_ratio': 1.0,
                 },
             }
-            new_tree.update_inventory('grandchild1_1', ipv4_inv, None)
+            new_tree.update_inventory('grandchild1_1', ipv4_inv)
             # Shared storage provider gets traits
             new_tree.update_traits('ssp', set(['MISC_SHARES_VIA_AGGREGATE',
                                                'STORAGE_DISK_SSD']))
@@ -803,7 +803,7 @@ class SchedulerReportClientTests(test.TestCase):
                     'max_unit': 250000,
                     'step_size': 5000,
                     'allocation_ratio': 8.0,
-                }), None)
+                }))
             self.assertRaises(
                 exception.ResourceProviderSyncFailed,
                 self.client.update_from_provider_tree, self.context, new_tree)
@@ -825,7 +825,7 @@ class SchedulerReportClientTests(test.TestCase):
                     'max_unit': 250000,
                     'step_size': 5000,
                     'allocation_ratio': 8.0,
-                }), None)
+                }))
             # Add a bogus trait
             new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX',

View File

@@ -1694,7 +1694,7 @@ class ProviderTreeTests(ProviderUsageBaseTestCase):
         aggs = set([uuids.agg1, uuids.agg2])
 
         def update_provider_tree(prov_tree, nodename):
-            prov_tree.update_inventory(self.compute.host, inv, None)
+            prov_tree.update_inventory(self.compute.host, inv)
             prov_tree.update_traits(self.compute.host, traits)
             prov_tree.update_aggregates(self.compute.host, aggs)
         self.mock_upt.side_effect = update_provider_tree
@@ -1719,7 +1719,7 @@ class ProviderTreeTests(ProviderUsageBaseTestCase):
         """
         def update_provider_tree(prov_tree, nodename):
             # Create a shared storage provider as a root
-            prov_tree.new_root('ssp', uuids.ssp, None)
+            prov_tree.new_root('ssp', uuids.ssp)
             prov_tree.update_traits(
                 'ssp', ['MISC_SHARES_VIA_AGGREGATE', 'STORAGE_DISK_SSD'])
             prov_tree.update_aggregates('ssp', [uuids.agg])
@@ -1750,7 +1750,7 @@ class ProviderTreeTests(ProviderUsageBaseTestCase):
                        'allocation_ratio': 1.0,
                    },
                }
-                prov_tree.update_inventory('numa%d' % n, inv, None)
+                prov_tree.update_inventory('numa%d' % n, inv)
             # Each NUMA node has two PFs providing VF inventory on one of two
             # networks
             for n in (1, 2):
@@ -1771,7 +1771,7 @@ class ProviderTreeTests(ProviderUsageBaseTestCase):
                         'allocation_ratio': 1.0,
                     },
                 }
-                prov_tree.update_inventory(name, inv, None)
+                prov_tree.update_inventory(name, inv)
         self.mock_upt.side_effect = update_provider_tree
 
         self._run_periodics()
@@ -1858,7 +1858,7 @@ class ProviderTreeTests(ProviderUsageBaseTestCase):
     def test_update_provider_tree_bogus_resource_class(self):
         def update_provider_tree(prov_tree, nodename):
-            prov_tree.update_inventory(self.compute.host, {'FOO': {}}, None)
+            prov_tree.update_inventory(self.compute.host, {'FOO': {}})
         self.mock_upt.side_effect = update_provider_tree
 
         rcs = self._get_all_resource_classes()

View File

@@ -48,7 +48,6 @@ class TestProviderTree(test.NoDBTestCase):
             pt.new_root,
             cn1.hypervisor_hostname,
             cn1.uuid,
-            1,
         )
         self.assertTrue(pt.exists(cn1.uuid))
@@ -117,7 +116,7 @@ class TestProviderTree(test.NoDBTestCase):
         )
         self.assertFalse(pt.exists(cn3.uuid))
         self.assertFalse(pt.exists(cn3.hypervisor_hostname))
-        pt.new_root(cn3.hypervisor_hostname, cn3.uuid, 1)
+        pt.new_root(cn3.hypervisor_hostname, cn3.uuid)
         self.assertTrue(pt.exists(cn3.uuid))
         self.assertTrue(pt.exists(cn3.hypervisor_hostname))
@@ -127,7 +126,6 @@ class TestProviderTree(test.NoDBTestCase):
             pt.new_root,
             cn3.hypervisor_hostname,
             cn3.uuid,
-            1,
         )
         self.assertRaises(
@@ -432,7 +430,6 @@ class TestProviderTree(test.NoDBTestCase):
             pt.update_inventory,
             uuids.non_existing_rp,
             {},
-            1,
         )
 
     def test_has_inventory_changed(self):
@@ -467,11 +464,13 @@ class TestProviderTree(test.NoDBTestCase):
             },
         }
         self.assertTrue(pt.has_inventory_changed(cn.uuid, cn_inv))
-        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv, rp_gen))
+        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv,
+                                            generation=rp_gen))
 
         # Updating with the same inventory info should return False
         self.assertFalse(pt.has_inventory_changed(cn.uuid, cn_inv))
-        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv, rp_gen))
+        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv,
+                                             generation=rp_gen))
 
         # A data-grab's inventory should be "equal" to the original
         cndata = pt.data(cn.uuid)
@@ -479,23 +478,27 @@ class TestProviderTree(test.NoDBTestCase):
         cn_inv['VCPU']['total'] = 6
         self.assertTrue(pt.has_inventory_changed(cn.uuid, cn_inv))
-        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv, rp_gen))
+        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv,
+                                            generation=rp_gen))
         # The data() result was not affected; now the tree's copy is different
         self.assertTrue(pt.has_inventory_changed(cn.uuid, cndata.inventory))
 
         self.assertFalse(pt.has_inventory_changed(cn.uuid, cn_inv))
-        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv, rp_gen))
+        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv,
+                                             generation=rp_gen))
 
         # Deleting a key in the new record should NOT result in changes being
         # recorded...
         del cn_inv['VCPU']['allocation_ratio']
         self.assertFalse(pt.has_inventory_changed(cn.uuid, cn_inv))
-        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv, rp_gen))
+        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv,
+                                             generation=rp_gen))
 
         del cn_inv['MEMORY_MB']
         self.assertTrue(pt.has_inventory_changed(cn.uuid, cn_inv))
-        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv, rp_gen))
+        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv,
+                                            generation=rp_gen))
 
     def test_have_traits_changed_no_existing_rp(self):
         pt = self._pt_with_cns()

View File

@@ -232,10 +232,9 @@ class SchedulerReportClientTestCase(test.NoDBTestCase):
         rp_uuid = self.client._provider_tree.new_root(
             cn.hypervisor_hostname,
             cn.uuid,
-            generation,
+            generation=generation,
         )
-        self.client._provider_tree.update_inventory(rp_uuid, resources,
-                                                    generation)
+        self.client._provider_tree.update_inventory(rp_uuid, resources)
 
     def _validate_provider(self, name_or_uuid, **kwargs):
         """Validates existence and values of a provider in this client's
@@ -1913,7 +1912,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         for status_code in (204, 404):
             delete_mock.status_code = status_code
             # Seed the caches
-            self.client._provider_tree.new_root('compute', uuids.root, 0)
+            self.client._provider_tree.new_root('compute', uuids.root,
+                                                generation=0)
             self.client._association_refresh_time[uuids.root] = 1234
 
             self.client._delete_provider(uuids.root, global_request_id='gri')
@@ -1951,7 +1951,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         self.ks_adap_mock.put.return_value = resp_mock
 
         # Prime the provider tree cache
-        self.client._provider_tree.new_root('rp', uuids.rp, 0)
+        self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
         self.assertEqual(set(),
                          self.client._provider_tree.data(uuids.rp).aggregates)
@@ -1968,7 +1968,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
     def test_set_aggregates_for_provider_fail(self):
         self.ks_adap_mock.put.return_value = mock.Mock(status_code=503)
         # Prime the provider tree cache
-        self.client._provider_tree.new_root('rp', uuids.rp, 0)
+        self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
         self.assertRaises(
             exception.ResourceProviderUpdateFailed,
             self.client.set_aggregates_for_provider,
@@ -2157,7 +2157,7 @@ class TestTraits(SchedulerReportClientTestCase):
         self.ks_adap_mock.get.return_value = get_mock
 
         # Prime the provider tree cache
-        self.client._provider_tree.new_root('rp', uuids.rp, 0)
+        self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
 
         # Mock the /rp/{u}/traits PUT to succeed
         put_mock = mock.Mock(status_code=200)
@@ -2192,7 +2192,7 @@ class TestTraits(SchedulerReportClientTestCase):
         self.ks_adap_mock.get.return_value = get_mock
 
         # Prime the provider tree cache
-        self.client._provider_tree.new_root('rp', uuids.rp, 0)
+        self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
 
         # _ensure_traits exception bubbles up
         get_mock.status_code = 400
@@ -2232,7 +2232,7 @@ class TestAssociations(SchedulerReportClientTestCase):
         """Test that associations are refreshed when stale."""
         uuid = uuids.compute_node
         # Seed the provider tree so _refresh_associations finds the provider
-        self.client._provider_tree.new_root('compute', uuid, 1)
+        self.client._provider_tree.new_root('compute', uuid, generation=1)
         mock_agg_get.return_value = set([uuids.agg1])
         mock_trait_get.return_value = set(['CUSTOM_GOLD'])
         self.client._refresh_associations(self.context, uuid)
@@ -2262,7 +2262,7 @@ class TestAssociations(SchedulerReportClientTestCase):
         """Test refresh_sharing=False."""
         uuid = uuids.compute_node
         # Seed the provider tree so _refresh_associations finds the provider
-        self.client._provider_tree.new_root('compute', uuid, 1)
+        self.client._provider_tree.new_root('compute', uuid, generation=1)
         mock_agg_get.return_value = set([uuids.agg1])
         mock_trait_get.return_value = set(['CUSTOM_GOLD'])
         self.client._refresh_associations(self.context, uuid,
@@ -2313,7 +2313,7 @@ class TestAssociations(SchedulerReportClientTestCase):
         """Test that refresh associations is called when the map is stale."""
         uuid = uuids.compute_node
         # Seed the provider tree so _refresh_associations finds the provider
-        self.client._provider_tree.new_root('compute', uuid, 1)
+        self.client._provider_tree.new_root('compute', uuid, generation=1)
         mock_agg_get.return_value = set([])
         mock_trait_get.return_value = set([])
         mock_shr_get.return_value = []
@@ -2702,7 +2702,7 @@ class TestInventory(SchedulerReportClientTestCase):
         self.client._provider_tree.new_root(
             compute_node.hypervisor_hostname,
             compute_node.uuid,
-            42,
+            generation=42,
         )
 
         mock_get.return_value = {
@@ -2741,7 +2741,7 @@ class TestInventory(SchedulerReportClientTestCase):
         self.client._provider_tree.new_root(
             compute_node.hypervisor_hostname,
             compute_node.uuid,
-            42,
+            generation=42,
         )
 
         mock_get.return_value = {
@@ -2781,7 +2781,7 @@ class TestInventory(SchedulerReportClientTestCase):
         self.client._provider_tree.new_root(
             compute_node.hypervisor_hostname,
             compute_node.uuid,
-            42,
+            generation=42,
         )
 
         mock_get.return_value = {
@@ -2817,7 +2817,7 @@ class TestInventory(SchedulerReportClientTestCase):
         self.client._provider_tree.new_root(
             compute_node.hypervisor_hostname,
             compute_node.uuid,
-            42,
+            generation=42,
        )
 
        mock_get.return_value = {
@@ -2857,7 +2857,7 @@ class TestInventory(SchedulerReportClientTestCase):
         self.client._provider_tree.new_root(
             cn.hypervisor_hostname,
             cn.uuid,
-            42,
+            generation=42,
         )
         result = self.client._update_inventory(
             self.context, cn.uuid, mock.sentinel.inv_data
@@ -2882,7 +2882,7 @@ class TestInventory(SchedulerReportClientTestCase):
         self.client._provider_tree.new_root(
             cn.hypervisor_hostname,
             cn.uuid,
-            42,
+            generation=42,
         )
         result = self.client._update_inventory(
             self.context, cn.uuid, mock.sentinel.inv_data
@@ -3275,7 +3275,7 @@ class TestAllocations(SchedulerReportClientTestCase):
     @mock.patch("nova.objects.InstanceList.get_by_host_and_node")
     def test_delete_resource_provider_cascade(self, mock_by_host,
                                               mock_del_alloc, mock_delete):
-        self.client._provider_tree.new_root(uuids.cn, uuids.cn, 1)
+        self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
         cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
                                  hypervisor_hostname="fake_hostname", )
         inst1 = objects.Instance(uuid=uuids.inst1)
@@ -3298,7 +3298,7 @@ class TestAllocations(SchedulerReportClientTestCase):
     @mock.patch("nova.objects.InstanceList.get_by_host_and_node")
     def test_delete_resource_provider_no_cascade(self, mock_by_host,
                                                  mock_del_alloc, mock_delete):
-        self.client._provider_tree.new_root(uuids.cn, uuids.cn, 1)
+        self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
         self.client._association_refresh_time[uuids.cn] = mock.Mock()
         cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
                                  hypervisor_hostname="fake_hostname", )
@@ -3320,7 +3320,7 @@ class TestAllocations(SchedulerReportClientTestCase):
     @mock.patch('nova.scheduler.client.report.LOG')
     def test_delete_resource_provider_log_calls(self, mock_log, mock_delete):
         # First, check a successful call
-        self.client._provider_tree.new_root(uuids.cn, uuids.cn, 1)
+        self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
         cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
                                  hypervisor_hostname="fake_hostname", )
         resp_mock = fake_requests.FakeResponse(204)