Merge "Fix an error in _get_host_states when deleting a compute node" into stable/ocata

commit 6b5d3c0b97
Zuul authored 2018-01-13 00:54:09 +00:00; committed by Gerrit Code Review
3 changed files with 18 additions and 5 deletions


@@ -97,7 +97,7 @@ class FilterScheduler(driver.Scheduler):
         # host, we virtually consume resources on it so subsequent
         # selections can adjust accordingly.
-        # Note: remember, we are using an iterator here. So only
+        # Note: remember, we are using a generator-iterator here. So only
         # traverse this list once. This can bite you if the hosts
         # are being scanned in a filter or weighing function.
         hosts = self._get_all_host_states(elevated, spec_obj)

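For context on the comment fix above: _get_all_host_states hands back a generator-iterator, which is exhausted after a single pass; a second traversal silently yields nothing. A minimal standalone sketch of that single-traversal behaviour (plain Python with made-up data, not Nova code):

    host_state_map = {'host1': 'state1', 'host2': 'state2'}
    seen_nodes = ['host1', 'host2']

    # Shape of the object being returned: a generator expression that looks
    # the values up lazily, only when the caller iterates over it.
    hosts = (host_state_map[h] for h in seen_nodes)

    print(list(hosts))  # ['state1', 'state2'] -- the first traversal consumes it
    print(list(hosts))  # [] -- already exhausted, nothing is re-evaluated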

@@ -587,7 +587,7 @@ class HostManager(object):
         return self._get_host_states(context, compute_nodes)
 
     def get_all_host_states(self, context):
-        """Returns a list of HostStates that represents all the hosts
+        """Returns a generator of HostStates that represents all the hosts
         the HostManager knows about. Also, each of the consumable resources
         in HostState are pre-populated and adjusted based on data in the db.
         """
@@ -595,7 +595,7 @@ class HostManager(object):
         return self._get_host_states(context, compute_nodes)
 
     def _get_host_states(self, context, compute_nodes):
-        """Returns a tuple of HostStates given a list of computes.
+        """Returns a generator over HostStates given a list of computes.
 
         Also updates the HostStates internal mapping for the HostManager.
         """
@@ -637,7 +637,13 @@ class HostManager(object):
                          "from scheduler"), {'host': host, 'node': node})
             del self.host_state_map[state_key]
 
-        return (self.host_state_map[host] for host in seen_nodes)
+        # NOTE(mriedem): We are returning a generator, which means the global
+        # host_state_map could change due to a concurrent scheduling request
+        # where a compute node is now considered 'dead' and is removed from
+        # the host_state_map, so we have to be sure to check that the next
+        # seen_node is still in the map before returning it.
+        return (self.host_state_map[host] for host in seen_nodes
+                if host in self.host_state_map)
 
     def _get_aggregates_info(self, host):
         return [self.aggs_by_id[agg_id] for agg_id in

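To see why the membership check matters, here is a minimal, self-contained sketch of the race the NOTE(mriedem) comment describes (plain Python with illustrative names, not the real HostManager API): the lookups in the returned generator happen lazily, so without the guard they raise KeyError once a concurrent request has pruned a node from the shared map, while the guarded version simply skips the dead node.

    host_state_map = {'node1': 'state1', 'node2': 'state2', 'node4': 'state4'}
    seen_nodes = ['node1', 'node2', 'node4']

    # Old behaviour: no guard, so the dict lookups only happen when the
    # caller finally iterates over the generator.
    stale = (host_state_map[h] for h in seen_nodes)

    # A concurrent request decides node4 is dead and removes it from the
    # shared map before the first request has consumed its generator.
    del host_state_map['node4']

    try:
        list(stale)
    except KeyError as exc:
        print('unguarded generator raised KeyError for %s' % exc)

    # Fixed behaviour (mirrors the patch): re-check membership at iteration time.
    guarded = (host_state_map[h] for h in seen_nodes if h in host_state_map)
    del host_state_map['node2']   # another node vanishes mid-flight
    print(list(guarded))          # ['state1'] -- dead nodes are skipped, no KeyError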

@@ -900,7 +900,10 @@ class HostManagerChangedNodesTestCase(test.NoDBTestCase):
         context = 'fake_context'
         # first call: all nodes
-        self.host_manager.get_all_host_states(context)
+        hosts = self.host_manager.get_all_host_states(context)
+        # get_all_host_states returns a generator so convert the values into
+        # an iterator
+        host_states1 = iter(hosts)
         host_states_map = self.host_manager.host_state_map
         self.assertEqual(len(host_states_map), 4)
@@ -908,6 +911,10 @@ class HostManagerChangedNodesTestCase(test.NoDBTestCase):
         self.host_manager.get_all_host_states(context)
         host_states_map = self.host_manager.host_state_map
         self.assertEqual(len(host_states_map), 3)
+        # Fake a concurrent request that is still processing the first result
+        # to make sure we properly handle that node4 is no longer in
+        # host_state_map.
+        list(host_states1)
 
     @mock.patch('nova.objects.ServiceList.get_by_binary')
     @mock.patch('nova.objects.ComputeNodeList.get_all')
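One detail worth noting in the test above: iter() applied to a generator returns the same generator object, so host_states1 is the original lazy view from the first call rather than a copy, and draining it with list() after the second call is what exercises the fix. A tiny standalone check of that property (not part of the patch):

    gen = (n for n in range(3))
    assert iter(gen) is gen          # iter() on a generator is the generator itself
    assert list(gen) == [0, 1, 2]    # draining it works exactly once
    assert list(gen) == []           # a second drain yields nothing new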