ironic_host_manager: fix population of instances info on schedule
IronicHostManager currently overrides the _get_instance_info() method
of the base class and unconditionally returns an empty dict of
instances for a given compute node.
The problem with that is that in a heterogeneous cloud with both
libvirt and ironic compute nodes this will always return {} for the
former too, which is incorrect and will effectively break instance
affinity filters like DifferentHostFilter or SameHostFilter, that
check set intersections of instances running on a particular host and
the ones passed as a hint for nova-scheduler in a boot request.
IronicHostManager should use the method implementation of the base
class for non-ironic compute nodes.
This is a partial fix which only modifies _get_instance_info() called
down the select_destinations() stack. A following change will modify
_init_instance_info() that pre-populates node instances info on start
of a nova-scheduler process.
Partial-Bug: #1606496
(cherry picked from commit af218caba4)
Change-Id: Ib1ddb44d71f7b085512c1f3fc0544f7b00c754fe
This commit is contained in:
parent
c8b8365d31
commit
6f1151378b
|
@ -73,6 +73,11 @@ class IronicNodeState(host_manager.HostState):
|
|||
class IronicHostManager(host_manager.HostManager):
|
||||
"""Ironic HostManager class."""
|
||||
|
||||
@staticmethod
|
||||
def _is_ironic_compute(compute):
|
||||
ht = compute.hypervisor_type if 'hypervisor_type' in compute else None
|
||||
return ht == hv_type.IRONIC
|
||||
|
||||
def _load_filters(self):
|
||||
if CONF.scheduler_use_baremetal_filters:
|
||||
return CONF.baremetal_scheduler_default_filters
|
||||
|
@ -81,7 +86,7 @@ class IronicHostManager(host_manager.HostManager):
|
|||
def host_state_cls(self, host, node, **kwargs):
|
||||
"""Factory function/property to create a new HostState."""
|
||||
compute = kwargs.get('compute')
|
||||
if compute and compute.get('hypervisor_type') == hv_type.IRONIC:
|
||||
if compute and self._is_ironic_compute(compute):
|
||||
return IronicNodeState(host, node)
|
||||
else:
|
||||
return host_manager.HostState(host, node)
|
||||
|
@ -92,4 +97,9 @@ class IronicHostManager(host_manager.HostManager):
|
|||
|
||||
def _get_instance_info(self, context, compute):
|
||||
"""Ironic hosts should not pass instance info."""
|
||||
return {}
|
||||
|
||||
if compute and self._is_ironic_compute(compute):
|
||||
return {}
|
||||
else:
|
||||
return super(IronicHostManager, self)._get_instance_info(context,
|
||||
compute)
|
||||
|
|
|
@ -26,6 +26,7 @@ from nova.scheduler import filters
|
|||
from nova.scheduler import host_manager
|
||||
from nova.scheduler import ironic_host_manager
|
||||
from nova import test
|
||||
from nova.tests.unit.scheduler import fakes
|
||||
from nova.tests.unit.scheduler import ironic_fakes
|
||||
|
||||
|
||||
|
@ -92,6 +93,40 @@ class IronicHostManagerTestCase(test.NoDBTestCase):
|
|||
self.assertEqual(compute_node.free_disk_gb * 1024,
|
||||
host_states_map[state_key].free_disk_mb)
|
||||
|
||||
def test_is_ironic_compute(self):
|
||||
ironic = ironic_fakes.COMPUTE_NODES[0]
|
||||
self.assertTrue(self.host_manager._is_ironic_compute(ironic))
|
||||
|
||||
non_ironic = fakes.COMPUTE_NODES[0]
|
||||
self.assertFalse(self.host_manager._is_ironic_compute(non_ironic))
|
||||
|
||||
@mock.patch.object(host_manager.HostManager, '_get_instance_info')
|
||||
def test_get_instance_info_ironic_compute_return_empty_instance_dict(self,
|
||||
mock_get_instance_info):
|
||||
compute_node = ironic_fakes.COMPUTE_NODES[0]
|
||||
|
||||
rv = self.host_manager._get_instance_info('fake_context', compute_node)
|
||||
|
||||
# for ironic compute nodes we always return an empty dict
|
||||
self.assertEqual({}, rv)
|
||||
# base class implementation is overridden and not called
|
||||
self.assertFalse(mock_get_instance_info.called)
|
||||
|
||||
@mock.patch.object(host_manager.HostManager, '_get_instance_info')
|
||||
def test_get_instance_info_non_ironic_compute_call_super_class(self,
|
||||
mock_get_instance_info):
|
||||
expected_rv = {'fake-uuid': objects.Instance()}
|
||||
mock_get_instance_info.return_value = expected_rv
|
||||
compute_node = fakes.COMPUTE_NODES[0]
|
||||
|
||||
rv = self.host_manager._get_instance_info('fake_context', compute_node)
|
||||
|
||||
# for a non-ironic compute we call the base class implementation
|
||||
mock_get_instance_info.assert_called_once_with('fake_context',
|
||||
compute_node)
|
||||
# we return exactly what the base class implementation returned
|
||||
self.assertIs(expected_rv, rv)
|
||||
|
||||
|
||||
class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
|
||||
"""Test case for IronicHostManager class."""
|
||||
|
@ -122,7 +157,7 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
|
|||
@mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
|
||||
def test_create_ironic_node_state(self, init_mock):
|
||||
init_mock.return_value = None
|
||||
compute = {'hypervisor_type': 'ironic'}
|
||||
compute = objects.ComputeNode(hypervisor_type='ironic')
|
||||
host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
|
||||
compute=compute)
|
||||
self.assertIs(ironic_host_manager.IronicNodeState, type(host_state))
|
||||
|
|
Loading…
Reference in New Issue