Remove IronicHostManager and baremetal scheduling options

The use_baremetal_filters and baremetal_enabled_filters options were
deprecated in Pike: I843353427c90142a366ae9ca63ee4298b4f3ecd4

The IronicHostManager was deprecated in Queens:
Iebc74a09990dcda8cf0ee2a41f9ae1058d2a182a

Note that the NUMATopologyFilter should be OK in a mixed
VM/BM deployment because ironic compute nodes do not
report 'numa_topology', so those "hosts" will be filtered
out of requests for a specific NUMA topology or CPU
policy.
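
A minimal sketch of that filter logic, for illustration only (this is
not the filter's verbatim source):

def host_passes(host_state, spec_obj):
    # Requests with hw:numa_* extra specs or a dedicated CPU policy
    # carry an instance NUMA topology on the RequestSpec.
    requested_topology = spec_obj.numa_topology
    if requested_topology is None:
        # Nothing to enforce, so baremetal nodes pass through.
        return True
    if host_state.numa_topology is None:
        # Ironic compute nodes do not report 'numa_topology', so they
        # drop out of NUMA-specific requests here.
        return False
    # The real filter goes on to fit the requested cells against the
    # host topology; elided in this sketch.
    return True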

This change drops the deprecated baremetal scheduling options, and
since the IronicHostManager is really only useful with those options,
it is removed as well.

Baremetal scheduling must now go through resource classes.
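
For reference, resource class scheduling means the baremetal flavor
zeroes out the standard resources and requests one unit of the node's
custom resource class. A hedged example (the CUSTOM_BAREMETAL_GOLD name
is hypothetical and must match the resource_class set on the ironic
node):

# Hypothetical flavor extra specs for resource-class-based baremetal
# scheduling.
baremetal_extra_specs = {
    "resources:CUSTOM_BAREMETAL_GOLD": "1",  # claim one whole node
    "resources:VCPU": "0",                   # stop scheduling on vCPUs,
    "resources:MEMORY_MB": "0",              # RAM and disk for this
    "resources:DISK_GB": "0",                # flavor
}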

Change-Id: I695b250c82c8dcedcd8e2bee00c56bb2df19212c
Author: Matt Riedemann
Date:   2018-05-02 11:20:50 -04:00
Parent: 3c756ac659
Commit: a55608897e

6 changed files with 24 additions and 842 deletions

nova/conf/scheduler.py

@@ -20,9 +20,11 @@ scheduler_group = cfg.OptGroup(name="scheduler",
title="Scheduler configuration")
scheduler_opts = [
+# TODO(mriedem): We should be able to remove this now since it's not an
+# option anymore.
cfg.StrOpt("host_manager",
default="host_manager",
choices=("host_manager", "ironic_host_manager"),
choices=("host_manager",),
deprecated_name="scheduler_host_manager",
deprecated_group="DEFAULT",
help="""
@@ -31,9 +33,6 @@ The scheduler host manager to use.
The host manager manages the in-memory picture of the hosts that the scheduler
uses. The options values are chosen from the entry points under the namespace
'nova.scheduler.host_manager' in 'setup.cfg'.
NOTE: The "ironic_host_manager" option is deprecated as of the 17.0.0 Queens
release.
"""),
cfg.StrOpt("driver",
default="filter_scheduler",
@@ -333,73 +332,6 @@ Related options:
* All of the filters in this option *must* be present in the
'available_filters' option, or a SchedulerHostFilterNotFound
exception will be raised.
"""),
cfg.ListOpt("baremetal_enabled_filters",
default=[
"RetryFilter",
"AvailabilityZoneFilter",
"ComputeFilter",
"ComputeCapabilitiesFilter",
"ImagePropertiesFilter",
],
deprecated_name="baremetal_scheduler_default_filters",
deprecated_group="DEFAULT",
deprecated_for_removal=True,
deprecated_reason="""
These filters were used to overcome some of the baremetal scheduling
limitations in Nova prior to the use of the Placement API. Now scheduling will
use the custom resource class defined for each baremetal node to make its
selection.
""",
help="""
Filters used for filtering baremetal hosts.
Filters are applied in order, so place your most restrictive filters first to
make the filtering process more efficient.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* A list of zero or more strings, where each string corresponds to the name of
a filter to be used for selecting a baremetal host
Related options:
* If the 'scheduler_use_baremetal_filters' option is False, this option has
no effect.
"""),
cfg.BoolOpt("use_baremetal_filters",
deprecated_name="scheduler_use_baremetal_filters",
deprecated_group="DEFAULT",
# NOTE(mriedem): We likely can't remove this option until the
# IronicHostManager is removed, and we likely can't remove that
# until all filters can at least not fail on ironic nodes, like the
# NUMATopologyFilter.
deprecated_for_removal=True,
deprecated_reason="""
These filters were used to overcome some of the baremetal scheduling
limitations in Nova prior to the use of the Placement API. Now scheduling will
use the custom resource class defined for each baremetal node to make its
selection.
""",
default=False,
help="""
Enable baremetal filters.
Set this to True to tell the nova scheduler that it should use the filters
specified in the 'baremetal_enabled_filters' option. If you are not
scheduling baremetal nodes, leave this at the default setting of False.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Related options:
* If this option is set to True, then the filters specified in the
'baremetal_enabled_filters' are used instead of the filters
specified in 'enabled_filters'.
"""),
cfg.ListOpt("weight_classes",
default=["nova.scheduler.weights.all_weighers"],

nova/scheduler/ironic_host_manager.py

@@ -1,140 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ironic host manager.
This host manager will consume all cpu's, disk space, and
ram from a host / node as it is supporting Baremetal hosts, which can not be
subdivided into multiple instances.
"""
from oslo_log import log as logging
from oslo_log import versionutils
import nova.conf
from nova import context as context_module
from nova import objects
from nova.objects import fields as obj_fields
from nova.scheduler import host_manager
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class IronicNodeState(host_manager.HostState):
"""Mutable and immutable information tracked for a host.
This is an attempt to remove the ad-hoc data structures
previously used and lock down access.
"""
def _update_from_compute_node(self, compute):
"""Update information about a host from a ComputeNode object."""
# NOTE(jichenjc): if the compute record is just created but not updated
# some field such as free_disk_gb can be None
if 'free_disk_gb' not in compute or compute.free_disk_gb is None:
LOG.debug('Ignoring compute node %s as its usage has not been '
'updated yet.', compute.uuid)
return
self.vcpus_total = compute.vcpus
self.vcpus_used = compute.vcpus_used
self.free_ram_mb = compute.free_ram_mb
self.total_usable_ram_mb = compute.memory_mb
self.free_disk_mb = compute.free_disk_gb * 1024
self.stats = compute.stats or {}
self.uuid = compute.uuid
self.total_usable_disk_gb = compute.local_gb
self.hypervisor_type = compute.hypervisor_type
self.hypervisor_version = compute.hypervisor_version
self.hypervisor_hostname = compute.hypervisor_hostname
self.cpu_info = compute.cpu_info
if compute.supported_hv_specs:
self.supported_instances = [spec.to_list() for spec
in compute.supported_hv_specs]
else:
self.supported_instances = []
# update allocation ratios given by the ComputeNode object
self.cpu_allocation_ratio = compute.cpu_allocation_ratio
self.ram_allocation_ratio = compute.ram_allocation_ratio
self.disk_allocation_ratio = compute.disk_allocation_ratio
self.updated = compute.updated_at
def _locked_consume_from_request(self, spec_obj):
"""Consume nodes entire resources regardless of instance request."""
self.free_ram_mb = 0
self.free_disk_mb = 0
self.vcpus_used = self.vcpus_total
class IronicHostManager(host_manager.HostManager):
"""Ironic HostManager class."""
def __init__(self):
super(IronicHostManager, self).__init__()
msg = ('The IronicHostManager is deprecated and may be removed as '
'early as the 18.0.0 Rocky release. Deployments need to '
'schedule based on resource classes.')
versionutils.report_deprecated_feature(LOG, msg)
@staticmethod
def _is_ironic_compute(compute):
ht = compute.hypervisor_type if 'hypervisor_type' in compute else None
return ht == obj_fields.HVType.IRONIC
def _load_filters(self):
if CONF.filter_scheduler.use_baremetal_filters:
return CONF.filter_scheduler.baremetal_enabled_filters
return super(IronicHostManager, self)._load_filters()
def host_state_cls(self, host, node, cell, **kwargs):
"""Factory function/property to create a new HostState."""
compute = kwargs.get('compute')
if compute and self._is_ironic_compute(compute):
return IronicNodeState(host, node, cell)
else:
return host_manager.HostState(host, node, cell)
def _init_instance_info(self, computes_by_cell=None):
"""Ironic hosts should not pass instance info."""
context = context_module.RequestContext()
if not computes_by_cell:
computes_by_cell = {}
for cell in self.cells:
with context_module.target_cell(context, cell) as cctxt:
computes_by_cell[cell] = (
objects.ComputeNodeList.get_all(cctxt).objects)
non_ironic_computes = {cell: [c for c in compute_nodes
if not self._is_ironic_compute(c)]
for cell, compute_nodes in
computes_by_cell.items()}
super(IronicHostManager, self)._init_instance_info(non_ironic_computes)
def _get_instance_info(self, context, compute):
"""Ironic hosts should not pass instance info."""
if compute and self._is_ironic_compute(compute):
return {}
else:
return super(IronicHostManager, self)._get_instance_info(context,
compute)

nova/tests/unit/scheduler/test_ironic_host_manager.py

@@ -1,618 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For IronicHostManager
"""
import mock
from nova import context
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.scheduler import filters
from nova.scheduler import host_manager
from nova.scheduler import ironic_host_manager
from nova import test
from nova.tests.unit.scheduler import fakes
from nova.tests.unit.scheduler import ironic_fakes
from nova.tests import uuidsentinel as uuids
class FakeFilterClass1(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
class FakeFilterClass2(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
class IronicHostManagerTestCase(test.NoDBTestCase):
"""Test case for IronicHostManager class."""
@mock.patch.object(ironic_host_manager.IronicHostManager,
'_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def setUp(self, mock_init_agg, mock_init_inst):
super(IronicHostManagerTestCase, self).setUp()
self.host_manager = ironic_host_manager.IronicHostManager()
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def test_manager_public_api_signatures(self, mock_init_aggs,
mock_init_inst):
self.assertPublicAPISignatures(host_manager.HostManager(),
self.host_manager)
def test_state_public_api_signatures(self):
self.assertPublicAPISignatures(
host_manager.HostState("dummy",
"dummy",
uuids.cell),
ironic_host_manager.IronicNodeState("dummy",
"dummy",
uuids.cell)
)
@mock.patch('nova.objects.ServiceList.get_by_binary')
@mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.InstanceList.get_by_host')
def test_get_all_host_states(self, mock_get_by_host, mock_get_all,
mock_get_by_binary):
mock_get_all.return_value = ironic_fakes.COMPUTE_NODES
mock_get_by_binary.return_value = ironic_fakes.SERVICES
context = 'fake_context'
hosts = self.host_manager.get_all_host_states(context)
self.assertEqual(0, mock_get_by_host.call_count)
# get_all_host_states returns a generator, so make a map from it
host_states_map = {(state.host, state.nodename): state for state in
hosts}
self.assertEqual(len(host_states_map), 4)
for i in range(4):
compute_node = ironic_fakes.COMPUTE_NODES[i]
host = compute_node.host
node = compute_node.hypervisor_hostname
state_key = (host, node)
self.assertEqual(host_states_map[state_key].service,
obj_base.obj_to_primitive(
ironic_fakes.get_service_by_host(host)))
self.assertEqual(compute_node.stats,
host_states_map[state_key].stats)
self.assertEqual(compute_node.free_ram_mb,
host_states_map[state_key].free_ram_mb)
self.assertEqual(compute_node.free_disk_gb * 1024,
host_states_map[state_key].free_disk_mb)
def test_is_ironic_compute(self):
ironic = ironic_fakes.COMPUTE_NODES[0]
self.assertTrue(self.host_manager._is_ironic_compute(ironic))
non_ironic = fakes.COMPUTE_NODES[0]
self.assertFalse(self.host_manager._is_ironic_compute(non_ironic))
@mock.patch.object(host_manager.HostManager, '_get_instance_info')
def test_get_instance_info_ironic_compute_return_empty_instance_dict(self,
mock_get_instance_info):
compute_node = ironic_fakes.COMPUTE_NODES[0]
rv = self.host_manager._get_instance_info('fake_context', compute_node)
# for ironic compute nodes we always return an empty dict
self.assertEqual({}, rv)
# base class implementation is overridden and not called
self.assertFalse(mock_get_instance_info.called)
@mock.patch.object(host_manager.HostManager, '_get_instance_info')
def test_get_instance_info_non_ironic_compute_call_super_class(self,
mock_get_instance_info):
expected_rv = {uuids.fake_instance_uuid: objects.Instance()}
mock_get_instance_info.return_value = expected_rv
compute_node = fakes.COMPUTE_NODES[0]
rv = self.host_manager._get_instance_info('fake_context', compute_node)
# for a non-ironic compute we call the base class implementation
mock_get_instance_info.assert_called_once_with('fake_context',
compute_node)
# we return exactly what the base class implementation returned
self.assertIs(expected_rv, rv)
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(objects.ComputeNodeList, 'get_all')
def test_init_instance_info(self, mock_get_all,
mock_base_init_instance_info):
cn1 = objects.ComputeNode(**{'hypervisor_type': 'ironic'})
cn2 = objects.ComputeNode(**{'hypervisor_type': 'qemu'})
cn3 = objects.ComputeNode(**{'hypervisor_type': 'qemu'})
cell = objects.CellMappingList.get_all(context.get_admin_context())[0]
self.host_manager.cells = [cell]
mock_get_all.return_value.objects = [cn1, cn2, cn3]
self.host_manager._init_instance_info()
# ensure we filter out ironic nodes before calling the base class impl
mock_base_init_instance_info.assert_called_once_with(
{cell: [cn2, cn3]})
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(objects.ComputeNodeList, 'get_all')
def test_init_instance_info_compute_nodes(self, mock_get_all,
mock_base_init_instance_info):
cn1 = objects.ComputeNode(**{'hypervisor_type': 'ironic'})
cn2 = objects.ComputeNode(**{'hypervisor_type': 'qemu'})
cell = objects.CellMapping()
self.host_manager._init_instance_info(computes_by_cell={
cell: [cn1, cn2]})
# check we don't try to get nodes list if it was passed explicitly
self.assertFalse(mock_get_all.called)
# ensure we filter out ironic nodes before calling the base class impl
mock_base_init_instance_info.assert_called_once_with({cell: [cn2]})
class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
"""Test case for IronicHostManager class."""
@mock.patch.object(ironic_host_manager.IronicHostManager,
'_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def setUp(self, mock_init_agg, mock_init_inst):
super(IronicHostManagerChangedNodesTestCase, self).setUp()
self.host_manager = ironic_host_manager.IronicHostManager()
ironic_driver = "nova.virt.ironic.driver.IronicDriver"
supported_instances = [
objects.HVSpec.from_list(["i386", "baremetal", "baremetal"])]
self.compute_node = objects.ComputeNode(
id=1, local_gb=10, memory_mb=1024, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
stats=dict(
ironic_driver=ironic_driver,
cpu_arch='i386'),
supported_hv_specs=supported_instances,
free_disk_gb=10, free_ram_mb=1024,
hypervisor_type='ironic',
hypervisor_version=1,
hypervisor_hostname='fake_host',
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0,
uuid=uuids.compute_node_uuid)
@mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
def test_create_ironic_node_state(self, init_mock):
init_mock.return_value = None
compute = objects.ComputeNode(**{'hypervisor_type': 'ironic'})
host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
uuids.cell,
compute=compute)
self.assertIs(ironic_host_manager.IronicNodeState, type(host_state))
@mock.patch.object(host_manager.HostState, '__init__')
def test_create_non_ironic_host_state(self, init_mock):
init_mock.return_value = None
compute = objects.ComputeNode(**{'cpu_info': 'other cpu'})
host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
uuids.cell,
compute=compute)
self.assertIs(host_manager.HostState, type(host_state))
@mock.patch.object(host_manager.HostState, '__init__')
def test_create_host_state_null_compute(self, init_mock):
init_mock.return_value = None
host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
uuids.cell)
self.assertIs(host_manager.HostState, type(host_state))
@mock.patch('nova.objects.ServiceList.get_by_binary')
@mock.patch('nova.objects.ComputeNodeList.get_all')
def test_get_all_host_states_after_delete_one(self, mock_get_all,
mock_get_by_binary):
getter = (lambda n: n.hypervisor_hostname
if 'hypervisor_hostname' in n else None)
running_nodes = [n for n in ironic_fakes.COMPUTE_NODES
if getter(n) != 'node4uuid']
mock_get_all.side_effect = [
ironic_fakes.COMPUTE_NODES, running_nodes]
mock_get_by_binary.side_effect = [
ironic_fakes.SERVICES, ironic_fakes.SERVICES]
context = 'fake_context'
# first call: all nodes
hosts = self.host_manager.get_all_host_states(context)
# get_all_host_states returns a generator, so make a map from it
host_states_map = {(state.host, state.nodename): state for state in
hosts}
self.assertEqual(4, len(host_states_map))
# second call: just running nodes
hosts = self.host_manager.get_all_host_states(context)
host_states_map = {(state.host, state.nodename): state for state in
hosts}
self.assertEqual(3, len(host_states_map))
@mock.patch('nova.objects.ServiceList.get_by_binary')
@mock.patch('nova.objects.ComputeNodeList.get_all')
def test_get_all_host_states_after_delete_all(self, mock_get_all,
mock_get_by_binary):
mock_get_all.side_effect = [
ironic_fakes.COMPUTE_NODES, []]
mock_get_by_binary.side_effect = [
ironic_fakes.SERVICES, ironic_fakes.SERVICES]
context = 'fake_context'
# first call: all nodes
hosts = self.host_manager.get_all_host_states(context)
# get_all_host_states returns a generator, so make a map from it
host_states_map = {(state.host, state.nodename): state for state in
hosts}
self.assertEqual(len(host_states_map), 4)
# second call: no nodes
self.host_manager.get_all_host_states(context)
host_states_map = {(state.host, state.nodename): state for state in
hosts}
self.assertEqual(len(host_states_map), 0)
def test_update_from_compute_node(self):
host = ironic_host_manager.IronicNodeState("fakehost", "fakenode",
uuids.cell)
host.update(compute=self.compute_node)
self.assertEqual(1024, host.free_ram_mb)
self.assertEqual(1024, host.total_usable_ram_mb)
self.assertEqual(10240, host.free_disk_mb)
self.assertEqual(1, host.vcpus_total)
self.assertEqual(0, host.vcpus_used)
self.assertEqual(self.compute_node.stats, host.stats)
self.assertEqual('ironic', host.hypervisor_type)
self.assertEqual(1, host.hypervisor_version)
self.assertEqual('fake_host', host.hypervisor_hostname)
# Make sure the uuid is set since that's needed for the allocation
# requests (claims to Placement) made in the FilterScheduler.
self.assertEqual(self.compute_node.uuid, host.uuid)
def test_update_from_compute_node_not_ready(self):
"""Tests that we ignore a compute node that does not have its
free_disk_gb field set yet from the compute resource tracker.
"""
host = ironic_host_manager.IronicNodeState("fakehost", "fakenode",
uuids.cell)
self.compute_node.free_disk_gb = None
host.update(compute=self.compute_node)
self.assertEqual(0, host.free_disk_mb)
def test_consume_identical_instance_from_compute(self):
host = ironic_host_manager.IronicNodeState("fakehost", "fakenode",
uuids.cell)
host.update(compute=self.compute_node)
self.assertIsNone(host.updated)
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(root_gb=10, ephemeral_gb=0, memory_mb=1024,
vcpus=1),
uuid=uuids.instance)
host.consume_from_request(spec_obj)
self.assertEqual(1, host.vcpus_used)
self.assertEqual(0, host.free_ram_mb)
self.assertEqual(0, host.free_disk_mb)
self.assertIsNotNone(host.updated)
def test_consume_larger_instance_from_compute(self):
host = ironic_host_manager.IronicNodeState("fakehost", "fakenode",
uuids.cell)
host.update(compute=self.compute_node)
self.assertIsNone(host.updated)
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(root_gb=20, ephemeral_gb=0, memory_mb=2048,
vcpus=2))
host.consume_from_request(spec_obj)
self.assertEqual(1, host.vcpus_used)
self.assertEqual(0, host.free_ram_mb)
self.assertEqual(0, host.free_disk_mb)
self.assertIsNotNone(host.updated)
def test_consume_smaller_instance_from_compute(self):
host = ironic_host_manager.IronicNodeState("fakehost", "fakenode",
uuids.cell)
host.update(compute=self.compute_node)
self.assertIsNone(host.updated)
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(root_gb=5, ephemeral_gb=0, memory_mb=512,
vcpus=1))
host.consume_from_request(spec_obj)
self.assertEqual(1, host.vcpus_used)
self.assertEqual(0, host.free_ram_mb)
self.assertEqual(0, host.free_disk_mb)
self.assertIsNotNone(host.updated)
class IronicHostManagerTestFilters(test.NoDBTestCase):
"""Test filters work for IronicHostManager."""
@mock.patch.object(ironic_host_manager.IronicHostManager,
'_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def setUp(self, mock_init_agg, mock_init_inst):
super(IronicHostManagerTestFilters, self).setUp()
self.flags(available_filters=[
__name__ + '.FakeFilterClass1', __name__ + '.FakeFilterClass2'],
group='filter_scheduler')
self.flags(enabled_filters=['FakeFilterClass1'],
group='filter_scheduler')
self.flags(baremetal_enabled_filters=['FakeFilterClass2'],
group='filter_scheduler')
self.host_manager = ironic_host_manager.IronicHostManager()
cell = uuids.cell
self.fake_hosts = [ironic_host_manager.IronicNodeState(
'fake_host%s' % x, 'fake-node', cell) for x in range(1, 5)]
self.fake_hosts += [ironic_host_manager.IronicNodeState(
'fake_multihost', 'fake-node%s' % x, cell)
for x in range(1, 5)]
def test_enabled_filters(self):
enabled_filters = self.host_manager.enabled_filters
self.assertEqual(1, len(enabled_filters))
self.assertIsInstance(enabled_filters[0], FakeFilterClass1)
def test_choose_host_filters_not_found(self):
self.assertRaises(exception.SchedulerHostFilterNotFound,
self.host_manager._choose_host_filters,
'FakeFilterClass3')
def test_choose_host_filters(self):
# Test we return 1 correct filter object
host_filters = self.host_manager._choose_host_filters(
['FakeFilterClass2'])
self.assertEqual(1, len(host_filters))
self.assertIsInstance(host_filters[0], FakeFilterClass2)
def test_host_manager_enabled_filters(self):
enabled_filters = self.host_manager.enabled_filters
self.assertEqual(1, len(enabled_filters))
self.assertIsInstance(enabled_filters[0], FakeFilterClass1)
@mock.patch.object(ironic_host_manager.IronicHostManager,
'_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def test_host_manager_enabled_filters_uses_baremetal(self, mock_init_agg,
mock_init_inst):
self.flags(use_baremetal_filters=True, group='filter_scheduler')
host_manager = ironic_host_manager.IronicHostManager()
# ensure the defaults come from scheduler.baremetal_enabled_filters
# and not enabled_filters
enabled_filters = host_manager.enabled_filters
self.assertEqual(1, len(enabled_filters))
self.assertIsInstance(enabled_filters[0], FakeFilterClass2)
def test_load_filters(self):
# without scheduler.use_baremetal_filters
filters = self.host_manager._load_filters()
self.assertEqual(['FakeFilterClass1'], filters)
def test_load_filters_baremetal(self):
# with scheduler.use_baremetal_filters
self.flags(use_baremetal_filters=True, group='filter_scheduler')
filters = self.host_manager._load_filters()
self.assertEqual(['FakeFilterClass2'], filters)
def _mock_get_filtered_hosts(self, info):
info['got_objs'] = []
info['got_fprops'] = []
def fake_filter_one(_self, obj, filter_props):
info['got_objs'].append(obj)
info['got_fprops'].append(filter_props)
return True
self.stub_out(__name__ + '.FakeFilterClass1._filter_one',
fake_filter_one)
def _verify_result(self, info, result, filters=True):
for x in info['got_fprops']:
self.assertEqual(x, info['expected_fprops'])
if filters:
self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
self.assertEqual(set(info['expected_objs']), set(result))
def test_get_filtered_hosts(self):
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=[],
force_hosts=[],
force_nodes=[])
info = {'expected_objs': self.fake_hosts,
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
def test_get_filtered_hosts_with_ignore(self):
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=['fake_host1', 'fake_host3',
'fake_host5', 'fake_multihost'],
force_hosts=[],
force_nodes=[])
# [1] and [3] are host2 and host4
info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
def test_get_filtered_hosts_with_force_hosts(self):
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=[],
force_hosts=['fake_host1', 'fake_host3', 'fake_host5'],
force_nodes=[])
# [0] and [2] are host1 and host3
info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_no_matching_force_hosts(self):
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=[],
force_hosts=['fake_host5', 'fake_host6'],
force_nodes=[])
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
# Ensure ignore_hosts processed before force_hosts in host filters.
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=['fake_host1'],
force_hosts=['fake_host3', 'fake_host1'],
force_nodes=[])
# only fake_host3 should be left.
info = {'expected_objs': [self.fake_hosts[2]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
# Ensure all nodes returned for a host with many nodes
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=[],
force_hosts=['fake_multihost'],
force_nodes=[])
info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
self.fake_hosts[6], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_nodes(self):
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=[],
force_hosts=[],
force_nodes=['fake-node2', 'fake-node4', 'fake-node9'])
# [5] is fake-node2, [7] is fake-node4
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
# Ensure only overlapping results if both force host and node
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=[],
force_hosts=['fake_host1', 'fake_multihost'],
force_nodes=['fake-node2', 'fake-node9'])
# [5] is fake-node2
info = {'expected_objs': [self.fake_hosts[5]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
# Ensure non-overlapping force_node and force_host yield no result
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=[],
force_hosts=['fake_multihost'],
force_nodes=['fake-node'])
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
# Ensure ignore_hosts can coexist with force_nodes
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=['fake_host1', 'fake_host2'],
force_hosts=[],
force_nodes=['fake-node4', 'fake-node2'])
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
# Ensure ignore_hosts is processed before force_nodes
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=['fake_multihost'],
force_hosts=[],
force_nodes=['fake_node4', 'fake_node2'])
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)

nova/tests/unit/scheduler/test_scheduler.py

@@ -26,7 +26,6 @@ from nova.scheduler import caching_scheduler
from nova.scheduler import chance
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
-from nova.scheduler import ironic_host_manager
from nova.scheduler import manager
from nova import servicegroup
from nova import test
@@ -367,16 +366,6 @@ class SchedulerInitTestCase(test.NoDBTestCase):
manager = self.driver_cls().host_manager
self.assertIsInstance(manager, host_manager.HostManager)
-@mock.patch.object(ironic_host_manager.IronicHostManager,
-'_init_instance_info')
-@mock.patch.object(host_manager.HostManager, '_init_aggregates')
-def test_init_using_ironic_hostmanager(self,
-mock_init_agg,
-mock_init_inst):
-self.flags(host_manager='ironic_host_manager', group='scheduler')
-manager = self.driver_cls().host_manager
-self.assertIsInstance(manager, ironic_host_manager.IronicHostManager)
class SchedulerTestCase(test.NoDBTestCase):
"""Test case for base scheduler driver class."""

releasenotes/notes/ (new release note)

@@ -0,0 +1,21 @@
---
upgrade:
- |
The ``[filter_scheduler]/use_baremetal_filters`` and
``[filter_scheduler]/baremetal_enabled_filters`` configuration options
were deprecated in the 16.0.0 Pike release since deployments serving
baremetal instances should be `scheduling based on resource classes`_.
Those options have now been removed.
Similarly, the ``ironic_host_manager`` choice for the
``[scheduler]/host_manager`` configuration option was deprecated in the
17.0.0 Queens release because ``ironic_host_manager`` is only useful when
using ``use_baremetal_filters=True`` and ``baremetal_enabled_filters``.
Now that those options are gone, the deprecated ``ironic_host_manager``
host manager choice has also been removed.
Remember to run ``nova-status upgrade check`` before upgrading to
18.0.0 Rocky to ensure baremetal instances have had their embedded
flavor migrated to use the corresponding ironic node custom resource class.
.. _scheduling based on resource classes: https://docs.openstack.org/ironic/latest/install/configure-nova-flavors.html#scheduling-based-on-resource-classes

setup.cfg

@@ -83,8 +83,6 @@ nova.ipv6_backend =
nova.scheduler.host_manager =
host_manager = nova.scheduler.host_manager:HostManager
-# Deprecated starting from the 17.0.0 Queens release.
-ironic_host_manager = nova.scheduler.ironic_host_manager:IronicHostManager
nova.scheduler.driver =
filter_scheduler = nova.scheduler.filter_scheduler:FilterScheduler