diff --git a/nova_solverscheduler/scheduler/solver_scheduler.py b/nova_solverscheduler/scheduler/solver_scheduler.py
index e0cd714..a583344 100644
--- a/nova_solverscheduler/scheduler/solver_scheduler.py
+++ b/nova_solverscheduler/scheduler/solver_scheduler.py
@@ -20,11 +20,11 @@ cost metrics. The solution is designed to work with pluggable
 solvers. A default solver implementation that uses PULP is included.
 """
 
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import importutils
 
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import importutils
-from nova.openstack.common import log as logging
+from nova.i18n import _
 from nova.scheduler import filter_scheduler
 from nova.scheduler import weights
@@ -52,27 +52,51 @@ class ConstraintSolverScheduler(filter_scheduler.FilterScheduler):
         self.hosts_solver = importutils.import_object(
             CONF.solver_scheduler.scheduler_host_solver)
 
-    def _schedule(self, context, request_spec, filter_properties,
-                  instance_uuids=None):
+    def _setup_instance_group(self, context, filter_properties):
+        """Update filter_properties with server group info.
+
+        :returns: True if filter_properties has been updated, False if not.
+        """
+        scheduler_hints = filter_properties.get('scheduler_hints') or {}
+        group_hint = scheduler_hints.get('group', None)
+        if not group_hint:
+            return False
+
+        group = objects.InstanceGroup.get_by_hint(context, group_hint)
+        policies = set(('anti-affinity', 'affinity'))
+        if not any((policy in policies) for policy in group.policies):
+            return False
+
+        if ('affinity' in group.policies and
+                not self._supports_affinity):
+            msg = _("ServerGroupAffinityConstraint not configured")
+            LOG.error(msg)
+            raise exception.NoValidHost(reason=msg)
+        if ('anti-affinity' in group.policies and
+                not self._supports_anti_affinity):
+            msg = _("ServerGroupAntiAffinityConstraint not configured")
+            LOG.error(msg)
+            raise exception.NoValidHost(reason=msg)
+
+        filter_properties.setdefault('group_hosts', set())
+        user_hosts = set(filter_properties['group_hosts'])
+        group_hosts = set(group.get_hosts(context))
+        filter_properties['group_hosts'] = user_hosts | group_hosts
+        filter_properties['group_policies'] = group.policies
+
+        return True
+
+    def _schedule(self, context, request_spec, filter_properties):
         """Returns a list of hosts that meet the required specs,
         ordered by their fitness.
         """
-        instance_properties = request_spec['instance_properties']
         instance_type = request_spec.get("instance_type", None)
+        instance_uuids = request_spec.get("instance_uuids", None)
 
-        update_group_hosts = self._setup_instance_group(context,
-                                                        filter_properties)
+        self._setup_instance_group(context, filter_properties)
 
         config_options = self._get_configuration_options()
 
-        # check retry policy.  Rather ugly use of instance_uuids[0]...
-        # but if we've exceeded max retries... then we really only
-        # have a single instance.
-        properties = instance_properties.copy()
-        if instance_uuids:
-            properties['uuid'] = instance_uuids[0]
-        self._populate_retry(filter_properties, properties)
-
         if instance_uuids:
             num_instances = len(instance_uuids)
         else:
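
The new _setup_instance_group above is a three-step gate: return early unless a 'group' scheduler hint is present, raise NoValidHost if a requested policy has no matching constraint configured, then merge the group's hosts into filter_properties. A minimal standalone sketch of the same logic, for reviewers unfamiliar with the flow (plain dicts and a local exception stand in for nova's InstanceGroup object and exception.NoValidHost; this is illustrative, not part of the patch):

    # Standalone sketch of the _setup_instance_group gate; the names here
    # are illustrative stand-ins, not the real nova objects.
    class NoValidHost(Exception):
        pass

    def setup_instance_group(filter_properties, group, supports_affinity,
                             supports_anti_affinity):
        hints = filter_properties.get('scheduler_hints') or {}
        if not hints.get('group'):
            return False                  # no group hint: nothing to do
        if not set(group['policies']) & {'affinity', 'anti-affinity'}:
            return False                  # only placement policies matter here
        if 'affinity' in group['policies'] and not supports_affinity:
            raise NoValidHost('ServerGroupAffinityConstraint not configured')
        if 'anti-affinity' in group['policies'] and not supports_anti_affinity:
            raise NoValidHost('ServerGroupAntiAffinityConstraint not configured')
        # union of any caller-supplied hosts with the group's current hosts
        user_hosts = set(filter_properties.get('group_hosts', set()))
        filter_properties['group_hosts'] = user_hosts | set(group['hosts'])
        filter_properties['group_policies'] = group['policies']
        return True

    props = {'scheduler_hints': {'group': 'uuid-1'}}
    group = {'policies': ['anti-affinity'], 'hosts': ['host1']}
    assert setup_instance_group(props, group, True, True)
    assert props['group_hosts'] == {'host1'}

Raising instead of silently skipping an unsupported policy mirrors the hunk above: a group policy that cannot be enforced must fail the request rather than place instances incorrectly.
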
""" -from oslo.config import cfg +from oslo_config import cfg +from oslo_log import log as logging -from nova.openstack.common.gettextutils import _ -from nova.openstack.common import log as logging +from nova.i18n import _ from nova.scheduler import host_manager @@ -82,7 +82,7 @@ class SolverSchedulerHostManager(host_manager.HostManager): ignored_hosts.append(host) ignored_hosts_str = ', '.join(ignored_hosts) msg = _('Host filter ignoring hosts: %s') - LOG.audit(msg % ignored_hosts_str) + LOG.info(msg % ignored_hosts_str) def _match_forced_hosts(host_map, hosts_to_force): forced_hosts = [] @@ -98,7 +98,7 @@ class SolverSchedulerHostManager(host_manager.HostManager): forced_hosts_str = ', '.join(hosts_to_force) msg = _("No hosts matched due to not matching " "'force_hosts' value of '%s'") - LOG.audit(msg % forced_hosts_str) + LOG.info(msg % forced_hosts_str) def _match_forced_nodes(host_map, nodes_to_force): forced_nodes = [] @@ -114,7 +114,7 @@ class SolverSchedulerHostManager(host_manager.HostManager): forced_nodes_str = ', '.join(nodes_to_force) msg = _("No nodes matched due to not matching " "'force_nodes' value of '%s'") - LOG.audit(msg % forced_nodes_str) + LOG.info(msg % forced_nodes_str) ignore_hosts = filter_properties.get('ignore_hosts', []) force_hosts = filter_properties.get('force_hosts', []) diff --git a/nova_solverscheduler/scheduler/solvers/__init__.py b/nova_solverscheduler/scheduler/solvers/__init__.py index b2069fe..2890886 100644 --- a/nova_solverscheduler/scheduler/solvers/__init__.py +++ b/nova_solverscheduler/scheduler/solvers/__init__.py @@ -17,7 +17,7 @@ Scheduler host constraint solvers """ -from oslo.config import cfg +from oslo_config import cfg from nova_solverscheduler.scheduler.solvers import constraints from nova_solverscheduler.scheduler.solvers import costs diff --git a/nova_solverscheduler/scheduler/solvers/constraints/aggregate_disk.py b/nova_solverscheduler/scheduler/solvers/constraints/aggregate_disk.py index 6770355..5b34b1d 100644 --- a/nova_solverscheduler/scheduler/solvers/constraints/aggregate_disk.py +++ b/nova_solverscheduler/scheduler/solvers/constraints/aggregate_disk.py @@ -13,11 +13,11 @@ # License for the specific language governing permissions and limitations # under the License. -from oslo.config import cfg +from oslo_config import cfg +from oslo_log import log as logging from nova import db -from nova.openstack.common.gettextutils import _ -from nova.openstack.common import log as logging +from nova.i18n import _ from nova_solverscheduler.scheduler.solvers.constraints import disk_constraint CONF = cfg.CONF diff --git a/nova_solverscheduler/scheduler/solvers/constraints/aggregate_ram.py b/nova_solverscheduler/scheduler/solvers/constraints/aggregate_ram.py index dfc9509..9c95e7e 100644 --- a/nova_solverscheduler/scheduler/solvers/constraints/aggregate_ram.py +++ b/nova_solverscheduler/scheduler/solvers/constraints/aggregate_ram.py @@ -13,11 +13,11 @@ # License for the specific language governing permissions and limitations # under the License. 
diff --git a/nova_solverscheduler/scheduler/solvers/constraints/aggregate_ram.py b/nova_solverscheduler/scheduler/solvers/constraints/aggregate_ram.py
index dfc9509..9c95e7e 100644
--- a/nova_solverscheduler/scheduler/solvers/constraints/aggregate_ram.py
+++ b/nova_solverscheduler/scheduler/solvers/constraints/aggregate_ram.py
@@ -13,11 +13,11 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
 
 from nova import db
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from nova.i18n import _
 from nova_solverscheduler.scheduler.solvers.constraints import ram_constraint
 
 CONF = cfg.CONF
diff --git a/nova_solverscheduler/scheduler/solvers/constraints/aggregate_vcpu.py b/nova_solverscheduler/scheduler/solvers/constraints/aggregate_vcpu.py
index 7cc55c9..492ca65 100644
--- a/nova_solverscheduler/scheduler/solvers/constraints/aggregate_vcpu.py
+++ b/nova_solverscheduler/scheduler/solvers/constraints/aggregate_vcpu.py
@@ -13,11 +13,11 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
 
 from nova import db
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from nova.i18n import _
 from nova_solverscheduler.scheduler.solvers.constraints import vcpu_constraint
 
 CONF = cfg.CONF
diff --git a/nova_solverscheduler/scheduler/solvers/constraints/disk_constraint.py b/nova_solverscheduler/scheduler/solvers/constraints/disk_constraint.py
index 8240935..a1469b7 100644
--- a/nova_solverscheduler/scheduler/solvers/constraints/disk_constraint.py
+++ b/nova_solverscheduler/scheduler/solvers/constraints/disk_constraint.py
@@ -14,10 +14,10 @@
 # under the License.
 
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
 
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from nova.i18n import _
 from nova_solverscheduler.scheduler.solvers import constraints
 
 CONF = cfg.CONF
diff --git a/nova_solverscheduler/scheduler/solvers/constraints/io_ops_constraint.py b/nova_solverscheduler/scheduler/solvers/constraints/io_ops_constraint.py
index 13b4170..a8d8e4a 100644
--- a/nova_solverscheduler/scheduler/solvers/constraints/io_ops_constraint.py
+++ b/nova_solverscheduler/scheduler/solvers/constraints/io_ops_constraint.py
@@ -13,10 +13,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
 
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from nova.i18n import _
 from nova_solverscheduler.scheduler.solvers import constraints
 
 LOG = logging.getLogger(__name__)
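
All of these constraint modules feed the solver the same shape of data: for N candidate hosts and M requested instances, a constraint contributes an N-by-M matrix of booleans saying whether host i may hold the j-th instance. A toy, self-contained illustration of that contract (the real classes subclass this repo's BaseLinearConstraint; the free-function shape here is a simplification):

    # Toy version of a solver constraint: for N hosts and M requested
    # instances, return an N x M matrix where cell [i][j] says "host i may
    # hold the j-th instance".
    def ram_constraint_matrix(hosts, requested_ram_mb, num_instances):
        matrix = []
        for host in hosts:
            free = host['free_ram_mb']
            # host i can take instance j only if (j + 1) copies fit
            matrix.append([free >= requested_ram_mb * (j + 1)
                           for j in range(num_instances)])
        return matrix

    hosts = [{'free_ram_mb': 512}, {'free_ram_mb': 2048}]
    print(ram_constraint_matrix(hosts, 512, 2))
    # [[True, False], [True, True]]
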
diff --git a/nova_solverscheduler/scheduler/solvers/constraints/num_instances_constraint.py b/nova_solverscheduler/scheduler/solvers/constraints/num_instances_constraint.py
index d9b56dd..afdb85e 100644
--- a/nova_solverscheduler/scheduler/solvers/constraints/num_instances_constraint.py
+++ b/nova_solverscheduler/scheduler/solvers/constraints/num_instances_constraint.py
@@ -13,10 +13,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
 
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from nova.i18n import _
 from nova_solverscheduler.scheduler.solvers import constraints
 
 CONF = cfg.CONF
diff --git a/nova_solverscheduler/scheduler/solvers/constraints/pci_passthrough_constraint.py b/nova_solverscheduler/scheduler/solvers/constraints/pci_passthrough_constraint.py
index a6e6b90..64c181c 100644
--- a/nova_solverscheduler/scheduler/solvers/constraints/pci_passthrough_constraint.py
+++ b/nova_solverscheduler/scheduler/solvers/constraints/pci_passthrough_constraint.py
@@ -15,8 +15,9 @@
 
 import copy
 
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from oslo_log import log as logging
+
+from nova.i18n import _
 from nova_solverscheduler.scheduler.solvers import constraints
 
 LOG = logging.getLogger(__name__)
diff --git a/nova_solverscheduler/scheduler/solvers/constraints/ram_constraint.py b/nova_solverscheduler/scheduler/solvers/constraints/ram_constraint.py
index 352fda8..17bc003 100644
--- a/nova_solverscheduler/scheduler/solvers/constraints/ram_constraint.py
+++ b/nova_solverscheduler/scheduler/solvers/constraints/ram_constraint.py
@@ -14,10 +14,10 @@
 # under the License.
 
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
 
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from nova.i18n import _
 from nova_solverscheduler.scheduler.solvers import constraints
 
 CONF = cfg.CONF
diff --git a/nova_solverscheduler/scheduler/solvers/constraints/vcpu_constraint.py b/nova_solverscheduler/scheduler/solvers/constraints/vcpu_constraint.py
index 9b58e46..fa50500 100644
--- a/nova_solverscheduler/scheduler/solvers/constraints/vcpu_constraint.py
+++ b/nova_solverscheduler/scheduler/solvers/constraints/vcpu_constraint.py
@@ -13,10 +13,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from oslo.config import cfg
+from oslo_config import cfg
+from oslo_log import log as logging
 
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from nova.i18n import _
 from nova_solverscheduler.scheduler.solvers import constraints
 
 CONF = cfg.CONF
diff --git a/nova_solverscheduler/scheduler/solvers/costs/metrics_cost.py b/nova_solverscheduler/scheduler/solvers/costs/metrics_cost.py
index b282d30..e7dbe17 100644
--- a/nova_solverscheduler/scheduler/solvers/costs/metrics_cost.py
+++ b/nova_solverscheduler/scheduler/solvers/costs/metrics_cost.py
@@ -26,7 +26,7 @@ in the configuration file as the followings:
 
 The final weight would be name1.value * 1.0 + name2.value * -1.0.
 """
 
-from oslo.config import cfg
+from oslo_config import cfg
 
 from nova.scheduler import utils
 from nova_solverscheduler.scheduler.solvers import costs as solver_costs
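
The metrics_cost docstring above defines the cost as a weighted sum over host metrics: with a setting like name1=1.0, name2=-1.0, the cost of a host is name1.value * 1.0 + name2.value * -1.0. A self-contained sketch of that arithmetic (the parsing helper names are illustrative, not the module's actual functions):

    # Illustrative weighted-metrics cost, mirroring the docstring's
    # "name1=1.0, name2=-1.0" weight_setting format.
    def parse_weight_setting(setting):
        # "foo=1.0, bar=-1.0" -> {'foo': 1.0, 'bar': -1.0}
        pairs = (item.split('=')
                 for item in setting.replace(' ', '').split(','))
        return {name: float(weight) for name, weight in pairs}

    def metrics_cost(host_metrics, weights):
        return sum(host_metrics[name] * weight
                   for name, weight in weights.items())

    weights = parse_weight_setting('foo=1.0, bar=-1.0')
    print(metrics_cost({'foo': 512, 'bar': 1.0}, weights))   # 511.0
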
""" -from oslo.config import cfg +from oslo_config import cfg +from oslo_log import log as logging -from nova.openstack.common.gettextutils import _ -from nova.openstack.common import log as logging +from nova.i18n import _ from nova_solverscheduler.scheduler.solvers import costs as solver_costs from nova_solverscheduler.scheduler.solvers.costs import utils diff --git a/nova_solverscheduler/scheduler/solvers/pulp_solver.py b/nova_solverscheduler/scheduler/solvers/pulp_solver.py index 641dade..d1b1537 100644 --- a/nova_solverscheduler/scheduler/solvers/pulp_solver.py +++ b/nova_solverscheduler/scheduler/solvers/pulp_solver.py @@ -17,10 +17,10 @@ from pulp import constants from pulp import pulp from pulp import solvers as pulp_solver_classes -from oslo.config import cfg +from oslo_config import cfg +from oslo_log import log as logging -from nova.openstack.common.gettextutils import _ -from nova.openstack.common import log as logging +from nova.i18n import _ from nova_solverscheduler.scheduler import solvers as scheduler_solver pulp_solver_opts = [ diff --git a/nova_solverscheduler/solver_scheduler_exception.py b/nova_solverscheduler/solver_scheduler_exception.py index 016be7a..14e72a5 100644 --- a/nova_solverscheduler/solver_scheduler_exception.py +++ b/nova_solverscheduler/solver_scheduler_exception.py @@ -14,7 +14,7 @@ # under the License. from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ class SolverFailed(exception.NovaException): diff --git a/nova_solverscheduler/tests/scheduler/solver_scheduler_fakes.py b/nova_solverscheduler/tests/scheduler/solver_scheduler_fakes.py index 9da5a07..a7699b1 100644 --- a/nova_solverscheduler/tests/scheduler/solver_scheduler_fakes.py +++ b/nova_solverscheduler/tests/scheduler/solver_scheduler_fakes.py @@ -16,15 +16,23 @@ Fakes For Scheduler tests. 
""" -import mox - from nova.compute import vm_states from nova import db -from nova.openstack.common import jsonutils +from nova import objects from nova_solverscheduler.scheduler import solver_scheduler from nova_solverscheduler.scheduler import solver_scheduler_host_manager +NUMA_TOPOLOGY = objects.NUMATopology( + cells=[objects.NUMACell( + id=0, cpuset=set([1, 2]), memory=512, + cpu_usage=0, memory_usage=0, mempages=[], + siblings=[], pinned_cpus=set([])), + objects.NUMACell( + id=1, cpuset=set([3, 4]), memory=512, + cpu_usage=0, memory_usage=0, mempages=[], + siblings=[], pinned_cpus=set([]))]) + COMPUTE_NODES = [ dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1, disk_available_least=None, free_ram_mb=512, vcpus_used=1, @@ -54,119 +62,43 @@ COMPUTE_NODES = [ dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None), ] -COMPUTE_NODES_METRICS = [ - dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1, - disk_available_least=512, free_ram_mb=512, vcpus_used=1, - free_disk_gb=512, local_gb_used=0, updated_at=None, - service=dict(host='host1', disabled=False), - hypervisor_hostname='node1', host_ip='127.0.0.1', - hypervisor_version=0, - metrics=jsonutils.dumps([{'name': 'foo', - 'value': 512, - 'timestamp': None, - 'source': 'host1' - }, - {'name': 'bar', - 'value': 1.0, - 'timestamp': None, - 'source': 'host1' - }, - ])), - dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2, - disk_available_least=1024, free_ram_mb=1024, vcpus_used=2, - free_disk_gb=1024, local_gb_used=0, updated_at=None, - service=dict(host='host2', disabled=True), - hypervisor_hostname='node2', host_ip='127.0.0.1', - hypervisor_version=0, - metrics=jsonutils.dumps([{'name': 'foo', - 'value': 1024, - 'timestamp': None, - 'source': 'host2' - }, - {'name': 'bar', - 'value': 2.0, - 'timestamp': None, - 'source': 'host2' - }, - ])), - dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4, - disk_available_least=3072, free_ram_mb=3072, vcpus_used=1, - free_disk_gb=3072, local_gb_used=0, updated_at=None, - service=dict(host='host3', disabled=False), - hypervisor_hostname='node3', host_ip='127.0.0.1', - hypervisor_version=0, - metrics=jsonutils.dumps([{'name': 'foo', - 'value': 3072, - 'timestamp': None, - 'source': 'host3' - }, - {'name': 'bar', - 'value': 1.0, - 'timestamp': None, - 'source': 'host3' - }, - ])), - dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8, - disk_available_least=8192, free_ram_mb=8192, vcpus_used=0, - free_disk_gb=8192, local_gb_used=0, updated_at=None, - service=dict(host='host4', disabled=False), - hypervisor_hostname='node4', host_ip='127.0.0.1', - hypervisor_version=0, - metrics=jsonutils.dumps([{'name': 'foo', - 'value': 8192, - 'timestamp': None, - 'source': 'host4' - }, - {'name': 'bar', - 'value': 0, - 'timestamp': None, - 'source': 'host4' - }, - ])), - dict(id=5, local_gb=768, memory_mb=768, vcpus=8, - disk_available_least=768, free_ram_mb=768, vcpus_used=0, - free_disk_gb=768, local_gb_used=0, updated_at=None, - service=dict(host='host5', disabled=False), - hypervisor_hostname='node5', host_ip='127.0.0.1', - hypervisor_version=0, - metrics=jsonutils.dumps([{'name': 'foo', - 'value': 768, - 'timestamp': None, - 'source': 'host5' - }, - {'name': 'bar', - 'value': 0, - 'timestamp': None, - 'source': 'host5' - }, - {'name': 'zot', - 'value': 1, - 'timestamp': None, - 'source': 'host5' - }, - ])), - dict(id=6, local_gb=2048, memory_mb=2048, vcpus=8, - disk_available_least=2048, free_ram_mb=2048, vcpus_used=0, - free_disk_gb=2048, local_gb_used=0, updated_at=None, - service=dict(host='host6', 
disabled=False), - hypervisor_hostname='node6', host_ip='127.0.0.1', - hypervisor_version=0, - metrics=jsonutils.dumps([{'name': 'foo', - 'value': 2048, - 'timestamp': None, - 'source': 'host6' - }, - {'name': 'bar', - 'value': 0, - 'timestamp': None, - 'source': 'host6' - }, - {'name': 'zot', - 'value': 2, - 'timestamp': None, - 'source': 'host6' - }, - ])), +COMPUTE_NODES_OBJ = [ + objects.ComputeNode( + id=1, local_gb=1024, memory_mb=1024, vcpus=1, + disk_available_least=None, free_ram_mb=512, vcpus_used=1, + free_disk_gb=512, local_gb_used=0, updated_at=None, + host='host1', hypervisor_hostname='node1', host_ip='127.0.0.1', + hypervisor_version=0, numa_topology=None, + hypervisor_type='foo', supported_hv_specs=[], + pci_device_pools=None, cpu_info=None, stats=None, metrics=None), + objects.ComputeNode( + id=2, local_gb=2048, memory_mb=2048, vcpus=2, + disk_available_least=1024, free_ram_mb=1024, vcpus_used=2, + free_disk_gb=1024, local_gb_used=0, updated_at=None, + host='host2', hypervisor_hostname='node2', host_ip='127.0.0.1', + hypervisor_version=0, numa_topology=None, + hypervisor_type='foo', supported_hv_specs=[], + pci_device_pools=None, cpu_info=None, stats=None, metrics=None), + objects.ComputeNode( + id=3, local_gb=4096, memory_mb=4096, vcpus=4, + disk_available_least=3333, free_ram_mb=3072, vcpus_used=1, + free_disk_gb=3072, local_gb_used=0, updated_at=None, + host='host3', hypervisor_hostname='node3', host_ip='127.0.0.1', + hypervisor_version=0, numa_topology=NUMA_TOPOLOGY._to_json(), + hypervisor_type='foo', supported_hv_specs=[], + pci_device_pools=None, cpu_info=None, stats=None, metrics=None), + objects.ComputeNode( + id=4, local_gb=8192, memory_mb=8192, vcpus=8, + disk_available_least=8192, free_ram_mb=8192, vcpus_used=0, + free_disk_gb=8888, local_gb_used=0, updated_at=None, + host='host4', hypervisor_hostname='node4', host_ip='127.0.0.1', + hypervisor_version=0, numa_topology=None, + hypervisor_type='foo', supported_hv_specs=[], + pci_device_pools=None, cpu_info=None, stats=None, metrics=None), + # Broken entry + objects.ComputeNode( + id=5, local_gb=1024, memory_mb=1024, vcpus=1, + host='fake', hypervisor_hostname='fake-hyp'), ] INSTANCES = [ @@ -186,6 +118,13 @@ INSTANCES = [ host='host5', node='node5'), ] +SERVICES = [ + objects.Service(host='host1', disabled=False), + objects.Service(host='host2', disabled=True), + objects.Service(host='host3', disabled=False), + objects.Service(host='host4', disabled=False), +] + class FakeSolverScheduler(solver_scheduler.ConstraintSolverScheduler): def __init__(self, *args, **kwargs): @@ -257,9 +196,3 @@ class FakeInstance(object): class FakeComputeAPI(object): def create_db_entry_for_new_instance(self, *args, **kwargs): pass - - -def mox_host_manager_db_calls(mock, context): - mock.StubOutWithMock(db, 'compute_node_get_all') - - db.compute_node_get_all(mox.IgnoreArg()).AndReturn(COMPUTE_NODES) diff --git a/nova_solverscheduler/tests/scheduler/solvers/constraints/test_pci_passthrough_constraint.py b/nova_solverscheduler/tests/scheduler/solvers/constraints/test_pci_passthrough_constraint.py index 516a0cc..7410d63 100644 --- a/nova_solverscheduler/tests/scheduler/solvers/constraints/test_pci_passthrough_constraint.py +++ b/nova_solverscheduler/tests/scheduler/solvers/constraints/test_pci_passthrough_constraint.py @@ -15,7 +15,7 @@ import mock -from nova.pci import pci_stats +from nova.pci import stats as pci_stats from nova import test from nova_solverscheduler.scheduler.solvers.constraints \ import pci_passthrough_constraint @@ 
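
With the fakes rewritten from raw dicts to real nova.objects instances, serialization behaves as it does in production; for example, the fake NUMA topology stored via _to_json() above can be rehydrated the way nova's host-state code does. A hedged sketch (the object method names are from Kilo-era nova, and the snippet assumes it runs inside a test where nova's versioned objects are registered; verify against the tree you build with):

    # Hedged sketch: round-trip the fake NUMA topology. Assumes nova
    # Kilo's objects API and an environment with objects registered.
    from nova import objects
    from nova_solverscheduler.tests.scheduler import solver_scheduler_fakes \
        as fakes

    db_blob = fakes.NUMA_TOPOLOGY._to_json()
    topo = objects.NUMATopology.obj_from_db_obj(db_blob)
    assert [cell.id for cell in topo.cells] == [0, 1]
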
diff --git a/nova_solverscheduler/tests/scheduler/solvers/constraints/test_pci_passthrough_constraint.py b/nova_solverscheduler/tests/scheduler/solvers/constraints/test_pci_passthrough_constraint.py
index 516a0cc..7410d63 100644
--- a/nova_solverscheduler/tests/scheduler/solvers/constraints/test_pci_passthrough_constraint.py
+++ b/nova_solverscheduler/tests/scheduler/solvers/constraints/test_pci_passthrough_constraint.py
@@ -15,7 +15,7 @@
 
 import mock
 
-from nova.pci import pci_stats
+from nova.pci import stats as pci_stats
 from nova import test
 from nova_solverscheduler.scheduler.solvers.constraints \
     import pci_passthrough_constraint
@@ -45,8 +45,8 @@ class TestPciPassthroughConstraint(test.NoDBTestCase):
             {'pci_stats': pci_stats.PciDeviceStats()})
         self.fake_hosts = [host1, host2, host3]
 
-    @mock.patch('nova.pci.pci_stats.PciDeviceStats.support_requests')
-    @mock.patch('nova.pci.pci_stats.PciDeviceStats.apply_requests')
+    @mock.patch('nova.pci.stats.PciDeviceStats.support_requests')
+    @mock.patch('nova.pci.stats.PciDeviceStats.apply_requests')
     def test_get_constraint_matrix(self, apl_reqs, spt_reqs):
         spt_reqs.side_effect = [True, False] + [False] + [True, True, False]
         expected_cons_mat = [
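
The rename from nova.pci.pci_stats to nova.pci.stats forces the mock.patch target strings to change as well: mock.patch resolves its target by import path at patch time, so the local alias pci_stats used inside the test file never matters. A small demonstration of that rule (module paths as in Kilo nova):

    import mock
    from nova.pci import stats as pci_stats  # local alias

    # The patch target must be the real module path, not the alias:
    with mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
                    return_value=True):
        assert pci_stats.PciDeviceStats().support_requests([]) is True
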
diff --git a/nova_solverscheduler/tests/scheduler/solvers/costs/test_metrics_cost.py b/nova_solverscheduler/tests/scheduler/solvers/costs/test_metrics_cost.py
index 9421c56..8464cc2 100644
--- a/nova_solverscheduler/tests/scheduler/solvers/costs/test_metrics_cost.py
+++ b/nova_solverscheduler/tests/scheduler/solvers/costs/test_metrics_cost.py
@@ -16,29 +16,49 @@
 """Test case for solver scheduler RAM cost."""
 
 from nova import context
-from nova.openstack.common.fixture import mockpatch
+from nova.scheduler import host_manager
 from nova import test
-from nova_solverscheduler.tests.scheduler import solver_scheduler_fakes \
-    as fakes
 from nova_solverscheduler.scheduler.solvers import costs
 from nova_solverscheduler.scheduler.solvers.costs import ram_cost
+from nova_solverscheduler.tests.scheduler import solver_scheduler_fakes \
+    as fakes
 
 
 class TestMetricsCost(test.NoDBTestCase):
     def setUp(self):
         super(TestMetricsCost, self).setUp()
         self.context = context.RequestContext('fake_usr', 'fake_proj')
-        self.useFixture(mockpatch.Patch('nova.db.compute_node_get_all',
-                        return_value=fakes.COMPUTE_NODES_METRICS))
-        self.host_manager = fakes.FakeSolverSchedulerHostManager()
         self.cost_handler = costs.CostHandler()
         self.cost_classes = self.cost_handler.get_matching_classes(
             ['nova_solverscheduler.scheduler.solvers.costs.metrics_cost.'
             'MetricsCost'])
 
     def _get_all_hosts(self):
-        ctxt = context.get_admin_context()
-        return self.host_manager.get_all_host_states(ctxt)
+        def fake_metric(value):
+            return host_manager.MetricItem(value=value, timestamp='fake-time',
+                                           source='fake-source')
+
+        host1 = fakes.FakeSolverSchedulerHostState('host1', 'node1',
+                {'metrics': {'foo': fake_metric(512),
+                             'bar': fake_metric(1)}})
+        host2 = fakes.FakeSolverSchedulerHostState('host2', 'node2',
+                {'metrics': {'foo': fake_metric(1024),
+                             'bar': fake_metric(2)}})
+        host3 = fakes.FakeSolverSchedulerHostState('host3', 'node3',
+                {'metrics': {'foo': fake_metric(3072),
+                             'bar': fake_metric(1)}})
+        host4 = fakes.FakeSolverSchedulerHostState('host4', 'node4',
+                {'metrics': {'foo': fake_metric(8192),
+                             'bar': fake_metric(0)}})
+        host5 = fakes.FakeSolverSchedulerHostState('host5', 'node5',
+                {'metrics': {'foo': fake_metric(768),
+                             'bar': fake_metric(0),
+                             'zot': fake_metric(1)}})
+        host6 = fakes.FakeSolverSchedulerHostState('host6', 'node6',
+                {'metrics': {'foo': fake_metric(2048),
+                             'bar': fake_metric(0),
+                             'zot': fake_metric(2)}})
+        return [host1, host2, host3, host4, host5, host6]
 
     def _get_fake_cost_inputs(self):
         fake_hosts = self._get_all_hosts()
diff --git a/nova_solverscheduler/tests/scheduler/solvers/costs/test_ram_cost.py b/nova_solverscheduler/tests/scheduler/solvers/costs/test_ram_cost.py
index a1d0e2a..367d7df 100644
--- a/nova_solverscheduler/tests/scheduler/solvers/costs/test_ram_cost.py
+++ b/nova_solverscheduler/tests/scheduler/solvers/costs/test_ram_cost.py
@@ -16,7 +16,6 @@
 """Test case for solver scheduler RAM cost."""
 
 from nova import context
-from nova.openstack.common.fixture import mockpatch
 from nova import test
 from nova_solverscheduler.scheduler.solvers import costs
 from nova_solverscheduler.scheduler.solvers.costs import ram_cost
@@ -28,16 +27,20 @@ class TestRamCost(test.NoDBTestCase):
     def setUp(self):
         super(TestRamCost, self).setUp()
         self.context = context.RequestContext('fake_usr', 'fake_proj')
-        self.useFixture(mockpatch.Patch('nova.db.compute_node_get_all',
-                        return_value=fakes.COMPUTE_NODES[0:5]))
-        self.host_manager = fakes.FakeSolverSchedulerHostManager()
         self.cost_handler = costs.CostHandler()
         self.cost_classes = self.cost_handler.get_matching_classes(
             ['nova_solverscheduler.scheduler.solvers.costs.ram_cost.RamCost'])
 
     def _get_all_hosts(self):
-        ctxt = context.get_admin_context()
-        return self.host_manager.get_all_host_states(ctxt)
+        host1 = fakes.FakeSolverSchedulerHostState('host1', 'node1',
+                                                   {'free_ram_mb': 512})
+        host2 = fakes.FakeSolverSchedulerHostState('host2', 'node2',
+                                                   {'free_ram_mb': 1024})
+        host3 = fakes.FakeSolverSchedulerHostState('host3', 'node3',
+                                                   {'free_ram_mb': 3072})
+        host4 = fakes.FakeSolverSchedulerHostState('host4', 'node4',
+                                                   {'free_ram_mb': 8192})
+        return [host1, host2, host3, host4]
 
     def test_ram_cost_multiplier_1(self):
         self.flags(ram_cost_multiplier=0.5, group='solver_scheduler')
""" -import contextlib import mock -from nova.compute import utils as compute_utils -from nova.compute import vm_states from nova import context -from nova import db from nova import exception -from nova.scheduler import driver -from nova.scheduler import host_manager -from nova.scheduler import weights -from nova.tests.scheduler import test_scheduler +from nova.tests.unit.scheduler import test_scheduler from nova_solverscheduler.scheduler import solver_scheduler -from nova_solverscheduler import solver_scheduler_exception +from nova_solverscheduler.scheduler import solver_scheduler_host_manager \ + as host_manager from nova_solverscheduler.tests.scheduler import solver_scheduler_fakes \ as fakes @@ -56,122 +50,26 @@ class SolverSchedulerTestCase(test_scheduler.SchedulerTestCase): def setUp(self): super(SolverSchedulerTestCase, self).setUp() - def test_run_instance_no_hosts(self): - - def _fake_empty_call_zone_method(*args, **kwargs): - return [] - - sched = fakes.FakeSolverScheduler() - - uuid = 'fake-uuid1' - fake_context = context.RequestContext('user', 'project') - instance_properties = {'project_id': 1, 'os_type': 'Linux'} - request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1, - 'ephemeral_gb': 0}, - 'instance_properties': instance_properties, - 'instance_uuids': [uuid]} - with contextlib.nested( - mock.patch.object(compute_utils, 'add_instance_fault_from_exc'), - mock.patch.object(db, 'instance_update_and_get_original'), - mock.patch.object(db, 'compute_node_get_all')) as ( - add_instance, get_original, get_all): - get_original.return_value = ({}, {}) - get_all.return_value = [] - sched.schedule_run_instance( - fake_context, request_spec, None, None, None, None, {}, False) - add_instance.assert_called_once_with(fake_context, mock.ANY, {}, - mock.ANY, mock.ANY) - get_original.assert_called_once_with(fake_context, uuid, - {'vm_state': vm_states.ERROR, - 'task_state': None}) - get_all.assert_called_once_with(mock.ANY) - - def test_run_instance_non_admin(self): - self.was_admin = False - - def fake_get(context, *args, **kwargs): - # make sure this is called with admin context, even though - # we're using user context below - self.was_admin = context.is_admin - return {} - - sched = fakes.FakeSolverScheduler() - self.stubs.Set(sched.host_manager, 'get_all_host_states', fake_get) - - fake_context = context.RequestContext('user', 'project') - - uuid = 'fake-uuid1' - instance_properties = {'project_id': 1, 'os_type': 'Linux'} - request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1}, - 'instance_properties': instance_properties, - 'instance_uuids': [uuid]} - with contextlib.nested( - mock.patch.object(compute_utils, 'add_instance_fault_from_exc'), - mock.patch.object(db, 'instance_update_and_get_original')) as ( - add_instance, get_original): - get_original.return_value = ({}, {}) - sched.schedule_run_instance( - fake_context, request_spec, None, None, None, None, {}, False) - add_instance.assert_called_once_with(fake_context, mock.ANY, {}, - mock.ANY, mock.ANY) - get_original.assert_called_once_with(fake_context, uuid, - {'vm_state': vm_states.ERROR, - 'task_state': None}) - self.assertTrue(self.was_admin) - - def test_scheduler_includes_launch_index(self): - fake_context = context.RequestContext('user', 'project') - instance_opts = {'fake_opt1': 'meow'} - request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'], - 'instance_properties': instance_opts} - - instance1 = {'uuid': 'fake-uuid1'} - instance2 = {'uuid': 'fake-uuid2'} - - actual_launch_indices = [] - # 
-        # Retrieving the launch_index value from the request_spec (the 3rd
-        # argument of the _provision_resource method) using the side_effect
-
-        def provision_side_effect(*args, **kwargs):
-            if 'instance_properties' in args[2]:
-                if 'launch_index' in args[2]['instance_properties']:
-                    actual_launch_indices.append(
-                        args[2]['instance_properties']['launch_index'])
-            if len(actual_launch_indices) == 1:
-                # Setting the return_value for the first call
-                return instance1
-            else:
-                # Setting the return_value for the second call
-                return instance2
-
-        with contextlib.nested(
-            mock.patch.object(self.driver, '_schedule'),
-            mock.patch.object(self.driver, '_provision_resource')) as (
-                schedule_mock, provision_mock):
-            schedule_mock.return_value = ['host1', 'host2']
-            provision_mock.side_effect = provision_side_effect
-            self.driver.schedule_run_instance(fake_context, request_spec,
-                                              None, None, None, None, {},
-                                              False)
-            schedule_mock.assert_called_once_with(fake_context, request_spec,
-                    {}, ['fake-uuid1', 'fake-uuid2'])
-            call_args_list_expected = [(fake_context, 'host1', request_spec,
-                    {}, None, None, None, None,
-                    {'instance_uuid': 'fake-uuid1',
-                     'legacy_bdm_in_spec': False}),
-                    (fake_context, 'host2', request_spec, {}, None, None,
-                     None, None, {'instance_uuid': 'fake-uuid2',
-                     'legacy_bdm_in_spec': False})]
-            self.assertEqual(2, provision_mock.call_count)
-            for i in range(provision_mock.call_count):
-                self.assertEqual(list(call_args_list_expected[i]),
-                                 list(provision_mock.call_args_list[i][0]) +
-                                 [provision_mock.call_args_list[i][1]])
-            expected_launch_indices = [0, 1]
-            self.assertEqual(expected_launch_indices, actual_launch_indices)
-
-    def test_schedule_happy_day(self):
+    @mock.patch.object(host_manager.SolverSchedulerHostManager,
+                       '_init_instance_info')
+    @mock.patch.object(host_manager.SolverSchedulerHostManager,
+                       '_init_aggregates')
+    @mock.patch('nova.objects.ServiceList.get_by_binary',
+                return_value=fakes.SERVICES)
+    @mock.patch('nova.objects.InstanceList.get_by_host')
+    @mock.patch('nova.objects.ComputeNodeList.get_all',
+                return_value=fakes.COMPUTE_NODES_OBJ)
+    @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+                return_value={'numa_topology': None,
+                              'pci_requests': None})
+    def test_schedule_happy_day(self, mock_get_extra, mock_get_all,
+                                mock_by_host, mock_get_by_binary,
+                                mock_init_agg, mock_init_inst):
         """Make sure there's nothing glaringly wrong with _schedule()
            by doing a happy day pass through.
         """
 
+        self.flags(scheduler_host_manager='nova_solverscheduler.scheduler.'
+            'solver_scheduler_host_manager.SolverSchedulerHostManager')
         self.flags(scheduler_solver_constraints=[],
                    group='solver_scheduler')
         sched = fakes.FakeSolverScheduler()
@@ -193,115 +91,30 @@ class SolverSchedulerTestCase(test_scheduler.SchedulerTestCase):
                 'vcpus': 1,
                 'os_type': 'Linux'}}
 
-        with mock.patch.object(db, 'compute_node_get_all') as get_all:
-            get_all.return_value = fakes.COMPUTE_NODES
-            selected_hosts = sched._schedule(fake_context, request_spec, {})
-            get_all.assert_called_once_with(mock.ANY)
-            self.assertEqual(10, len(selected_hosts))
-            for host in selected_hosts:
-                self.assertTrue(host is not None)
+        selected_hosts = sched._schedule(fake_context, request_spec, {})
+        self.assertEqual(10, len(selected_hosts))
+        for host in selected_hosts:
+            self.assertTrue(host is not None)
 
-    def test_max_attempts(self):
-        self.flags(scheduler_max_attempts=4)
-
-        sched = fakes.FakeSolverScheduler()
-        self.assertEqual(4, sched._max_attempts())
-
-    def test_invalid_max_attempts(self):
-        self.flags(scheduler_max_attempts=0)
-
-        sched = fakes.FakeSolverScheduler()
-        self.assertRaises(exception.NovaException, sched._max_attempts)
-
-    def test_retry_disabled(self):
-        # Retry info should not get populated when re-scheduling is off.
-        self.flags(scheduler_max_attempts=1)
-        sched = fakes.FakeSolverScheduler()
-
-        instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
-        request_spec = dict(instance_properties=instance_properties)
-        filter_properties = {}
-
-        with mock.patch.object(db, 'compute_node_get_all') as get_all:
-            get_all.return_value = []
-            try:
-                sched._schedule(self.context, request_spec,
-                                filter_properties=filter_properties)
-            except solver_scheduler_exception.SolverFailed:
-                pass
-            get_all.assert_called_once_with(mock.ANY)
-            # should not have retry info in the populated filter properties:
-            self.assertFalse("retry" in filter_properties)
-
-    def test_retry_attempt_one(self):
-        # Test retry logic on initial scheduling attempt.
-        self.flags(scheduler_max_attempts=2)
-        sched = fakes.FakeSolverScheduler()
-
-        instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
-        request_spec = dict(instance_properties=instance_properties)
-        filter_properties = {}
-
-        with mock.patch.object(db, 'compute_node_get_all') as get_all:
-            get_all.return_value = []
-            try:
-                sched._schedule(self.context, request_spec,
-                                filter_properties=filter_properties)
-            except solver_scheduler_exception.SolverFailed:
-                pass
-            get_all.assert_called_once_with(mock.ANY)
-            num_attempts = filter_properties['retry']['num_attempts']
-            self.assertEqual(1, num_attempts)
-
-    def test_retry_attempt_two(self):
-        # Test retry logic when re-scheduling.
-        self.flags(scheduler_max_attempts=2)
-        sched = fakes.FakeSolverScheduler()
-
-        instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
-        request_spec = dict(instance_properties=instance_properties)
-
-        retry = dict(num_attempts=1)
-        filter_properties = dict(retry=retry)
-
-        with mock.patch.object(db, 'compute_node_get_all') as get_all:
-            get_all.return_value = []
-            try:
-                sched._schedule(self.context, request_spec,
-                                filter_properties=filter_properties)
-            except solver_scheduler_exception.SolverFailed:
-                pass
-            get_all.assert_called_once_with(mock.ANY)
-            num_attempts = filter_properties['retry']['num_attempts']
-            self.assertEqual(2, num_attempts)
-
-    def test_retry_exceeded_max_attempts(self):
-        # Test for necessary explosion when max retries is exceeded and that
-        # the information needed in request_spec is still present for error
-        # handling
-        self.flags(scheduler_max_attempts=2)
-        sched = fakes.FakeSolverScheduler()
-
-        instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
-        instance_uuids = ['fake-id']
-        request_spec = dict(instance_properties=instance_properties,
-                            instance_uuids=instance_uuids)
-
-        retry = dict(num_attempts=2)
-        filter_properties = dict(retry=retry)
-
-        self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
-                          self.context, request_spec, admin_password=None,
-                          injected_files=None, requested_networks=None,
-                          is_first_time=False,
-                          filter_properties=filter_properties,
-                          legacy_bdm_in_spec=False)
-        uuids = request_spec.get('instance_uuids')
-        self.assertEqual(instance_uuids, uuids)
-
-    def test_schedule_chooses_best_host(self):
+    @mock.patch.object(host_manager.SolverSchedulerHostManager,
+                       '_init_instance_info')
+    @mock.patch.object(host_manager.SolverSchedulerHostManager,
+                       '_init_aggregates')
+    @mock.patch('nova.objects.ServiceList.get_by_binary',
+                return_value=fakes.SERVICES)
+    @mock.patch('nova.objects.InstanceList.get_by_host')
+    @mock.patch('nova.objects.ComputeNodeList.get_all',
+                return_value=fakes.COMPUTE_NODES_OBJ)
+    @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+                return_value={'numa_topology': None,
+                              'pci_requests': None})
+    def test_schedule_chooses_best_host(self, mock_get_extra, mock_get_all,
+                                        mock_by_host, mock_get_by_binary,
+                                        mock_init_agg, mock_init_inst):
         """The host with the highest free_ram_mb will be chosen!
         """
+        self.flags(scheduler_host_manager='nova_solverscheduler.scheduler.'
+            'solver_scheduler_host_manager.SolverSchedulerHostManager')
         self.flags(scheduler_solver_constraints=[],
                    group='solver_scheduler')
         self.flags(ram_weight_multiplier=1)
@@ -334,23 +147,36 @@ class SolverSchedulerTestCase(test_scheduler.SchedulerTestCase):
             instance_type={'memory_mb': 512})
         filter_properties = {}
 
-        with mock.patch.object(db, 'compute_node_get_all') as get_all:
-            get_all.return_value = fakes.COMPUTE_NODES
-            hosts = sched._schedule(self.context, request_spec,
-                                    filter_properties=filter_properties)
-            get_all.assert_called_once_with(mock.ANY)
-            # one host should be chosen
-            self.assertEqual(1, len(hosts))
-            selected_host = hosts.pop(0)
-            self.assertEqual(best_host, (selected_host.obj.host,
-                                         selected_host.obj.nodename))
+        hosts = sched._schedule(self.context, request_spec,
+                                filter_properties=filter_properties)
+        # one host should be chosen
+        self.assertEqual(1, len(hosts))
+        selected_host = hosts.pop(0)
+        self.assertEqual(best_host, (selected_host.obj.host,
+                                     selected_host.obj.nodename))
 
-    def test_select_destinations(self):
+    @mock.patch.object(host_manager.SolverSchedulerHostManager,
+                       '_init_instance_info')
+    @mock.patch.object(host_manager.SolverSchedulerHostManager,
+                       '_init_aggregates')
+    @mock.patch('nova.objects.ServiceList.get_by_binary',
+                return_value=fakes.SERVICES)
+    @mock.patch('nova.objects.InstanceList.get_by_host')
+    @mock.patch('nova.objects.ComputeNodeList.get_all',
+                return_value=fakes.COMPUTE_NODES_OBJ)
+    @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+                return_value={'numa_topology': None,
+                              'pci_requests': None})
+    def test_select_destinations(self, mock_get_extra, mock_get_all,
+                                 mock_by_host, mock_get_by_binary,
+                                 mock_init_agg, mock_init_inst):
         """select_destinations is basically a wrapper around _schedule().
 
         Similar to the _schedule tests, this just does a happy path test to
         ensure there is nothing glaringly wrong.
         """
 
+        self.flags(scheduler_host_manager='nova_solverscheduler.scheduler.'
+            'solver_scheduler_host_manager.SolverSchedulerHostManager')
         self.flags(scheduler_solver_constraints=[],
                    group='solver_scheduler')
         sched = fakes.FakeSolverScheduler()
@@ -372,14 +198,10 @@ class SolverSchedulerTestCase(test_scheduler.SchedulerTestCase):
                 'os_type': 'Linux'},
             'num_instances': 1}
 
-        with mock.patch.object(db, 'compute_node_get_all') as get_all:
-            get_all.return_value = fakes.COMPUTE_NODES
-            dests = sched.select_destinations(fake_context, request_spec,
-                                              {})
-            get_all.assert_called_once_with(mock.ANY)
-            (host, node) = (dests[0]['host'], dests[0]['nodename'])
-            self.assertTrue(host is not None)
-            self.assertTrue(node is not None)
+        dests = sched.select_destinations(fake_context, request_spec, {})
+        (host, node) = (dests[0]['host'], dests[0]['nodename'])
+        self.assertTrue(host is not None)
+        self.assertTrue(node is not None)
 
     def test_select_destinations_no_valid_host(self):
 
@@ -390,28 +212,3 @@ class SolverSchedulerTestCase(test_scheduler.SchedulerTestCase):
         self.assertRaises(exception.NoValidHost,
                           self.driver.select_destinations, self.context,
                           {'num_instances': 1}, {})
-
-    def test_handles_deleted_instance(self):
-        """Test instance deletion while being scheduled."""
-
-        def _raise_instance_not_found(*args, **kwargs):
-            raise exception.InstanceNotFound(instance_id='123')
-
-        self.stubs.Set(driver, 'instance_update_db',
-                       _raise_instance_not_found)
-
-        sched = fakes.FakeSolverScheduler()
-
-        fake_context = context.RequestContext('user', 'project')
-        host = host_manager.HostState('host2', 'node2')
-        selected_host = weights.WeighedHost(host, 1)
-        filter_properties = {}
-
-        uuid = 'fake-uuid1'
-        instance_properties = {'project_id': 1, 'os_type': 'Linux'}
-        request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
-                        'instance_properties': instance_properties,
-                        'instance_uuids': [uuid]}
-        sched._provision_resource(fake_context, selected_host,
-                                  request_spec, filter_properties,
-                                  None, None, None, None)
diff --git a/nova_solverscheduler/tests/scheduler/test_solver_scheduler_host_manager.py b/nova_solverscheduler/tests/scheduler/test_solver_scheduler_host_manager.py
index b08e4ff..2d0b025 100644
--- a/nova_solverscheduler/tests/scheduler/test_solver_scheduler_host_manager.py
+++ b/nova_solverscheduler/tests/scheduler/test_solver_scheduler_host_manager.py
@@ -15,7 +15,10 @@
 """
 Tests For SolverSchedulerHostManager
 """
-from nova.openstack.common import timeutils
+import mock
+
+from oslo_utils import timeutils
+
 from nova import test
 from nova_solverscheduler.scheduler import solver_scheduler_host_manager \
     as host_manager
@@ -24,7 +27,11 @@ from nova_solverscheduler.scheduler import solver_scheduler_host_manager \
 class SolverSchedulerHostManagerTestCase(test.NoDBTestCase):
     """Test case for HostManager class."""
 
-    def setUp(self):
+    @mock.patch.object(host_manager.SolverSchedulerHostManager,
+                       '_init_instance_info')
+    @mock.patch.object(host_manager.SolverSchedulerHostManager,
+                       '_init_aggregates')
+    def setUp(self, mock_init_agg, mock_init_inst):
         super(SolverSchedulerHostManagerTestCase, self).setUp()
         self.host_manager = host_manager.SolverSchedulerHostManager()
         self.fake_hosts = [host_manager.SolverSchedulerHostState(
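
A note on the six-decorator mock.patch stacks introduced throughout these tests: stacked decorators are applied bottom-up, so the bottom-most patch supplies the first mock argument (mock_get_extra pairs with nova.db.instance_extra_get_by_instance_uuid, and so on up to mock_init_inst). A minimal self-contained reminder of that ordering:

    import mock

    class Widget(object):
        def ping(self):
            return 'real-ping'
        def pong(self):
            return 'real-pong'

    # Stacked patches apply bottom-up, so the *bottom* decorator provides
    # the *first* mock argument -- the convention the stacks above rely on.
    @mock.patch.object(Widget, 'ping')
    @mock.patch.object(Widget, 'pong')
    def check(mock_pong, mock_ping):
        assert Widget().ping() is mock_ping.return_value
        assert Widget().pong() is mock_pong.return_value

    check()
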
diff --git a/test-requirements.txt b/test-requirements.txt
index 8592bde..5a8e154 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -6,10 +6,14 @@ hacking<0.11,>=0.10.0
 coverage>=3.6
 discover
+mock>=1.0
+mox3>=0.7.0
 python-subunit>=0.0.18
 sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
-oslosphinx>=2.2.0 # Apache-2.0
-oslotest>=1.2.0 # Apache-2.0
+oslosphinx>=2.5.0,<2.6.0 # Apache-2.0
+oslotest>=1.5.1,<1.6.0 # Apache-2.0
 testrepository>=0.0.18
 testscenarios>=0.4
 testtools>=0.9.36,!=1.2.0
+
+http://tarballs.openstack.org/nova/nova-stable-kilo.tar.gz#egg=nova
diff --git a/tox.ini b/tox.ini
index be4c0e9..412f470 100644
--- a/tox.ini
+++ b/tox.ini
@@ -10,7 +10,6 @@ setenv = VIRTUAL_ENV={envdir}
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
-       -egit+https://github.com/openstack/nova#egg=nova
 commands = python setup.py test --slowest --testr-args='{posargs}'
 
 [testenv:pep8]