Merge "Use RequestSpec object in HostManager"

Jenkins 2015-11-04 15:42:38 +00:00 committed by Gerrit Code Review
commit 961e330fa3
5 changed files with 96 additions and 98 deletions

@@ -168,12 +168,7 @@ class FilterScheduler(driver.Scheduler):
# Now consume the resources so the filter/weights
# will change for the next instance.
# TODO(sbauza): Temporary use of the old legacy dict until we
# modify HostManager to use the RequestSpec object
spec_dict = spec_obj.to_legacy_request_spec_dict()
instance_properties = spec_dict['instance_properties']
chosen_host.obj.consume_from_instance(instance_properties)
chosen_host.obj.consume_from_request(spec_obj)
if filter_properties.get('group_updated') is True:
filter_properties['group_hosts'].add(chosen_host.obj.host)
return selected_hosts
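
The comment at the top of this hunk is the reason the call exists at all: the chosen host's state is mutated in place so that the filters and weighers evaluate the next instance of the same request against the reduced capacity. Below is a minimal, self-contained sketch of that idea using hypothetical MiniHost/MiniSpec stand-ins, not Nova's real HostState/RequestSpec classes.

# --- illustrative sketch (hypothetical stand-ins, not Nova code) ---
class MiniHost(object):
    def __init__(self, name, free_ram_mb):
        self.name = name
        self.free_ram_mb = free_ram_mb

    def consume_from_request(self, spec_obj):
        # Mirrors the call above: subtract what the request needs.
        self.free_ram_mb -= spec_obj.memory_mb


class MiniSpec(object):
    def __init__(self, memory_mb):
        self.memory_mb = memory_mb


def ram_filter(hosts, spec_obj):
    # Stand-in for a RAM filter: keep hosts that can still fit the request.
    return [h for h in hosts if h.free_ram_mb >= spec_obj.memory_mb]


hosts = [MiniHost('node1', free_ram_mb=1024)]
spec = MiniSpec(memory_mb=768)

chosen = ram_filter(hosts, spec)[0]   # node1 passes with 1024 MB free
chosen.consume_from_request(spec)     # only 256 MB remain afterwards
assert ram_filter(hosts, spec) == []  # the next instance must land elsewhere
# --- end sketch ---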

@@ -105,23 +105,22 @@ class ReadOnlyDict(IterableUserDict):
raise TypeError()
@utils.expects_func_args('self', 'instance')
@utils.expects_func_args('self', 'spec_obj')
def set_update_time_on_success(function):
"""Set updated time of HostState when consuming succeed."""
@functools.wraps(function)
def decorated_function(self, instance):
def decorated_function(self, spec_obj):
return_value = None
try:
return_value = function(self, instance)
return_value = function(self, spec_obj)
except Exception as e:
# Ignores exception raised from consume_from_instance() so that
# Ignores exception raised from consume_from_request() so that
# booting instance would fail in the resource claim of compute
# node, other suitable node may be chosen during scheduling retry.
LOG.warning(_LW("Selected host: %(host)s failed to consume from "
"instance. Error: %(error)s"),
{'host': self.host, 'error': e},
instance=instance)
{'host': self.host, 'error': e})
else:
now = timeutils.utcnow()
# NOTE(sbauza): Objects are UTC tz-aware by default
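
For context, the decorator this hunk retargets from instance to spec_obj follows a simple pattern: swallow the consumption error (the authoritative claim happens later on the compute node, and a retry can pick another host) and stamp self.updated only when the wrapped call succeeds. A simplified, self-contained rendering of that pattern follows; it uses a naive datetime instead of Nova's tz-aware timeutils and print() instead of LOG.warning(), so it is an approximation of the decorator above, not a copy of it.

# --- illustrative sketch (simplified stand-in for the decorator above) ---
import datetime
import functools


def set_update_time_on_success(function):
    """Stamp self.updated when consuming succeeds; ignore failures."""
    @functools.wraps(function)
    def decorated_function(self, spec_obj):
        return_value = None
        try:
            return_value = function(self, spec_obj)
        except Exception as e:
            # The real code logs a warning; the boot then fails later in the
            # compute node's resource claim and may be rescheduled.
            print('host %s failed to consume: %s' % (self.host, e))
        else:
            self.updated = datetime.datetime.utcnow()
        return return_value
    return decorated_function


class MiniHostState(object):
    def __init__(self, host):
        self.host = host
        self.updated = None

    @set_update_time_on_success
    def consume_from_request(self, spec_obj):
        pass  # resource bookkeeping would happen here


state = MiniHostState('node1')
state.consume_from_request(spec_obj=None)
assert state.updated is not None
# --- end sketch ---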
@@ -253,11 +252,12 @@ class HostState(object):
self.ram_allocation_ratio = compute.ram_allocation_ratio
@set_update_time_on_success
def consume_from_instance(self, instance):
"""Incrementally update host state from an instance."""
disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
ram_mb = instance['memory_mb']
vcpus = instance['vcpus']
def consume_from_request(self, spec_obj):
"""Incrementally update host state from an RequestSpec object."""
disk_mb = (spec_obj.root_gb +
spec_obj.ephemeral_gb) * 1024
ram_mb = spec_obj.memory_mb
vcpus = spec_obj.vcpus
self.free_ram_mb -= ram_mb
self.free_disk_mb -= disk_mb
self.vcpus_used += vcpus
@@ -265,7 +265,7 @@ class HostState(object):
# Track number of instances on host
self.num_instances += 1
pci_requests = instance.get('pci_requests')
pci_requests = spec_obj.pci_requests
if pci_requests and self.pci_stats:
pci_requests = pci_requests.requests
else:
@@ -274,24 +274,33 @@ class HostState(object):
# Calculate the numa usage
host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
self)
instance_numa_topology = hardware.instance_topology_from_instance(
instance)
instance_numa_topology = spec_obj.numa_topology
instance['numa_topology'] = hardware.numa_fit_instance_to_host(
spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
host_numa_topology, instance_numa_topology,
limits=self.limits.get('numa_topology'),
pci_requests=pci_requests, pci_stats=self.pci_stats)
if pci_requests:
instance_cells = None
if instance['numa_topology']:
instance_cells = instance['numa_topology'].cells
if spec_obj.numa_topology:
instance_cells = spec_obj.numa_topology.cells
self.pci_stats.apply_requests(pci_requests, instance_cells)
# NOTE(sbauza): Yeah, that's crap. We should get rid of all of those
# NUMA helpers because now we're 100% sure that spec_obj.numa_topology
# is an InstanceNUMATopology object. Unfortunately, since
# HostState.host_numa_topology is still in limbo between a NUMATopology
# object (when updated by consume_from_request) and a ComputeNode object
# (when updated by update_from_compute_node), we need to keep the call
# to get_host_numa_usage_from_instance until it's fixed (and use a
# temporary orphaned Instance object as a proxy)
instance = objects.Instance(numa_topology=spec_obj.numa_topology)
self.numa_topology = hardware.get_host_numa_usage_from_instance(
self, instance)
# NOTE(sbauza): By considering all cases when the scheduler is called
# and when consume_from_instance() is run, we can safely say that there
# and when consume_from_request() is run, we can safely say that there
# is always an IO operation because we want to move the instance
self.num_io_ops += 1
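
Taken together, the hunks in this file keep the bookkeeping of consume_from_request() identical to the old instance-dict version; only the source of the numbers moved to the RequestSpec's flavor-backed fields. A compressed, self-contained sketch of that accounting (hypothetical Mini* classes; the NUMA and PCI handling shown above is omitted):

# --- illustrative sketch (NUMA/PCI handling omitted) ---
class MiniRequestSpec(object):
    def __init__(self, root_gb, ephemeral_gb, memory_mb, vcpus):
        self.root_gb = root_gb
        self.ephemeral_gb = ephemeral_gb
        self.memory_mb = memory_mb
        self.vcpus = vcpus


class MiniHostState(object):
    def __init__(self, free_ram_mb, free_disk_mb):
        self.free_ram_mb = free_ram_mb
        self.free_disk_mb = free_disk_mb
        self.vcpus_used = 0
        self.num_instances = 0
        self.num_io_ops = 0

    def consume_from_request(self, spec_obj):
        self.free_ram_mb -= spec_obj.memory_mb
        self.free_disk_mb -= (spec_obj.root_gb + spec_obj.ephemeral_gb) * 1024
        self.vcpus_used += spec_obj.vcpus
        self.num_instances += 1
        # per the NOTE above, every consumption counts as one I/O operation
        self.num_io_ops += 1


host = MiniHostState(free_ram_mb=4096, free_disk_mb=100 * 1024)
host.consume_from_request(MiniRequestSpec(root_gb=10, ephemeral_gb=0,
                                          memory_mb=1024, vcpus=2))
assert (host.free_ram_mb, host.free_disk_mb) == (3072, 92160)
assert (host.vcpus_used, host.num_instances, host.num_io_ops) == (2, 1, 1)
# --- end sketch ---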

@@ -89,7 +89,7 @@ class IronicNodeState(host_manager.HostState):
self.updated = compute.updated_at
@host_manager.set_update_time_on_success
def consume_from_instance(self, instance):
def consume_from_request(self, spec_obj):
"""Consume nodes entire resources regardless of instance request."""
self.free_ram_mb = 0
self.free_disk_mb = 0
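
Only the signature changes for the Ironic override; the behaviour stays that a bare-metal node is claimed whole, whatever sizes the RequestSpec carries. A tiny hypothetical sketch of that contract (the vcpus handling is inferred from the tests further below, not from this hunk):

# --- illustrative sketch (hypothetical class; vcpus handling assumed) ---
class MiniIronicNodeState(object):
    def __init__(self, free_ram_mb, free_disk_mb, vcpus_total):
        self.free_ram_mb = free_ram_mb
        self.free_disk_mb = free_disk_mb
        self.vcpus_total = vcpus_total
        self.vcpus_used = 0

    def consume_from_request(self, spec_obj):
        # The whole node is consumed regardless of what spec_obj asks for.
        self.free_ram_mb = 0
        self.free_disk_mb = 0
        self.vcpus_used = self.vcpus_total


node = MiniIronicNodeState(free_ram_mb=1024, free_disk_mb=10 * 1024,
                           vcpus_total=1)
node.consume_from_request(spec_obj=None)
assert (node.free_ram_mb, node.free_disk_mb, node.vcpus_used) == (0, 0, 1)
# --- end sketch ---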

@@ -33,7 +33,6 @@ from nova.objects import base as obj_base
from nova.pci import stats as pci_stats
from nova.scheduler import filters
from nova.scheduler import host_manager
from nova.scheduler import utils as sched_utils
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import matchers
@@ -756,7 +755,7 @@ class HostManagerChangedNodesTestCase(test.NoDBTestCase):
class HostStateTestCase(test.NoDBTestCase):
"""Test case for HostState class."""
# update_from_compute_node() and consume_from_instance() are tested
# update_from_compute_node() and consume_from_request() are tested
# in HostManagerTestCase.test_get_all_host_states()
def test_stat_consumption_from_compute_node(self):
@@ -870,52 +869,57 @@ class HostStateTestCase(test.NoDBTestCase):
self.assertEqual(hyper_ver_int, host.hypervisor_version)
@mock.patch('nova.virt.hardware.get_host_numa_usage_from_instance')
@mock.patch('nova.objects.Instance')
@mock.patch('nova.virt.hardware.numa_fit_instance_to_host')
@mock.patch('nova.virt.hardware.instance_topology_from_instance')
@mock.patch('nova.virt.hardware.host_topology_and_format_from_host')
def test_stat_consumption_from_instance(self, host_topo_mock,
instance_topo_mock,
numa_fit_mock,
instance_init_mock,
numa_usage_mock):
fake_numa_topology = mock.Mock()
host_topo_mock.return_value = ('fake-host-topology', None)
numa_usage_mock.return_value = 'fake-consumed-once'
numa_fit_mock.return_value = 'fake-fitted-once'
instance_topo_mock.return_value = fake_numa_topology
instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
project_id='12345', vm_state=vm_states.BUILDING,
task_state=task_states.SCHEDULING, os_type='Linux',
uuid='fake-uuid',
numa_topology=fake_numa_topology,
pci_requests={'requests': []})
fake_numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell()])
fake_host_numa_topology = mock.Mock()
fake_instance = objects.Instance(numa_topology=fake_numa_topology)
host_topo_mock.return_value = (fake_host_numa_topology, True)
numa_usage_mock.return_value = fake_host_numa_topology
numa_fit_mock.return_value = fake_numa_topology
instance_init_mock.return_value = fake_instance
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(root_gb=0, ephemeral_gb=0, memory_mb=0,
vcpus=0),
uuid='fake-uuid',
numa_topology=fake_numa_topology,
pci_requests=objects.InstancePCIRequests(requests=[]))
host = host_manager.HostState("fakehost", "fakenode")
self.assertIsNone(host.updated)
host.consume_from_instance(instance)
numa_fit_mock.assert_called_once_with('fake-host-topology',
host.consume_from_request(spec_obj)
numa_fit_mock.assert_called_once_with(fake_host_numa_topology,
fake_numa_topology,
limits=None, pci_requests=None,
pci_stats=None)
numa_usage_mock.assert_called_once_with(host, instance)
self.assertEqual('fake-consumed-once', host.numa_topology)
self.assertEqual('fake-fitted-once', instance['numa_topology'])
numa_usage_mock.assert_called_once_with(host, fake_instance)
self.assertEqual(fake_host_numa_topology, host.numa_topology)
self.assertIsNotNone(host.updated)
instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
project_id='12345', vm_state=vm_states.ACTIVE,
task_state=task_states.RESIZE_PREP, os_type='Linux',
uuid='fake-uuid',
numa_topology=fake_numa_topology)
numa_usage_mock.return_value = 'fake-consumed-twice'
numa_fit_mock.return_value = 'fake-fitted-twice'
host.consume_from_instance(instance)
self.assertEqual('fake-fitted-twice', instance['numa_topology'])
second_numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell()])
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(root_gb=0, ephemeral_gb=0, memory_mb=0,
vcpus=0),
uuid='fake-uuid',
numa_topology=second_numa_topology,
pci_requests=objects.InstancePCIRequests(requests=[]))
second_host_numa_topology = mock.Mock()
numa_usage_mock.return_value = second_host_numa_topology
numa_fit_mock.return_value = second_numa_topology
host.consume_from_request(spec_obj)
self.assertEqual(2, host.num_instances)
self.assertEqual(2, host.num_io_ops)
self.assertEqual(2, numa_usage_mock.call_count)
self.assertEqual(((host, instance),), numa_usage_mock.call_args)
self.assertEqual('fake-consumed-twice', host.numa_topology)
self.assertEqual(((host, fake_instance),), numa_usage_mock.call_args)
self.assertEqual(second_host_numa_topology, host.numa_topology)
self.assertIsNotNone(host.updated)
def test_stat_consumption_from_instance_pci(self):
@@ -931,24 +935,14 @@ class HostStateTestCase(test.NoDBTestCase):
requests=[objects.InstancePCIRequest(**r)
for r in fake_requests],
instance_uuid='fake-uuid')
instance = objects.Instance(root_gb=0, ephemeral_gb=0, memory_mb=512,
vcpus=1,
project_id='12345', vm_state=vm_states.BUILDING,
task_state=task_states.SCHEDULING, os_type='Linux',
uuid='fake-uuid',
numa_topology=inst_topology,
pci_requests=fake_requests_obj,
id = 1243)
req_spec = sched_utils.build_request_spec(None,
None,
[instance],
objects.Flavor(
root_gb=0,
ephemeral_gb=0,
memory_mb=1024,
vcpus=1))
# NOTE(sbauza): FilterSchedule._schedule() rehydrates pci_requests
req_spec['instance_properties']['pci_requests'] = fake_requests_obj
req_spec = objects.RequestSpec(
project_id='12345',
numa_topology=inst_topology,
pci_requests=fake_requests_obj,
flavor=objects.Flavor(root_gb=0,
ephemeral_gb=0,
memory_mb=512,
vcpus=1))
host = host_manager.HostState("fakehost", "fakenode")
self.assertIsNone(host.updated)
host.pci_stats = pci_stats.PciDeviceStats(
@@ -957,8 +951,8 @@ class HostStateTestCase(test.NoDBTestCase):
numa_node=1,
count=1)])
host.numa_topology = fakes.NUMA_TOPOLOGY
host.consume_from_instance(req_spec['instance_properties'])
self.assertIsInstance(req_spec['instance_properties']['numa_topology'],
host.consume_from_request(req_spec)
self.assertIsInstance(req_spec.numa_topology,
objects.InstanceNUMATopology)
self.assertEqual(512, host.numa_topology.cells[1].memory_usage)
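
The assertion above relies on the fitting step pinning the instance to the NUMA cell that exposes the requested PCI device (numa_node=1 in the fake pool), so the flavor's 512 MB are charged against that cell. A toy, hypothetical model of that accounting, not Nova's hardware module:

# --- illustrative sketch (toy NUMA-cell accounting) ---
class MiniCell(object):
    def __init__(self, cell_id):
        self.id = cell_id
        self.memory_usage = 0


cells = {0: MiniCell(0), 1: MiniCell(1)}
pci_device_numa_node = 1   # the fake PCI pool above lives on numa_node=1
instance_memory_mb = 512   # memory_mb of the flavor in the RequestSpec

# fit the instance onto the cell exposing the PCI device, then charge it
cells[pci_device_numa_node].memory_usage += instance_memory_mb

assert cells[1].memory_usage == 512
assert cells[0].memory_usage == 0
# --- end sketch ---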
@@ -973,21 +967,14 @@ class HostStateTestCase(test.NoDBTestCase):
requests=[objects.InstancePCIRequest(**r)
for r in fake_requests],
instance_uuid='fake-uuid')
instance = objects.Instance(root_gb=0, ephemeral_gb=0, memory_mb=512,
vcpus=1,
project_id='12345', vm_state=vm_states.BUILDING,
task_state=task_states.SCHEDULING, os_type='Linux',
uuid='fake-uuid',
pci_requests=fake_requests_obj,
id=1243)
req_spec = sched_utils.build_request_spec(None,
None,
[instance],
objects.Flavor(
root_gb=0,
ephemeral_gb=0,
memory_mb=1024,
vcpus=1))
req_spec = objects.RequestSpec(
project_id='12345',
numa_topology=None,
pci_requests=fake_requests_obj,
flavor=objects.Flavor(root_gb=0,
ephemeral_gb=0,
memory_mb=1024,
vcpus=1))
host = host_manager.HostState("fakehost", "fakenode")
self.assertIsNone(host.updated)
fake_updated = mock.sentinel.fake_updated
@@ -995,7 +982,7 @@ class HostStateTestCase(test.NoDBTestCase):
host.pci_stats = pci_stats.PciDeviceStats()
with mock.patch.object(host.pci_stats, 'apply_requests',
side_effect=exception.PciDeviceRequestFailed):
host.consume_from_instance(req_spec['instance_properties'])
host.consume_from_request(req_spec)
self.assertEqual(fake_updated, host.updated)
def test_resources_consumption_from_compute_node(self):

@@ -204,8 +204,11 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
host.update_from_compute_node(self.compute_node)
self.assertIsNone(host.updated)
instance = dict(root_gb=10, ephemeral_gb=0, memory_mb=1024, vcpus=1)
host.consume_from_instance(instance)
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(root_gb=10, ephemeral_gb=0, memory_mb=1024,
vcpus=1),
uuid='fake-uuid')
host.consume_from_request(spec_obj)
self.assertEqual(1, host.vcpus_used)
self.assertEqual(0, host.free_ram_mb)
@@ -217,8 +220,10 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
host.update_from_compute_node(self.compute_node)
self.assertIsNone(host.updated)
instance = dict(root_gb=20, ephemeral_gb=0, memory_mb=2048, vcpus=2)
host.consume_from_instance(instance)
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(root_gb=20, ephemeral_gb=0, memory_mb=2048,
vcpus=2))
host.consume_from_request(spec_obj)
self.assertEqual(1, host.vcpus_used)
self.assertEqual(0, host.free_ram_mb)
@@ -230,8 +235,10 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
host.update_from_compute_node(self.compute_node)
self.assertIsNone(host.updated)
instance = dict(root_gb=5, ephemeral_gb=0, memory_mb=512, vcpus=1)
host.consume_from_instance(instance)
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(root_gb=5, ephemeral_gb=0, memory_mb=512,
vcpus=1))
host.consume_from_request(spec_obj)
self.assertEqual(1, host.vcpus_used)
self.assertEqual(0, host.free_ram_mb)
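
A recurring detail in the rewritten tests, both here and in the HostState tests above, is that each RequestSpec is built with nothing but a Flavor for sizing, while consume_from_request() reads spec_obj.memory_mb, spec_obj.vcpus and friends directly. That only works if those fields are proxied from the flavor, which is the assumption this change leans on. A hypothetical, dependency-free sketch of such proxying (stand-ins, not nova.objects itself):

# --- illustrative sketch (hypothetical stand-ins for Flavor/RequestSpec) ---
class MiniFlavor(object):
    def __init__(self, root_gb, ephemeral_gb, memory_mb, vcpus):
        self.root_gb = root_gb
        self.ephemeral_gb = ephemeral_gb
        self.memory_mb = memory_mb
        self.vcpus = vcpus


class MiniRequestSpec(object):
    """Sizing attributes delegate to the embedded flavor."""

    def __init__(self, flavor):
        self.flavor = flavor

    @property
    def root_gb(self):
        return self.flavor.root_gb

    @property
    def ephemeral_gb(self):
        return self.flavor.ephemeral_gb

    @property
    def memory_mb(self):
        return self.flavor.memory_mb

    @property
    def vcpus(self):
        return self.flavor.vcpus


spec = MiniRequestSpec(MiniFlavor(root_gb=10, ephemeral_gb=0,
                                  memory_mb=1024, vcpus=1))
assert (spec.root_gb, spec.memory_mb, spec.vcpus) == (10, 1024, 1)
# --- end sketch ---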