Unavailable hosts have no resources for use

If a host's:

  * resources are unavailable
  * in an unusable state

the system should:

  * report 0 resources to the resource tracker
  * show 0 resources in capacity views
  * not be scheduled to

Change-Id: Ia1c2f6f161dde1e23acce85a54566d07805d13df
Closes-Bug: 1503453
This commit is contained in:
Jesse J. Cook 2016-04-16 00:35:34 +00:00
parent c41230be03
commit 016b810f67
5 changed files with 34 additions and 21 deletions

View File

@ -275,12 +275,12 @@ class CellStateManager(base.Base):
continue
chost = compute_hosts[host]
chost['free_ram_mb'] += compute.free_ram_mb
chost['free_ram_mb'] += max(0, compute.free_ram_mb)
free_disk = compute.free_disk_gb * 1024
chost['free_disk_mb'] += free_disk
chost['total_ram_mb'] += compute.memory_mb
chost['free_disk_mb'] += max(0, free_disk)
chost['total_ram_mb'] += max(0, compute.memory_mb)
total_disk = compute.local_gb * 1024
chost['total_disk_mb'] += total_disk
chost['total_disk_mb'] += max(0, total_disk)
_get_compute_hosts()
if not compute_hosts:

View File

@ -852,6 +852,8 @@ class ResourceTracker(object):
for instance in instances:
if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL:
self._update_usage_from_instance(context, instance)
self.compute_node.free_ram_mb = max(0, self.compute_node.free_ram_mb)
self.compute_node.free_disk_gb = max(0, self.compute_node.free_disk_gb)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for

View File

@ -163,10 +163,11 @@ class TestCellsStateManager(test.NoDBTestCase):
# utilize entire cell
cap = self._capacity(0.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
cell_free_ram = sum(max(0, compute[3]) for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
cell_free_disk = 1024 * sum(max(0, compute[4])
for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
@ -183,10 +184,11 @@ class TestCellsStateManager(test.NoDBTestCase):
# reserve the entire cell. (utilize zero percent)
cap = self._capacity(100.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
cell_free_ram = sum(max(0, compute[3]) for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
cell_free_disk = 1024 * sum(max(0, compute[4])
for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
@ -200,10 +202,11 @@ class TestCellsStateManager(test.NoDBTestCase):
# utilize half the cell's free capacity
cap = self._capacity(50.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
cell_free_ram = sum(max(0, compute[3]) for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
cell_free_disk = 1024 * sum(max(0, compute[4])
for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
@ -237,11 +240,13 @@ class TestCellsStateManagerNToOne(TestCellsStateManager):
# utilize half the cell's free capacity
cap = self._capacity(50.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES_N_TO_ONE)
cell_free_ram = sum(max(0, compute[3])
for compute in FAKE_COMPUTES_N_TO_ONE)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = (1024 *
sum(compute[4] for compute in FAKE_COMPUTES_N_TO_ONE))
sum(max(0, compute[4])
for compute in FAKE_COMPUTES_N_TO_ONE))
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
@ -269,10 +274,12 @@ class TestCellsStateManagerNodeDown(test.NoDBTestCase):
def test_capacity_no_reserve_nodedown(self):
cap = self._capacity(0.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES[:-1])
cell_free_ram = sum(max(0, compute[3])
for compute in FAKE_COMPUTES[:-1])
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
free_disk = sum(compute[4] for compute in FAKE_COMPUTES[:-1])
free_disk = sum(max(0, compute[4])
for compute in FAKE_COMPUTES[:-1])
cell_free_disk = 1024 * free_disk
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])

View File

@ -278,11 +278,11 @@ class IronicDriverTestCase(test.NoDBTestCase):
else:
props_dict = props
expected_cpus = props['cpus']
self.assertEqual(expected_cpus, result['vcpus'])
self.assertEqual(0, result['vcpus'])
self.assertEqual(expected_cpus, result['vcpus_used'])
self.assertEqual(props_dict['memory_mb'], result['memory_mb'])
self.assertEqual(0, result['memory_mb'])
self.assertEqual(props_dict['memory_mb'], result['memory_mb_used'])
self.assertEqual(props_dict['local_gb'], result['local_gb'])
self.assertEqual(0, result['local_gb'])
self.assertEqual(props_dict['local_gb'], result['local_gb_used'])
self.assertEqual(node_uuid, result['hypervisor_hostname'])
@ -397,11 +397,11 @@ class IronicDriverTestCase(test.NoDBTestCase):
instance_info=instance_info)
result = self.driver._node_resource(node)
self.assertEqual(instance_info['vcpus'], result['vcpus'])
self.assertEqual(0, result['vcpus'])
self.assertEqual(instance_info['vcpus'], result['vcpus_used'])
self.assertEqual(instance_info['memory_mb'], result['memory_mb'])
self.assertEqual(0, result['memory_mb'])
self.assertEqual(instance_info['memory_mb'], result['memory_mb_used'])
self.assertEqual(instance_info['local_gb'], result['local_gb'])
self.assertEqual(0, result['local_gb'])
self.assertEqual(instance_info['local_gb'], result['local_gb_used'])
self.assertEqual(node_uuid, result['hypervisor_hostname'])
self.assertEqual(stats, result['stats'])

View File

@ -310,7 +310,11 @@ class IronicDriver(virt_driver.ComputeDriver):
vcpus_used = vcpus = instance_info['vcpus']
memory_mb_used = memory_mb = instance_info['memory_mb']
local_gb_used = local_gb = instance_info['local_gb']
elif self._node_resources_unavailable(node):
# Always checking allows us to catch the case where Nova thinks there
# are available resources on the Node, but Ironic does not (because it
# is not in a usable state): https://launchpad.net/bugs/1503453
if self._node_resources_unavailable(node):
# The node's current state is such that it should not present any
# of its resources to Nova
vcpus = 0