Merge "Re-use existing ComputeNode on ironic rebalance" into stable/ocata
This commit is contained in:
commit
709f07c1a7
|
@ -414,6 +414,52 @@ class ResourceTracker(object):
|
|||
return (nodename not in self.compute_nodes or
|
||||
not self.driver.node_is_available(nodename))
|
||||
|
||||
def _check_for_nodes_rebalance(self, context, resources, nodename):
    """Check if nodes rebalance has happened.

    The ironic driver maintains a hash ring mapping bare metal nodes
    to compute nodes. If a compute dies, the hash ring is rebuilt, and
    some of its bare metal nodes (more precisely, those not in ACTIVE
    state) are assigned to other computes.

    This method checks for this condition and adjusts the database
    accordingly.

    :param context: security context
    :param resources: initial values
    :param nodename: node name
    :returns: True if a suitable compute node record was found, else False
    """
    # Only drivers that rebalance nodes between compute hosts (i.e.
    # ironic) need the DB lookup below; everyone else bails out early.
    if not self.driver.rebalances_nodes:
        return False

    # Ironic may just have rebalanced this node onto us.  If a record
    # with the matching hypervisor_hostname already exists, re-use it
    # rather than creating a duplicate and having to move the existing
    # placement allocations.
    candidates = objects.ComputeNodeList.get_by_hypervisor(
        context, nodename)

    if len(candidates) > 1:
        # More than one record for the same hypervisor hostname is an
        # operator-visible data problem we cannot resolve here.
        LOG.error(
            _("Found more than one ComputeNode for nodename %s. "
              "Please clean up the orphaned ComputeNode records in your "
              "DB."),
            nodename)
        return False

    if not candidates:
        return False

    # Exactly one match: claim the record for this host and refresh it.
    cn = candidates[0]
    LOG.info(_LI("ComputeNode %(name)s moving from %(old)s to "
                 "%(new)s"),
             {"name": nodename, "old": cn.host, "new": self.host})
    cn.host = self.host
    self.compute_nodes[nodename] = cn
    self._copy_resources(cn, resources)
    self._setup_pci_tracker(context, cn, resources)
    self._update(context, cn)
    return True
|
||||
|
||||
def _init_compute_node(self, context, resources):
|
||||
"""Initialize the compute node if it does not already exist.
|
||||
|
||||
|
@ -449,6 +495,9 @@ class ResourceTracker(object):
|
|||
self.scheduler_client.update_resource_stats(cn)
|
||||
return
|
||||
|
||||
if self._check_for_nodes_rebalance(context, resources, nodename):
|
||||
return
|
||||
|
||||
# there was no local copy and none in the database
|
||||
# so we need to create a new compute node. This needs
|
||||
# to be initialized with resource values.
|
||||
|
|
|
@ -426,6 +426,7 @@ def setup_rt(hostname, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
|
|||
vd.get_available_resource.return_value = virt_resources
|
||||
vd.get_host_ip_addr.return_value = _NODENAME
|
||||
vd.estimate_instance_overhead.side_effect = estimate_overhead
|
||||
vd.rebalances_nodes = False
|
||||
|
||||
with test.nested(
|
||||
mock.patch('nova.scheduler.client.SchedulerClient',
|
||||
|
@ -998,15 +999,95 @@ class TestInitComputeNode(BaseTestCase):
|
|||
self.assertFalse(create_mock.called)
|
||||
self.assertTrue(self.sched_client_mock.update_resource_stats.called)
|
||||
|
||||
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
            return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
            '_update')
def test_compute_node_rebalanced(self, update_mock, get_mock, create_mock,
                                 pci_mock, get_by_hypervisor_mock):
    """An ironic rebalance re-uses another host's ComputeNode record."""
    self._setup_rt()
    self.driver_mock.rebalances_nodes = True

    # A ComputeNode record for this nodename exists, but it is still
    # owned by a different (presumably dead) compute host.
    cn = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
    cn.host = "old-host"

    # Lookup by (host, nodename) misses; lookup by hypervisor hostname
    # finds the orphaned record.
    get_mock.side_effect = exc.NotFound
    get_by_hypervisor_mock.side_effect = lambda _ctx, _nodename: [cn]
    resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)

    self.rt._init_compute_node(mock.sentinel.ctx, resources)

    # The existing record must be adopted and updated, not re-created.
    get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
                                     _NODENAME)
    get_by_hypervisor_mock.assert_called_once_with(mock.sentinel.ctx,
                                                   _NODENAME)
    create_mock.assert_not_called()
    update_mock.assert_called_once_with(mock.sentinel.ctx, cn)
    self.assertEqual(_HOSTNAME, self.rt.compute_nodes[_NODENAME].host)
|
||||
|
||||
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
            return_value=objects.PciDeviceList(objects=[]))
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
            '_update')
def test_compute_node_created_on_empty(self, update_mock, get_mock,
                                       create_mock, pci_tracker_mock,
                                       get_by_hypervisor_mock):
    """A new ComputeNode is created when no record exists anywhere.

    The rebalance lookup returns nothing, so the tracker must fall
    through to creating a fresh record.
    """
    get_by_hypervisor_mock.return_value = []
    self._test_compute_node_created(update_mock, get_mock, create_mock,
                                    pci_tracker_mock,
                                    get_by_hypervisor_mock)
|
||||
|
||||
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
            return_value=objects.PciDeviceList(objects=[]))
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
            '_update')
def test_compute_node_created_on_empty_rebalance(self, update_mock,
                                                 get_mock,
                                                 create_mock,
                                                 pci_tracker_mock,
                                                 get_by_hypervisor_mock):
    """Creation still happens for a rebalance-capable driver when the
    hypervisor-hostname lookup finds no candidate records.
    """
    get_by_hypervisor_mock.return_value = []
    self._test_compute_node_created(update_mock, get_mock, create_mock,
                                    pci_tracker_mock,
                                    get_by_hypervisor_mock,
                                    rebalances_nodes=True)
|
||||
|
||||
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
            return_value=objects.PciDeviceList(objects=[]))
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
            '_update')
def test_compute_node_created_too_many(self, update_mock, get_mock,
                                       create_mock, pci_tracker_mock,
                                       get_by_hypervisor_mock):
    """Multiple candidate records are treated as "no usable record":
    a new ComputeNode is created and the duplicates are left for the
    operator to clean up.
    """
    get_by_hypervisor_mock.return_value = ["fake_node_1", "fake_node_2"]
    self._test_compute_node_created(update_mock, get_mock, create_mock,
                                    pci_tracker_mock,
                                    get_by_hypervisor_mock,
                                    rebalances_nodes=True)
|
||||
|
||||
def _test_compute_node_created(self, update_mock, get_mock,
|
||||
create_mock, pci_tracker_mock,
|
||||
get_by_hypervisor_mock,
|
||||
rebalances_nodes=False):
|
||||
self.flags(cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0,
|
||||
disk_allocation_ratio=1.0)
|
||||
self._setup_rt()
|
||||
self.driver_mock.rebalances_nodes = rebalances_nodes
|
||||
|
||||
get_mock.side_effect = exc.NotFound
|
||||
|
||||
|
@ -1070,6 +1151,11 @@ class TestInitComputeNode(BaseTestCase):
|
|||
cn = self.rt.compute_nodes[_NODENAME]
|
||||
get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
|
||||
_NODENAME)
|
||||
if rebalances_nodes:
|
||||
get_by_hypervisor_mock.assert_called_once_with(
|
||||
mock.sentinel.ctx, _NODENAME)
|
||||
else:
|
||||
get_by_hypervisor_mock.assert_not_called()
|
||||
create_mock.assert_called_once_with()
|
||||
self.assertTrue(obj_base.obj_equal_prims(expected_compute, cn))
|
||||
pci_tracker_mock.assert_called_once_with(mock.sentinel.ctx,
|
||||
|
|
|
@ -130,6 +130,11 @@ class ComputeDriver(object):
|
|||
"supports_device_tagging": False,
|
||||
}
|
||||
|
||||
# Indicates if this driver will rebalance nodes among compute service
|
||||
# hosts. This is really here for ironic and should not be used by any
|
||||
# other driver.
|
||||
rebalances_nodes = False
|
||||
|
||||
def __init__(self, virtapi):
    """Store the virt API handle; no compute event callback is set yet."""
    # NOTE(review): the callback is presumably registered later by the
    # compute manager — registration path not visible in this chunk.
    self._compute_event_callback = None
    self.virtapi = virtapi
|
||||
|
|
|
@ -25,6 +25,7 @@ semantics of real hypervisor connections.
|
|||
|
||||
import collections
|
||||
import contextlib
|
||||
import copy
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
|
@ -155,8 +156,12 @@ class FakeDriver(driver.ComputeDriver):
|
|||
self._mounts = {}
|
||||
self._interfaces = {}
|
||||
self.active_migrations = {}
|
||||
self._nodes = self._init_nodes()
|
||||
|
||||
def _init_nodes(self):
    """Return a shallow copy of the module-level fake node list,
    seeding it with this host's name if nothing registered nodes yet.
    """
    if _FAKE_NODES:
        return copy.copy(_FAKE_NODES)
    # Nobody called set_nodes() yet: default to a single node named
    # after the configured host.
    set_nodes([CONF.host])
    return copy.copy(_FAKE_NODES)
|
||||
|
||||
def init_host(self, host):
    """No-op: the fake driver needs no per-host initialization."""
    pass
|
||||
|
|
|
@ -131,6 +131,9 @@ class IronicDriver(virt_driver.ComputeDriver):
|
|||
"supports_attach_interface": False
|
||||
}
|
||||
|
||||
# This driver is capable of rebalancing nodes between computes.
|
||||
rebalances_nodes = True
|
||||
|
||||
def __init__(self, virtapi, read_only=False):
|
||||
super(IronicDriver, self).__init__(virtapi)
|
||||
global ironic
|
||||
|
|
Loading…
Reference in New Issue