Improve cluster event listener

At the moment, we handle failover events sequentially, waiting a
fixed amount of time between them.

This change ensures we handle the events concurrently and that we
don't wait between them.

Change-Id: I225bb63050c7f044d1c7562bcdb2640da4321273
(cherry picked from commit 9f0ea777a7)
Author: Lucian Petrut, 2017-04-07 17:43:24 +03:00
Committed by: Claudiu Belu
parent 7669cfc4eb
commit 47622fdbce
2 changed files with 26 additions and 26 deletions
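
Below is an editor's sketch of the pattern this change introduces, for context
only; it is not part of the commit. The thread-based spawn_n() merely stands in
for nova.utils.spawn_n, and fake_owner_change_listener stands in for the
listener returned by the cluster utils' get_vm_owner_change_listener():

# Editor's sketch of the new event-handling pattern; spawn_n() stands in for
# nova.utils.spawn_n and fake_owner_change_listener stands in for the callable
# returned by get_vm_owner_change_listener().
import functools
import threading


def spawn_n(func, *args):
    # Fire-and-forget worker thread, so the caller never blocks on the handler.
    threading.Thread(target=func, args=args).start()


def failover_migrate(instance_name, old_host, new_host):
    print('%s failed over: %s -> %s' % (instance_name, old_host, new_host))


def fake_owner_change_listener(callback):
    # The real listener blocks, watching for VM owner change (failover)
    # events, and invokes the callback once per event as it arrives.
    for event in [('instance-1', 'node-a', 'node-b'),
                  ('instance-2', 'node-a', 'node-c')]:
        callback(*event)


# Each event is handed to its own worker as soon as it arrives: events are
# handled concurrently and there is no fixed wait between them.
callback = functools.partial(spawn_n, failover_migrate)
spawn_n(fake_owner_change_listener, callback)

The point of the functools.partial callback is that the blocking listener only
dispatches work: each failover event is handed to its own worker immediately,
so nothing sleeps on a fixed interval between events.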

File: clusterops.py

@@ -15,6 +15,8 @@
 """Management class for Cluster VM operations."""
 
+import functools
+
 from nova.compute import power_state
 from nova.compute import task_states
 from nova.compute import vm_states
@@ -22,12 +24,12 @@ import nova.conf
 from nova import context
 from nova import network
 from nova import objects
+from nova import utils
 from nova.virt import block_device
 from os_win import exceptions as os_win_exc
 from os_win import utilsfactory
 from oslo_config import cfg
 from oslo_log import log as logging
-from oslo_service import loopingcall
 
 from hyperv.i18n import _LI, _LE
 from hyperv.nova import hostops
@@ -53,7 +55,6 @@ class ClusterOps(object):
         self._clustutils.check_cluster_state()
         self._instance_map = {}
 
-        self._daemon = None
         self._this_node = hostops.HostOps.get_hostname()
 
         self._context = context.get_admin_context()
@@ -88,17 +89,10 @@ class ClusterOps(object):
     def start_failover_listener_daemon(self):
         """Start the daemon failover listener."""
-        def _looper():
-            try:
-                self._clustutils.monitor_vm_failover(self._failover_migrate)
-            except Exception:
-                LOG.exception(_LE('Exception occured during failover '
-                                  'observation / migration.'))
+        listener = self._clustutils.get_vm_owner_change_listener()
+        cbk = functools.partial(utils.spawn_n, self._failover_migrate)
 
-        self._daemon = loopingcall.FixedIntervalLoopingCall(_looper)
-        self._daemon.start(
-            interval=CONF.hyperv.cluster_event_check_interval)
+        utils.spawn_n(listener, cbk)
 
     def reclaim_failovered_instances(self):
         # NOTE(claudiub): some instances might have failovered while the
@@ -114,8 +108,9 @@ class ClusterOps(object):
                           self._this_node.upper() != instance.host.upper()]
 
         for instance in nova_instances:
-            self._failover_migrate(instance.name, instance.host,
-                                   self._this_node)
+            utils.spawn_n(self._failover_migrate,
+                          instance.name, instance.host,
+                          self._this_node)
 
     def _failover_migrate(self, instance_name, old_host, new_host):
         """This method will check if the generated event is a legitimate

File: test_clusterops.py

@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import time
-
 import ddt
 import mock
 from nova.compute import power_state
@@ -46,6 +44,7 @@ class ClusterOpsTestCase(test_base.HyperVBaseTestCase):
         self.clusterops._network_api = mock.MagicMock()
         self.clusterops._vmops = mock.MagicMock()
         self.clusterops._serial_console_ops = mock.MagicMock()
+        self._clustutils = self.clusterops._clustutils
 
     def test_get_instance_host(self):
         mock_instance = fake_instance.fake_instance_obj(self.context)
@@ -100,21 +99,26 @@ class ClusterOpsTestCase(test_base.HyperVBaseTestCase):
             self.clusterops._instance_map[mock_instance.name],
             mock_instance.uuid)
 
-    def test_start_failover_listener_daemon(self):
-        self.flags(cluster_event_check_interval=0, group='hyperv')
-        self.clusterops._clustutils.monitor_vm_failover.side_effect = (
-            os_win_exc.HyperVClusterException)
+    @mock.patch('nova.utils.spawn_n')
+    def test_start_failover_listener_daemon(self, mock_spawn):
         self.clusterops.start_failover_listener_daemon()
 
-        # wait for the daemon to do something.
-        time.sleep(0.5)
-        self.clusterops._clustutils.monitor_vm_failover.assert_called_with(
-            self.clusterops._failover_migrate)
+        spawn_args = mock_spawn.call_args_list[0][0]
+        self.assertEqual(
+            self._clustutils.get_vm_owner_change_listener.return_value,
+            spawn_args[0])
+
+        cbk = spawn_args[1]
+        cbk()
+        mock_spawn.assert_called_with(self.clusterops._failover_migrate)
 
+    @mock.patch('nova.utils.spawn_n')
     @mock.patch.object(clusterops.ClusterOps, '_failover_migrate')
     @mock.patch.object(clusterops.ClusterOps, '_get_nova_instances')
     def test_reclaim_failovered_instances(self, mock_get_instances,
-                                          mock_failover_migrate):
+                                          mock_failover_migrate,
+                                          mock_spawn):
         self.clusterops._this_node = 'fake_node'
         mock_instance1 = mock.MagicMock(host='other_host')
         mock_instance2 = mock.MagicMock(host=self.clusterops._this_node)
@@ -126,7 +130,8 @@ class ClusterOpsTestCase(test_base.HyperVBaseTestCase):
         mock_get_instances.assert_called_once_with(
             ['id', 'uuid', 'name', 'host'],
             self.clusterops._vmops.list_instance_uuids.return_value)
-        mock_failover_migrate.assert_called_once_with(
+        mock_spawn.assert_called_once_with(
+            mock_failover_migrate,
            mock_instance1.name, mock_instance1.host,
            self.clusterops._this_node)
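
A side note on the updated unit test: it patches nova.utils.spawn_n and uses
mock's call_args_list to capture, and then exercise, the callback that
start_failover_listener_daemon hands to the listener. A stripped-down sketch of
that capture technique (editor's illustration only, using the standard mock
API):

# Editor's sketch: capturing and exercising a callback through a mocked spawn_n.
import functools
from unittest import mock


def failover_migrate(name, old_host, new_host):
    return (name, old_host, new_host)


spawn_n = mock.Mock()
cbk = functools.partial(spawn_n, failover_migrate)

# Production code hands cbk to the owner change listener; simulate one event.
cbk('instance-1', 'node-a', 'node-b')

# The mock recorded the call, so a test can assert what would have been spawned.
args = spawn_n.call_args_list[0][0]
assert args[0] is failover_migrate
assert args[1:] == ('instance-1', 'node-a', 'node-b')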