Revert "libvirt: slow live-migration to ensure network is ready"

This reverts commit 8e6d5d404c.

As detailed in the bug, this is overly racy: it waits for the event
potentially a long way (and a couple of RPC calls) away from where
the event will be triggered. The compute manager now has a generic
mechanism to do this, which conflicts with and replaces this
functionality when enabled (default is off in Rocky, to be defaulted
to on in Stein).

Conflicts:
	nova/tests/unit/virt/libvirt/test_driver.py
	nova/virt/libvirt/driver.py

Change-Id: Ibf2b5eeafd962e93ae4ab6290015d58c33024132
Closes-Bug: #1786346
(cherry picked from commit 411c45842f)
Dan Smith authored on 2018-08-09 14:37:28 -07:00; committed by Matt Riedemann
parent 7b685cccfc
commit afd1c1e6e1
4 changed files with 41 additions and 266 deletions

nova/tests/unit/virt/libvirt/test_driver.py

@@ -9609,15 +9609,15 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")
disk_paths = ['vda', 'vdb']
_bandwidth = CONF.libvirt.live_migration_bandwidth
params = {
'migrate_uri': 'tcp://127.0.0.2',
'migrate_disks': disk_paths,
'bandwidth': libvirt_driver.MIN_MIGRATION_SPEED_BW,
'bandwidth': _bandwidth,
'destination_xml': target_xml,
}
# start test
bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='10.0.0.1',
graphics_listen_addr_spice='10.0.0.2',
@@ -9631,8 +9631,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, target_connection,
False, migrate_data, guest, disk_paths,
bandwidth=bandwidth)
False, migrate_data, guest, disk_paths)
mock_xml.assert_called_once_with(
flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
mock_migrateToURI3.assert_called_once_with(
@@ -9651,18 +9650,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance = objects.Instance(**instance_dict)
params = {
'bandwidth': libvirt_driver.MIN_MIGRATION_SPEED_BW
'bandwidth': CONF.libvirt.live_migration_bandwidth,
}
migrate_data = objects.LibvirtLiveMigrateData(
target_connect_addr=target_connection,
block_migration=False)
dom_mock = mock.MagicMock()
guest = libvirt_guest.Guest(dom_mock)
_bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
drvr._live_migration_operation(self.context, instance,
target_connection, False,
migrate_data, guest, None,
bandwidth=_bandwidth)
migrate_data, guest, None)
dom_mock.migrateToURI3.assert_called_once_with(
drvr._live_migration_uri(target_connection),
params=params, flags=0)
@@ -9687,13 +9684,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'power_state': power_state.RUNNING,
'vm_state': vm_states.ACTIVE})
instance = objects.Instance(**instance_dict)
instance.info_cache = objects.InstanceInfoCache(
network_info=_fake_network_info(self, 1))
migrate_data = objects.LibvirtLiveMigrateData(
block_migration=True)
dom = fakelibvirt.Domain(drvr._get_connection(), '<domain/>', True)
guest = libvirt_guest.Guest(dom)
guest.migrate_configure_max_speed = mock.MagicMock()
mock_guest.return_value = guest
drvr._live_migration(self.context, instance, 'dest',
lambda: None, lambda: None, True,
@@ -9702,9 +9696,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_thread.assert_called_once_with(
drvr._live_migration_operation,
self.context, instance, 'dest', True,
migrate_data, guest, [], libvirt_driver.MIN_MIGRATION_SPEED_BW)
guest.migrate_configure_max_speed.assert_called_once_with(
CONF.libvirt.live_migration_bandwidth)
migrate_data, guest, [])
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch.object(nova.virt.libvirt.migration,
@@ -9729,7 +9721,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
params = {
'migrate_disks': disk_paths,
'migrate_uri': 'tcp://127.0.0.2',
'bandwidth': libvirt_driver.MIN_MIGRATION_SPEED_BW,
'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': target_xml
}
@@ -9759,12 +9751,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
guest = libvirt_guest.Guest(dom)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
_bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
mock_updated_guest_xml.return_value = target_xml
drvr._live_migration_operation(
self.context, instance_ref, target_connection,
False, migrate_data, guest, disk_paths,
_bandwidth)
False, migrate_data, guest, disk_paths)
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri(target_connection),
params=params, flags=0)
@@ -9799,8 +9789,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def _test(migrate, has_min_version, get_updated_guest_xml):
drvr._live_migration_operation(
self.context, instance, 'dest.host', False,
migrate_data, guest, [],
CONF.libvirt.live_migration_bandwidth)
migrate_data, guest, [])
self.assertEqual(1, get_updated_guest_xml.call_count)
migrate.assert_called()
@@ -9831,7 +9820,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
params = {
'migrate_disks': disk_paths,
'migrate_uri': 'tcp://127.0.0.2',
'bandwidth': libvirt_driver.MIN_MIGRATION_SPEED_BW,
'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': target_xml,
}
@@ -9860,11 +9849,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
guest = libvirt_guest.Guest(dom)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
_bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
mock_updated_guest_xml.return_value = target_xml
drvr._live_migration_operation(self.context, instance_ref,
target_connection, False, migrate_data,
guest, disk_paths, _bandwidth)
guest, disk_paths)
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri(target_connection),
params=params, flags=0)
@@ -10139,12 +10127,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
params = {
'migrate_uri': 'tcp://127.0.0.2',
'migrate_disks': ['vda', 'vdb'],
'bandwidth': libvirt_driver.MIN_MIGRATION_SPEED_BW,
'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': target_xml,
}
# start test
bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='10.0.0.1',
graphics_listen_addr_spice='10.0.0.2',
@@ -10159,8 +10146,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, target_connection,
False, migrate_data, guest, disk_paths,
bandwidth=bandwidth)
False, migrate_data, guest, disk_paths)
mock_xml.assert_called_once_with(
flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
mock_migrateToURI3.assert_called_once_with(
@@ -10192,8 +10178,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.MigrationError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
False, migrate_data, guest, [],
bandwidth=libvirt_driver.MIN_MIGRATION_SPEED_BW)
False, migrate_data, guest, [])
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@@ -10211,7 +10196,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
params = {
'migrate_uri': 'tcp://127.0.0.2',
'migrate_disks': ['vda', 'vdb'],
'bandwidth': libvirt_driver.MIN_MIGRATION_SPEED_BW,
'bandwidth': CONF.libvirt.live_migration_bandwidth,
}
mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")
@@ -10232,8 +10217,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance, target_connection,
False, migrate_data, guest, disk_paths,
libvirt_driver.MIN_MIGRATION_SPEED_BW)
False, migrate_data, guest, disk_paths)
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri(target_connection),
params=params, flags=0)
@@ -10260,17 +10244,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._parse_migration_flags()
_bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
instance = objects.Instance(**self.test_instance)
drvr._live_migration_operation(self.context, instance,
target_connection,
True, migrate_data, guest,
device_names, _bandwidth)
device_names)
params = {
'migrate_uri': 'tcp://127.0.0.2',
'migrate_disks': device_names,
'bandwidth': _bandwidth,
'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': '<xml/>',
}
if not params['migrate_disks']:
@@ -10315,11 +10298,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
device_names = ['disk1', 'disk2']
# Preparing mocks
_bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
# Since we are passing the VIR_MIGRATE_TUNNELLED flag, the
# 'params' dict will not (as expected) contain 'migrate_disks'
params = {
'bandwidth': _bandwidth
'bandwidth': CONF.libvirt.live_migration_bandwidth
}
# Start test
migrate_data = objects.LibvirtLiveMigrateData(
@@ -10337,7 +10319,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance = objects.Instance(**self.test_instance)
drvr._live_migration_operation(self.context, instance,
target_connection, True, migrate_data,
guest, device_names, _bandwidth)
guest, device_names)
expected_flags = (fakelibvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
fakelibvirt.VIR_MIGRATE_PERSIST_DEST |
@@ -10364,7 +10346,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
params = {
'migrate_uri': 'tcp://127.0.0.2',
'migrate_disks': disk_paths,
'bandwidth': libvirt_driver.MIN_MIGRATION_SPEED_BW,
'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': '<xml/>',
}
@@ -10372,7 +10354,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")
# Start test
bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='10.0.0.1',
graphics_listen_addr_spice='10.0.0.2',
@@ -10386,8 +10367,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, target_connection,
False, migrate_data, guest, disk_paths,
bandwidth=bandwidth)
False, migrate_data, guest, disk_paths)
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri(target_connection),
params=params, flags=0)
@@ -11377,87 +11357,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_live_migration_main(self, mock_copy_disk_path, mock_running,
mock_guest, mock_monitor, mock_thread,
mock_conn):
virtapi = manager.ComputeVirtAPI(mock.MagicMock())
drvr = libvirt_driver.LibvirtDriver(virtapi, False)
instance = objects.Instance(**self.test_instance)
instance.info_cache = objects.InstanceInfoCache(
network_info=network_model.NetworkInfo([
network_model.VIF(id=uuids.vif_1,
type=network_model.VIF_TYPE_BRIDGE)]))
dom = fakelibvirt.Domain(drvr._get_connection(),
"<domain><name>demo</name></domain>", True)
guest = libvirt_guest.Guest(dom)
migrate_data = objects.LibvirtLiveMigrateData(block_migration=True)
disks_to_copy = (['/some/path/one', '/test/path/two'],
['vda', 'vdb'])
mock_copy_disk_path.return_value = disks_to_copy
mock_guest.return_value = guest
guest.migrate_configure_max_speed = mock.MagicMock()
generated_events = []
def fake_post():
pass
def fake_recover():
pass
def fake_prepare(instance, name, tag):
ev = mock.MagicMock(instance=instance,
event_name='%s-%s' % (name, tag))
ev.wait.return_value = mock.MagicMock(status='completed')
generated_events.append(ev)
return ev
prepare = virtapi._compute.instance_events.prepare_for_instance_event
prepare.side_effect = fake_prepare
drvr._live_migration(self.context, instance, "fakehost",
fake_post, fake_recover, True,
migrate_data)
mock_copy_disk_path.assert_called_once_with(self.context, instance,
guest)
class AnyEventletEvent(object):
def __eq__(self, other):
return type(other) == eventlet.event.Event
mock_thread.assert_called_once_with(
drvr._live_migration_operation,
self.context, instance, "fakehost", True,
migrate_data, guest, disks_to_copy[1],
libvirt_driver.MIN_MIGRATION_SPEED_BW)
mock_monitor.assert_called_once_with(
self.context, instance, guest, "fakehost",
fake_post, fake_recover, True,
migrate_data, AnyEventletEvent(), disks_to_copy[0])
guest.migrate_configure_max_speed.assert_called_once_with(
CONF.libvirt.live_migration_bandwidth)
prepare.assert_has_calls([
mock.call(instance, 'network-vif-plugged', uuids.vif_1)])
for event in generated_events:
event.wait.assert_called_once_with()
@mock.patch.object(host.Host, "get_connection")
@mock.patch.object(utils, "spawn")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor")
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(fakelibvirt.Connection, "_mark_running")
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_live_migration_copy_disk_paths")
def test_live_migration_ovs_vif(self, mock_copy_disk_path, mock_running,
mock_guest, mock_monitor, mock_thread,
mock_conn):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
instance.info_cache = objects.InstanceInfoCache(
network_info=network_model.NetworkInfo([
network_model.VIF(id=uuids.vif_1,
type=network_model.VIF_TYPE_OVS)]))
dom = fakelibvirt.Domain(drvr._get_connection(),
"<domain><name>demo</name></domain>", True)
@@ -11468,7 +11369,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_copy_disk_path.return_value = disks_to_copy
mock_guest.return_value = guest
guest.migrate_configure_max_speed = mock.MagicMock()
def fake_post():
pass
@@ -11489,70 +11389,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_thread.assert_called_once_with(
drvr._live_migration_operation,
self.context, instance, "fakehost", True,
migrate_data, guest, disks_to_copy[1],
CONF.libvirt.live_migration_bandwidth)
migrate_data, guest, disks_to_copy[1])
mock_monitor.assert_called_once_with(
self.context, instance, guest, "fakehost",
fake_post, fake_recover, True,
migrate_data, AnyEventletEvent(), disks_to_copy[0])
guest.migrate_configure_max_speed.assert_not_called()
@mock.patch.object(host.Host, "get_connection")
@mock.patch.object(utils, "spawn")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor")
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(fakelibvirt.Connection, "_mark_running")
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_live_migration_copy_disk_paths")
def test_live_migration_bridge_no_events(self, mock_copy_disk_path,
mock_running, mock_guest,
mock_monitor, mock_thread,
mock_conn):
self.flags(vif_plugging_timeout=0)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
instance.info_cache = objects.InstanceInfoCache(
network_info=network_model.NetworkInfo([
network_model.VIF(id=uuids.vif_1,
type=network_model.VIF_TYPE_BRIDGE)]))
dom = fakelibvirt.Domain(drvr._get_connection(),
"<domain><name>demo</name></domain>", True)
guest = libvirt_guest.Guest(dom)
migrate_data = objects.LibvirtLiveMigrateData(block_migration=True)
disks_to_copy = (['/some/path/one', '/test/path/two'],
['vda', 'vdb'])
mock_copy_disk_path.return_value = disks_to_copy
mock_guest.return_value = guest
guest.migrate_configure_max_speed = mock.MagicMock()
def fake_post():
pass
def fake_recover():
pass
drvr._live_migration(self.context, instance, "fakehost",
fake_post, fake_recover, True,
migrate_data)
mock_copy_disk_path.assert_called_once_with(self.context, instance,
guest)
class AnyEventletEvent(object):
def __eq__(self, other):
return type(other) == eventlet.event.Event
mock_thread.assert_called_once_with(
drvr._live_migration_operation,
self.context, instance, "fakehost", True,
migrate_data, guest, disks_to_copy[1],
CONF.libvirt.live_migration_bandwidth)
mock_monitor.assert_called_once_with(
self.context, instance, guest, "fakehost",
fake_post, fake_recover, True,
migrate_data, AnyEventletEvent(), disks_to_copy[0])
guest.migrate_configure_max_speed.assert_not_called()
@mock.patch('os.path.exists', return_value=False)
@mock.patch.object(fake_libvirt_utils, 'create_image')
@@ -17148,15 +16989,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
{'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
volume_save.assert_called_once_with()
def test_get_neutron_events_for_live_migration(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
network_info = [network_model.VIF(id=uuids.vif_ovs,
type=network_model.VIF_TYPE_OVS),
network_model.VIF(id=uuids.vif_bridge,
type=network_model.VIF_TYPE_BRIDGE)]
events = drvr._get_neutron_events_for_live_migration(network_info)
self.assertEqual([('network-vif-plugged', uuids.vif_bridge)], events)
def test_get_neutron_events(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
network_info = [network_model.VIF(id='1'),

nova/tests/unit/virt/test_virt_drivers.py

@@ -624,8 +624,6 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
def test_live_migration(self):
instance_ref, network_info = self._get_running_instance()
instance_ref.info_cache = objects.InstanceInfoCache(
network_info=network_info)
fake_context = context.RequestContext('fake', 'fake')
migration = objects.Migration(context=fake_context, id=1)
migrate_data = objects.LibvirtLiveMigrateData(

nova/virt/libvirt/driver.py

@@ -294,8 +294,6 @@ MIN_QEMU_FILE_BACKED_DISCARD_VERSION = (2, 10, 0)
VGPU_RESOURCE_SEMAPHORE = "vgpu_resources"
MIN_MIGRATION_SPEED_BW = 1 # 1 MiB/s
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
@@ -5559,12 +5557,6 @@ class LibvirtDriver(driver.ComputeDriver):
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _neutron_failed_live_migration_callback(self, event_name, instance):
msg = ('Neutron reported failure during live migration '
'with %(event)s for instance %(uuid)s' %
{'event': event_name, 'uuid': instance.uuid})
raise exception.MigrationError(reason=msg)
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
@@ -5574,16 +5566,6 @@ class LibvirtDriver(driver.ComputeDriver):
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
def _get_neutron_events_for_live_migration(self, network_info):
# Neutron should send events to Nova indicating that the VIFs
# are successfully plugged on destination host.
# TODO(sahid): Currently we only use the mechanism of waiting
# for neutron events during live-migration for linux-bridge.
return [('network-vif-plugged', vif['id'])
for vif in network_info if (
vif.get('type') == network_model.VIF_TYPE_BRIDGE)]
def _cleanup_failed_start(self, context, instance, network_info,
block_device_info, guest, destroy_disks):
try:
@@ -7033,7 +7015,7 @@ class LibvirtDriver(driver.ComputeDriver):
def _live_migration_operation(self, context, instance, dest,
block_migration, migrate_data, guest,
device_names, bandwidth):
device_names):
"""Invoke the live migration operation
:param context: security context
@@ -7046,7 +7028,6 @@ class LibvirtDriver(driver.ComputeDriver):
:param guest: the guest domain object
:param device_names: list of device names that are being migrated with
instance
:param bandwidth: MiB/s of bandwidth allowed for the migration at start
This method is intended to be run in a background thread and will
block that thread until the migration is finished or failed.
@@ -7143,7 +7124,7 @@ class LibvirtDriver(driver.ComputeDriver):
flags=migration_flags,
migrate_disks=device_names,
destination_xml=new_xml_str,
bandwidth=bandwidth)
bandwidth=CONF.libvirt.live_migration_bandwidth)
LOG.debug("Migrate API has completed", instance=instance)
for hostname, port in serial_ports:
@@ -7483,58 +7464,11 @@ class LibvirtDriver(driver.ComputeDriver):
disk_paths, device_names = self._live_migration_copy_disk_paths(
context, instance, guest)
deadline = CONF.vif_plugging_timeout
if utils.is_neutron() and deadline:
# We don't generate events if CONF.vif_plugging_timeout=0
# meaning that the operator disabled using them.
# In case of Linux Bridge, the agent is waiting for new
# TAP devices on destination node. They are going to be
# created by libvirt at the very beginning of the
# live-migration process. Then receiving the events from
# Neutron will ensure that everything is configured
# correctly.
events = self._get_neutron_events_for_live_migration(
instance.get_network_info())
else:
# TODO(sahid): This 'is_neutron()' condition should be
# removed when nova-network will be erased from the tree
# (Rocky).
events = []
if events:
# We start migration with the minimum bandwidth
# speed. Depending on the VIF type (see:
# _get_neutron_events_for_live_migration) we will wait for
# Neutron to send events that confirm network is setup or
# directly configure QEMU to use the maximun BW allowed.
bandwidth = MIN_MIGRATION_SPEED_BW
else:
bandwidth = CONF.libvirt.live_migration_bandwidth
try:
error_cb = self._neutron_failed_live_migration_callback
with self.virtapi.wait_for_instance_event(instance, events,
deadline=deadline,
error_callback=error_cb):
opthread = utils.spawn(self._live_migration_operation,
context, instance, dest,
block_migration,
migrate_data, guest,
device_names, bandwidth)
except eventlet.timeout.Timeout:
msg = ('Timeout waiting for VIF plugging events, '
'canceling migration')
raise exception.MigrationError(reason=msg)
else:
if utils.is_neutron() and events:
LOG.debug('VIF events received, continuing migration '
'with max bandwidth configured: %d',
CONF.libvirt.live_migration_bandwidth,
instance=instance)
# Configure QEMU to use the maximum bandwidth allowed.
guest.migrate_configure_max_speed(
CONF.libvirt.live_migration_bandwidth)
opthread = utils.spawn(self._live_migration_operation,
context, instance, dest,
block_migration,
migrate_data, guest,
device_names)
finish_event = eventlet.event.Event()
self.active_migrations[instance.uuid] = deque()

New release note file

@@ -0,0 +1,11 @@
---
fixes:
  - |
    Note that the original fix for `bug 1414559`_ committed early in Rocky
    was automatic and always enabled. Because of `bug 1786346`_ that fix
    has since been reverted and superseded by an opt-in mechanism: setting
    ``[compute]/live_migration_wait_for_vif_plug=True`` will restore the
    behavior of `waiting for neutron events`_ during the live migration
    process.

    .. _bug 1414559: https://bugs.launchpad.net/neutron/+bug/1414559
    .. _bug 1786346: https://bugs.launchpad.net/nova/+bug/1786346
    .. _waiting for neutron events: https://docs.openstack.org/nova/latest/configuration/config.html#compute.live_migration_wait_for_vif_plug