Add context as a parameter for two libvirt APIs

When finish_revert_migration is called, its callers already have a
context in hand, but finish_revert_migration did not accept that
context and instead regenerated one internally. Add context as a new
parameter to finish_revert_migration so that the functions it calls
can reuse the caller's context when needed.
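
For illustration only, a minimal sketch of the new calling convention;
FakeContext and FakeDriver are stand-ins invented for this sketch, and only
the finish_revert_migration signature mirrors the real change:

    # Sketch: the caller's context is threaded through to the driver
    # instead of the driver regenerating one itself.
    class FakeContext(object):
        user_id = 'fake-user'

    class FakeDriver(object):
        def finish_revert_migration(self, context, instance, network_info,
                                    block_device_info=None, power_on=True):
            # The driver can now reuse the caller's context (e.g. for DB or
            # image lookups) rather than building a fresh admin context here.
            print('reverting %s for user %s' % (instance['uuid'],
                                                context.user_id))

    FakeDriver().finish_revert_migration(FakeContext(), {'uuid': 'fake-uuid'},
                                         network_info=[],
                                         block_device_info=None,
                                         power_on=True)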

The function _create_domain_and_network in libvirt/driver.py declared
context as an optional parameter, but context is not really optional
for this method. A context should always be passed down, because it
may be needed inside _create_domain_and_network().
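
A minimal sketch of this second change, using a stand-in class (only the
_create_domain_and_network parameter list mirrors the real libvirt driver):

    # Sketch: context becomes the first required positional argument
    # instead of an optional keyword that could silently stay None.
    class SketchLibvirtDriver(object):
        def _create_domain_and_network(self, context, xml, instance,
                                       network_info, block_device_info=None,
                                       power_on=True, reboot=False):
            # Network setup and domain creation would happen here; a usable
            # context is now always available to this method.
            return 'domain for %s' % instance['name']

        def _hard_reboot_sketch(self, context, xml, instance, network_info,
                                block_device_info):
            # Callers now pass context explicitly as the first argument.
            return self._create_domain_and_network(context, xml, instance,
                                                   network_info,
                                                   block_device_info,
                                                   reboot=True)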

The patch has two parts:
1) Add context as a new parameter to finish_revert_migration.
2) Make context a required parameter of _create_domain_and_network.

Closes-Bug: #1251261
(cherry picked from commit 81fc3967bc)

Conflicts:
	nova/tests/compute/test_compute_mgr.py
	nova/virt/vmwareapi/vmops.py

Change-Id: I72d131fc8dce903400e3080d51ba0fc0a1320751
Author: Jay Lau, 2013-11-22 23:32:38 +08:00 (committed by wingwj)
Parent: 0a4146d682
Commit: 6a1def6864
19 changed files with 60 additions and 51 deletions


@@ -684,7 +684,7 @@ class ComputeManager(manager.SchedulerDependentManager):
block_dev_info = self._get_instance_volume_block_device_info(
context, instance)
- self.driver.finish_revert_migration(
+ self.driver.finish_revert_migration(context,
instance, net_info, block_dev_info, power_on)
except Exception as e:

@@ -2839,7 +2839,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context, instance, refresh_conn_info=True)
power_on = old_vm_state != vm_states.STOPPED
- self.driver.finish_revert_migration(instance,
+ self.driver.finish_revert_migration(context, instance,
network_info,
block_device_info, power_on)


@@ -4186,7 +4186,7 @@ class ComputeTestCase(BaseTestCase):
def fake_finish_revert_migration_driver(*args, **kwargs):
# Confirm the instance uses the old type in finish_revert_resize
- inst = args[0]
+ inst = args[1]
sys_meta = inst.system_metadata
self.assertEqual(sys_meta['instance_type_flavorid'], '1')


@@ -311,7 +311,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
utils.instance_sys_meta(instance).AndReturn(sys_meta)
self.compute._get_instance_volume_block_device_info(
self.context, instance).AndReturn([])
- self.compute.driver.finish_revert_migration(instance, [], [], power_on)
+ self.compute.driver.finish_revert_migration(self.context, instance,
+ [], [], power_on)
self.compute._instance_update(self.context, instance['uuid'],
task_state=None).AndReturn(fixed)
self.compute.driver.get_info(fixed).AndReturn(


@@ -1597,7 +1597,8 @@ class HyperVAPITestCase(test.NoDBTestCase):
constants.HYPERV_VM_STATE_ENABLED)
self._mox.ReplayAll()
- self._conn.finish_revert_migration(instance, network_info, None,
+ self._conn.finish_revert_migration(self._context, instance,
+ network_info, None,
power_on)
self._mox.VerifyAll()


@@ -4290,9 +4290,9 @@ class LibvirtConnTestCase(test.TestCase):
conn._create_images_and_backing(self.context, instance,
libvirt_utils.get_instance_path(instance),
disk_info_json)
- conn._create_domain_and_network(dummyxml, instance,
+ conn._create_domain_and_network(self.context, dummyxml, instance,
network_info, block_device_info,
- context=self.context, reboot=True)
+ reboot=True)
self.mox.ReplayAll()
conn._hard_reboot(self.context, instance, network_info,

@@ -4326,10 +4326,10 @@ class LibvirtConnTestCase(test.TestCase):
block_device_info)
_get_existing_domain_xml.assert_has_calls([mock.call(instance,
network_info, block_device_info)])
- _create_domain_and_network.assert_has_calls([mock.call(dummyxml,
- instance, network_info,
- block_device_info=block_device_info,
- context=self.context)])
+ _create_domain_and_network.assert_has_calls([mock.call(
+ self.context, dummyxml,
+ instance, network_info,
+ block_device_info=block_device_info)])
_attach_pci_devices.assert_has_calls([mock.call('fake_dom',
'fake_pci_devs')])

@@ -7003,8 +7003,9 @@ class LibvirtDriverTestCase(test.TestCase):
f = open(libvirt_xml_path, 'w')
f.close()
- self.libvirtconnection.finish_revert_migration(ins_ref, None,
- None, power_on)
+ self.libvirtconnection.finish_revert_migration(
+ context.get_admin_context(), ins_ref,
+ None, None, power_on)
self.assertTrue(self.fake_create_domain_called)
def test_finish_revert_migration_power_on(self):

@@ -7021,6 +7022,7 @@ class LibvirtDriverTestCase(test.TestCase):
def wait(self):
return None
+ context = 'fake_context'
self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
self.mox.StubOutWithMock(os.path, 'exists')

@@ -7046,7 +7048,7 @@ class LibvirtDriverTestCase(test.TestCase):
self.mox.ReplayAll()
- self.libvirtconnection.finish_revert_migration({}, [])
+ self.libvirtconnection.finish_revert_migration(context, {}, [])
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(backup_made=True)


@@ -439,9 +439,11 @@ class PowerVMDriverTestCase(test.TestCase):
self.mox.ReplayAll()
- self.powervm_connection.finish_revert_migration(inst, network_info,
- block_device_info=None,
- power_on=power_on)
+ self.powervm_connection.finish_revert_migration(
+ context.get_admin_context(),
+ inst, network_info,
+ block_device_info=None,
+ power_on=power_on)
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(True, True, True)


@@ -965,7 +965,8 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
fake_wait_for_task)
# perform the revert on our stubbed methods
- self.conn.finish_revert_migration(instance=self.instance,
+ self.conn.finish_revert_migration(self.context,
+ instance=self.instance,
network_info=None,
power_on=power_on)


@@ -80,6 +80,7 @@ class VMOpsTestCase(test.NoDBTestCase):
def _test_finish_revert_migration_after_crash(self, backup_made, new_made):
instance = {'name': 'foo',
'task_state': task_states.RESIZE_MIGRATING}
+ context = 'fake_context'
self.mox.StubOutWithMock(vm_utils, 'lookup')
self.mox.StubOutWithMock(self._vmops, '_destroy')

@@ -100,7 +101,7 @@ class VMOpsTestCase(test.NoDBTestCase):
self.mox.ReplayAll()
- self._vmops.finish_revert_migration(instance, [])
+ self._vmops.finish_revert_migration(context, instance, [])
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(True, True)


@@ -1309,13 +1309,13 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def __init__(self):
self.finish_revert_migration_called = False
- def finish_revert_migration(self, instance, block_info,
+ def finish_revert_migration(self, context, instance, block_info,
power_on):
self.finish_revert_migration_called = True
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn._vmops = VMOpsMock()
- conn.finish_revert_migration(instance, None)
+ conn.finish_revert_migration(self.context, instance, None)
self.assertTrue(conn._vmops.finish_revert_migration_called)
def test_reboot_hard(self):

@@ -1717,6 +1717,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
self.called = False
self.fake_vm_start_called = False
self.fake_finish_revert_migration_called = False
+ context = 'fake_context'
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True

@@ -1750,7 +1751,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, power_on)
- conn.finish_revert_migration(instance, network_info)
+ conn.finish_revert_migration(context, instance, network_info)
self.assertEqual(self.fake_finish_revert_migration_called, True)
def test_revert_migrate_power_on(self):


@@ -401,11 +401,12 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""
Finish reverting a resize.
+ :param context: the context for the finish_revert_migration
:param instance: the instance being migrated/resized
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`

@@ -413,7 +414,6 @@ class ComputeDriver(object):
:param power_on: True if the instance should be powered on, False
otherwise
"""
- # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def pause(self, instance):


@@ -172,7 +172,7 @@ class FakeDriver(driver.ComputeDriver):
block_device_info=None):
pass
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
pass


@@ -179,9 +179,10 @@ class HyperVDriver(driver.ComputeDriver):
def confirm_migration(self, migration, instance, network_info):
self._migrationops.confirm_migration(migration, instance, network_info)
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
- self._migrationops.finish_revert_migration(instance, network_info,
+ self._migrationops.finish_revert_migration(context, instance,
+ network_info,
block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,


@@ -142,7 +142,7 @@ class MigrationOps(object):
instance_name)
self._pathutils.rename(revert_path, instance_path)
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug(_("finish_revert_migration called"), instance=instance)


@@ -1928,9 +1928,8 @@ class LibvirtDriver(driver.ComputeDriver):
# Initialize all the necessary networking, block devices and
# start the instance.
- self._create_domain_and_network(xml, instance, network_info,
- block_device_info, context=context,
- reboot=True)
+ self._create_domain_and_network(context, xml, instance, network_info,
+ block_device_info, reboot=True)
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance))

@@ -1979,8 +1978,8 @@ class LibvirtDriver(driver.ComputeDriver):
"""resume the specified instance."""
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
- dom = self._create_domain_and_network(xml, instance, network_info,
- block_device_info=block_device_info, context=context)
+ dom = self._create_domain_and_network(context, xml, instance,
+ network_info, block_device_info=block_device_info)
self._attach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))

@@ -2087,8 +2086,8 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info=block_device_info,
write_to_disk=True)
- self._create_domain_and_network(xml, instance, network_info,
- block_device_info, context=context)
+ self._create_domain_and_network(context, xml, instance, network_info,
+ block_device_info)
LOG.debug(_("Instance is running"), instance=instance)
def _wait_for_boot():

@@ -3211,9 +3210,9 @@ class LibvirtDriver(driver.ComputeDriver):
return domain
- def _create_domain_and_network(self, xml, instance, network_info,
+ def _create_domain_and_network(self, context, xml, instance, network_info,
block_device_info=None, power_on=True,
- context=None, reboot=False):
+ reboot=False):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(

@@ -4606,9 +4605,8 @@ class LibvirtDriver(driver.ComputeDriver):
xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
- self._create_domain_and_network(xml, instance, network_info,
- block_device_info, power_on,
- context=context)
+ self._create_domain_and_network(context, xml, instance, network_info,
+ block_device_info, power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,

@@ -4625,7 +4623,7 @@ class LibvirtDriver(driver.ComputeDriver):
if e.errno != errno.ENOENT:
raise
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)

@@ -4644,10 +4642,9 @@ class LibvirtDriver(driver.ComputeDriver):
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
- xml = self.to_xml(nova_context.get_admin_context(),
- instance, network_info, disk_info,
+ xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info)
- self._create_domain_and_network(xml, instance, network_info,
+ self._create_domain_and_network(context, xml, instance, network_info,
block_device_info, power_on)
if power_on:


@@ -310,7 +310,7 @@ class PowerVMDriver(driver.ComputeDriver):
new_name = self._get_resize_name(instance['name'])
self._powervm.destroy(new_name)
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize."""


@@ -447,11 +447,11 @@ class VMwareVCDriver(VMwareESXDriver):
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.confirm_migration(migration, instance, network_info)
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize, powering back on the instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
- _vmops.finish_revert_migration(instance, network_info,
+ _vmops.finish_revert_migration(context, instance, network_info,
block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,


@@ -1236,7 +1236,7 @@ class VMwareVMOps(object):
LOG.warn(_("In vmwareapi:vmops:confirm_migration, got this "
"exception while destroying the VM: %s") % str(excep))
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info, power_on=True):
"""Finish reverting a resize."""
# The original vm was suffixed with '-orig'; find it using


@@ -220,11 +220,12 @@ class XenAPIDriver(driver.ComputeDriver):
# TODO(Vek): Need to pass context in for access to auth_token
self._vmops.confirm_migration(migration, instance, network_info)
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize."""
# NOTE(vish): Xen currently does not use network info.
- self._vmops.finish_revert_migration(instance, block_device_info,
+ self._vmops.finish_revert_migration(context, instance,
+ block_device_info,
power_on)
def finish_migration(self, context, migration, instance, disk_info,


@@ -239,7 +239,8 @@ class VMOps(object):
mount_device,
hotplug=False)
- def finish_revert_migration(self, instance, block_device_info=None,
+ def finish_revert_migration(self, context, instance,
+ block_device_info=None,
power_on=True):
self._restore_orig_vm_and_cleanup_orphan(instance, block_device_info,
power_on)