From 1e2c92f3f20b2742887edde11aaf2a062566c16f Mon Sep 17 00:00:00 2001
From: Johannes Erdfelt
Date: Tue, 18 Mar 2014 09:16:29 -0700
Subject: [PATCH] xenapi: Attach original local disks during rescue

When rescuing an instance, a new VM is created and only the original
root disk is re-attached. Often when a user is rescuing a VM, they
expect to be able to access all of their original disks so they can
potentially salvage data.

This changes the xenapi driver to attach the original local disks
during rescue so the user can rescue all of their data.

DocImpact
Implements: blueprint rescue-attach-all-disks
Change-Id: Iba5cc85cd03d0a60f1858cf16aa31397e163df50
Partial-bug: 1223396
---
 nova/tests/virt/xenapi/test_vmops.py  | 18 +++---
 nova/tests/virt/xenapi/test_xenapi.py | 22 +++++--
 nova/virt/xenapi/fake.py              |  9 ++-
 nova/virt/xenapi/vmops.py             | 90 ++++++++++++++++++---------
 4 files changed, 94 insertions(+), 45 deletions(-)

diff --git a/nova/tests/virt/xenapi/test_vmops.py b/nova/tests/virt/xenapi/test_vmops.py
index 7fcea40f70f8..f69298b2ce42 100644
--- a/nova/tests/virt/xenapi/test_vmops.py
+++ b/nova/tests/virt/xenapi/test_vmops.py
@@ -224,7 +224,7 @@ class SpawnTestCase(VMOpsTestBase):
         self.mox.StubOutWithMock(self.vmops, '_attach_disks')
         self.mox.StubOutWithMock(pci_manager, 'get_instance_pci_devs')
         self.mox.StubOutWithMock(vm_utils, 'set_other_config_pci')
-        self.mox.StubOutWithMock(self.vmops, '_attach_orig_disk_for_rescue')
+        self.mox.StubOutWithMock(self.vmops, '_attach_orig_disks')
         self.mox.StubOutWithMock(self.vmops, 'inject_network_info')
         self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
         self.mox.StubOutWithMock(self.vmops, '_inject_instance_metadata')
@@ -300,7 +300,7 @@ class SpawnTestCase(VMOpsTestBase):
         self.vmops._update_instance_progress(context, instance, step, steps)
 
         self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
-                                 network_info, admin_password, injected_files)
+                                 network_info, rescue, admin_password, injected_files)
         if attach_pci_dev:
             fake_dev = {
                 'created_at': None,
@@ -346,7 +346,7 @@ class SpawnTestCase(VMOpsTestBase):
         self.vmops._update_instance_progress(context, instance, step, steps)
 
         if rescue:
-            self.vmops._attach_orig_disk_for_rescue(instance, vm_ref)
+            self.vmops._attach_orig_disks(instance, vm_ref)
 
             step += 1
             self.vmops._update_instance_progress(context, instance, step, steps)
@@ -435,7 +435,7 @@ class SpawnTestCase(VMOpsTestBase):
         if resize_instance:
             self.vmops._resize_up_vdis(instance, vdis)
         self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
-                                 network_info, None, None)
+                                 network_info, False, None, None)
         self.vmops._attach_mapped_block_devices(instance, block_device_info)
 
         pci_manager.get_instance_pci_devs(instance).AndReturn([])
@@ -569,21 +569,23 @@ class SpawnTestCase(VMOpsTestBase):
         self.mox.ReplayAll()
         self.vmops._wait_for_instance_to_start(instance, vm_ref)
 
-    def test_attach_orig_disk_for_rescue(self):
+    def test_attach_orig_disks(self):
         instance = {"name": "dummy"}
         vm_ref = "vm_ref"
+        vdi_refs = {vmops.DEVICE_ROOT: "vdi_ref"}
 
         self.mox.StubOutWithMock(vm_utils, 'lookup')
-        self.mox.StubOutWithMock(self.vmops, '_find_root_vdi_ref')
+        self.mox.StubOutWithMock(self.vmops, '_find_vdi_refs')
         self.mox.StubOutWithMock(vm_utils, 'create_vbd')
 
         vm_utils.lookup(self.vmops._session, "dummy").AndReturn("ref")
-        self.vmops._find_root_vdi_ref("ref").AndReturn("vdi_ref")
+        self.vmops._find_vdi_refs("ref", exclude_volumes=True).AndReturn(
+            vdi_refs)
         vm_utils.create_vbd(self.vmops._session, vm_ref, "vdi_ref",
                             vmops.DEVICE_RESCUE, bootable=False)
 
         self.mox.ReplayAll()
-        self.vmops._attach_orig_disk_for_rescue(instance, vm_ref)
+        self.vmops._attach_orig_disks(instance, vm_ref)
 
     def test_agent_update_setup(self):
         # agent updates need to occur after networking is configured
diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py
index 2bb9dfceb95e..970d6e31006f 100644
--- a/nova/tests/virt/xenapi/test_xenapi.py
+++ b/nova/tests/virt/xenapi/test_xenapi.py
@@ -1231,9 +1231,16 @@ iface eth0 inet6 static
 
         swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
         root_vdi_ref = xenapi_fake.create_vdi('root', None)
+        eph1_vdi_ref = xenapi_fake.create_vdi('eph', None)
+        eph2_vdi_ref = xenapi_fake.create_vdi('eph', None)
+        vol_vdi_ref = xenapi_fake.create_vdi('volume', None)
 
-        xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=1)
+        xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=2)
         xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
+        xenapi_fake.create_vbd(vm_ref, eph1_vdi_ref, userdevice=4)
+        xenapi_fake.create_vbd(vm_ref, eph2_vdi_ref, userdevice=5)
+        xenapi_fake.create_vbd(vm_ref, vol_vdi_ref, userdevice=6,
+                               other_config={'osvol': True})
 
         conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
         image_meta = {'id': IMAGE_VHD,
@@ -1245,11 +1252,16 @@ iface eth0 inet6 static
         rescue_ref = vm_utils.lookup(session, rescue_name)
         rescue_vm = xenapi_fake.get_record('VM', rescue_ref)
 
-        vdi_refs = []
+        vdi_refs = {}
         for vbd_ref in rescue_vm['VBDs']:
-            vdi_refs.append(xenapi_fake.get_record('VBD', vbd_ref)['VDI'])
-        self.assertNotIn(swap_vdi_ref, vdi_refs)
-        self.assertIn(root_vdi_ref, vdi_refs)
+            vbd = xenapi_fake.get_record('VBD', vbd_ref)
+            vdi_refs[vbd['VDI']] = vbd['userdevice']
+
+        self.assertEqual('1', vdi_refs[root_vdi_ref])
+        self.assertEqual('2', vdi_refs[swap_vdi_ref])
+        self.assertEqual('4', vdi_refs[eph1_vdi_ref])
+        self.assertEqual('5', vdi_refs[eph2_vdi_ref])
+        self.assertNotIn(vol_vdi_ref, vdi_refs)
 
     def test_rescue_preserve_disk_on_failure(self):
         # test that the original disk is preserved if rescue setup fails
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index df243f255876..94b4f30011fa 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -206,11 +206,15 @@ def after_VDI_create(vdi_ref, vdi_rec):
     vdi_rec.setdefault('VBDs', [])
 
 
-def create_vbd(vm_ref, vdi_ref, userdevice=0):
+def create_vbd(vm_ref, vdi_ref, userdevice=0, other_config=None):
+    if other_config is None:
+        other_config = {}
+
     vbd_rec = {'VM': vm_ref,
                'VDI': vdi_ref,
                'userdevice': str(userdevice),
-               'currently_attached': False}
+               'currently_attached': False,
+               'other_config': other_config}
     vbd_ref = _create_object('VBD', vbd_rec)
     after_VBD_create(vbd_ref, vbd_rec)
     return vbd_ref
@@ -222,6 +226,7 @@ def after_VBD_create(vbd_ref, vbd_rec):
     """
     vbd_rec['currently_attached'] = False
     vbd_rec['device'] = ''
+    vbd_rec.setdefault('other_config', {})
 
     vm_ref = vbd_rec['VM']
     vm_rec = _db_content['VM'][vm_ref]
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index b71e48946d7c..e159aa619047 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -382,8 +382,8 @@ class VMOps(object):
             self._resize_up_vdis(instance, vdis)
 
             self._attach_disks(instance, vm_ref, name_label, vdis,
-                               disk_image_type, network_info, admin_password,
-                               injected_files)
+                               disk_image_type, network_info, rescue,
+                               admin_password, injected_files)
 
             if not first_boot:
                 self._attach_mapped_block_devices(instance, block_device_info)
@@ -440,19 +440,20 @@ class VMOps(object):
                 attach_pci_devices(undo_mgr, vm_ref)
 
         if rescue:
-            # NOTE(johannes): Attach root disk to rescue VM now, before
-            # booting the VM, since we can't hotplug block devices
+            # NOTE(johannes): Attach disks from original VM to rescue VM now,
+            # before booting the VM, since we can't hotplug block devices
             # on non-PV guests
             @step
-            def attach_root_disk_step(undo_mgr, vm_ref):
-                vbd_ref = self._attach_orig_disk_for_rescue(instance, vm_ref)
+            def attach_orig_disks_step(undo_mgr, vm_ref):
+                vbd_refs = self._attach_orig_disks(instance, vm_ref)
 
-                def undo_attach_root_disk():
-                    # destroy the vbd in preparation to re-attach the VDI
+                def undo_attach_orig_disks():
+                    # Destroy the VBDs in preparation to re-attach the VDIs
                     # to its original VM. (does not delete VDI)
-                    vm_utils.destroy_vbd(self._session, vbd_ref)
+                    for vbd_ref in vbd_refs:
+                        vm_utils.destroy_vbd(self._session, vbd_ref)
 
-                undo_mgr.undo_with(undo_attach_root_disk)
+                undo_mgr.undo_with(undo_attach_orig_disks)
 
         @step
         def inject_instance_data_step(undo_mgr, vm_ref, vdis):
@@ -508,7 +509,7 @@ class VMOps(object):
             setup_network_step(undo_mgr, vm_ref)
 
             if rescue:
-                attach_root_disk_step(undo_mgr, vm_ref)
+                attach_orig_disks_step(undo_mgr, vm_ref)
 
             boot_instance_step(undo_mgr, vm_ref)
 
@@ -521,11 +522,35 @@ class VMOps(object):
             msg = _("Failed to spawn, rolling back")
             undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
 
-    def _attach_orig_disk_for_rescue(self, instance, vm_ref):
+    def _attach_orig_disks(self, instance, vm_ref):
         orig_vm_ref = vm_utils.lookup(self._session, instance['name'])
-        vdi_ref = self._find_root_vdi_ref(orig_vm_ref)
-        return vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
-                                   DEVICE_RESCUE, bootable=False)
+        orig_vdi_refs = self._find_vdi_refs(orig_vm_ref,
+                                            exclude_volumes=True)
+
+        # Attach original root disk
+        root_vdi_ref = orig_vdi_refs.get(DEVICE_ROOT)
+        if not root_vdi_ref:
+            raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
+
+        vbd_ref = vm_utils.create_vbd(self._session, vm_ref, root_vdi_ref,
+                                      DEVICE_RESCUE, bootable=False)
+        vbd_refs = [vbd_ref]
+
+        # Attach original swap disk
+        swap_vdi_ref = orig_vdi_refs.get(DEVICE_SWAP)
+        if swap_vdi_ref:
+            vbd_ref = vm_utils.create_vbd(self._session, vm_ref, swap_vdi_ref,
+                                          DEVICE_SWAP, bootable=False)
+            vbd_refs.append(vbd_ref)
+
+        # Attach original ephemeral disks
+        for userdevice, vdi_ref in orig_vdi_refs.iteritems():
+            if userdevice >= DEVICE_EPHEMERAL:
+                vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
+                                              userdevice, bootable=False)
+                vbd_refs.append(vbd_ref)
+
+        return vbd_refs
 
     def _file_inject_vm_settings(self, instance, vm_ref, vdis, network_info):
         if CONF.flat_injected:
@@ -565,7 +590,7 @@ class VMOps(object):
         return vm_ref
 
     def _attach_disks(self, instance, vm_ref, name_label, vdis,
-                      disk_image_type, network_info,
+                      disk_image_type, network_info, rescue=False,
                       admin_password=None, files=None):
         flavor = flavors.extract_flavor(instance)
 
@@ -607,14 +632,17 @@ class VMOps(object):
                                 userdevice, bootable=False,
                                 osvol=vdi_info.get('osvol'))
 
+        # For rescue, swap and ephemeral disks get attached in
+        # _attach_orig_disks
+
         # Attach (optional) swap disk
        swap_mb = flavor['swap']
-        if swap_mb:
+        if not rescue and swap_mb:
             vm_utils.generate_swap(self._session, instance, vm_ref,
                                    DEVICE_SWAP, name_label, swap_mb)
 
         ephemeral_gb = flavor['ephemeral_gb']
-        if ephemeral_gb:
+        if not rescue and ephemeral_gb:
             ephemeral_vdis = vdis.get('ephemerals')
             if ephemeral_vdis:
                 # attach existing (migrated) ephemeral disks
@@ -1242,19 +1270,18 @@ class VMOps(object):
                 process_change(location, change)
             update_meta()
 
-    def _find_root_vdi_ref(self, vm_ref):
-        """Find and return the root vdi ref for a VM."""
+    def _find_vdi_refs(self, vm_ref, exclude_volumes=False):
+        """Find and return a dict mapping userdevice to VDI ref for a VM."""
         if not vm_ref:
-            return None
+            return {}
 
-        vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
+        vdi_refs = {}
+        for vbd_ref in self._session.call_xenapi("VM.get_VBDs", vm_ref):
+            vbd = self._session.call_xenapi("VBD.get_record", vbd_ref)
+            if not exclude_volumes or 'osvol' not in vbd['other_config']:
+                vdi_refs[vbd['userdevice']] = vbd['VDI']
 
-        for vbd_uuid in vbd_refs:
-            vbd = self._session.call_xenapi("VBD.get_record", vbd_uuid)
-            if vbd["userdevice"] == DEVICE_ROOT:
-                return vbd["VDI"]
-
-        raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
+        return vdi_refs
 
     def _destroy_vdis(self, instance, vm_ref):
         """Destroys all VDIs associated with a VM."""
@@ -1311,8 +1338,11 @@ class VMOps(object):
 
         # Destroy Rescue VDIs
         vdi_refs = vm_utils.lookup_vm_vdis(self._session, rescue_vm_ref)
-        root_vdi_ref = self._find_root_vdi_ref(original_vm_ref)
-        vdi_refs = [vdi_ref for vdi_ref in vdi_refs if vdi_ref != root_vdi_ref]
+
+        # Don't destroy any VDIs belonging to the original VM
+        orig_vdi_refs = self._find_vdi_refs(original_vm_ref)
+        vdi_refs = set(vdi_refs) - set(orig_vdi_refs.values())
+
         vm_utils.safe_destroy_vdis(self._session, vdi_refs)
 
         # Destroy Rescue VM
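--
Appendix (illustrative, not part of the change): the sketch below reduces the
new attach flow to plain, self-contained Python so it can be read or run
without a XenServer environment. FakeSession and make_vbd are hypothetical
stand-ins for XenAPISession and vm_utils.create_vbd, and the DEVICE_* values
are the ones the tests above assert ('0' root, '1' rescue, '2' swap, '4'
first ephemeral).

DEVICE_ROOT = '0'
DEVICE_RESCUE = '1'
DEVICE_SWAP = '2'
DEVICE_EPHEMERAL = '4'


class FakeSession(object):
    """Stand-in for XenAPISession: answers the two calls the flow needs."""

    def __init__(self, vbd_records):
        self.vbd_records = vbd_records  # vbd_ref -> VBD record dict

    def call_xenapi(self, method, ref):
        if method == 'VM.get_VBDs':
            return list(self.vbd_records)  # every fake VBD belongs to one VM
        if method == 'VBD.get_record':
            return self.vbd_records[ref]
        raise NotImplementedError(method)


def find_vdi_refs(session, vm_ref, exclude_volumes=False):
    """Mirror _find_vdi_refs: map userdevice -> VDI ref, optionally
    skipping volume-backed VBDs (marked with 'osvol' in other_config)."""
    if not vm_ref:
        return {}
    vdi_refs = {}
    for vbd_ref in session.call_xenapi('VM.get_VBDs', vm_ref):
        vbd = session.call_xenapi('VBD.get_record', vbd_ref)
        if not exclude_volumes or 'osvol' not in vbd['other_config']:
            vdi_refs[vbd['userdevice']] = vbd['VDI']
    return vdi_refs


def attach_orig_disks(session, orig_vm_ref, rescue_vm_ref, make_vbd):
    """Mirror _attach_orig_disks: the original root lands on DEVICE_RESCUE
    because the rescue image itself owns DEVICE_ROOT; swap and ephemerals
    keep their original device numbers. make_vbd stands in for
    vm_utils.create_vbd."""
    orig_vdi_refs = find_vdi_refs(session, orig_vm_ref, exclude_volumes=True)

    root_vdi_ref = orig_vdi_refs.get(DEVICE_ROOT)
    if root_vdi_ref is None:
        raise LookupError('Unable to find root VBD/VDI for VM')
    vbd_refs = [make_vbd(rescue_vm_ref, root_vdi_ref, DEVICE_RESCUE)]

    swap_vdi_ref = orig_vdi_refs.get(DEVICE_SWAP)
    if swap_vdi_ref:
        vbd_refs.append(make_vbd(rescue_vm_ref, swap_vdi_ref, DEVICE_SWAP))

    # Note: the patch compares the userdevice strings directly, which
    # matches this integer comparison only while device numbers stay in
    # single digits.
    for userdevice, vdi_ref in orig_vdi_refs.items():
        if int(userdevice) >= int(DEVICE_EPHEMERAL):
            vbd_refs.append(make_vbd(rescue_vm_ref, vdi_ref, userdevice))

    return vbd_refs


if __name__ == '__main__':
    session = FakeSession({
        'vbd0': {'userdevice': '0', 'VDI': 'vdi-root', 'other_config': {}},
        'vbd1': {'userdevice': '2', 'VDI': 'vdi-swap', 'other_config': {}},
        'vbd2': {'userdevice': '4', 'VDI': 'vdi-eph1', 'other_config': {}},
        'vbd3': {'userdevice': '6', 'VDI': 'vdi-vol',
                 'other_config': {'osvol': True}},  # Cinder volume, skipped
    })

    def make_vbd(vm_ref, vdi_ref, userdevice):
        print('attach %s to %s as userdevice %s' %
              (vdi_ref, vm_ref, userdevice))
        return 'vbd-%s' % userdevice

    attach_orig_disks(session, 'orig-vm', 'rescue-vm', make_vbd)
    # Prints root on device 1, swap on 2, ephemeral on 4; the volume VBD is
    # excluded, matching the assertions in test_xenapi.py above.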