XenAPI should only snapshot root disk
Change I3e2973 broke XenAPI migrations (resizes), as it caused snapshots
of all disks (including swap and ephemeral disks, if enabled) to be sent
over to the destination host. The destination expects VHDs for the root
disk only, named by sequence number. Swap and ephemeral disks ended up
with sequence numbers as well, breaking the rebuilding of the VHD chain
on the destination.
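
To see why the sequence numbering matters: the destination rebuilds the
VHD chain from files named 0.vhd, 1.vhd, and so on. A minimal sketch of
that expectation follows (illustrative only; rebuild_chain and
staging_dir are not nova's actual names):

import os

def rebuild_chain(staging_dir):
    # Destination-side assumption: the staged VHDs are the root
    # disk's chain, uploaded as 0.vhd, 1.vhd, ... (leaf to parent).
    chain = []
    seq = 0
    while True:
        path = os.path.join(staging_dir, '%d.vhd' % seq)
        if not os.path.exists(path):
            break
        chain.append(path)
        seq += 1
    # If swap/ephemeral snapshots also arrive with sequence numbers,
    # they get spliced into this list and the re-parenting step ends
    # up linking unrelated disks together.
    return chain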
Fixes bug 1064083
Change I3e2973 also failed to update the stub of VM.snapshot to
VDI.snapshot, but tests still passed because the new code ignored all
exceptions raised while snapshotting. Potential issues in
snapshot_attached_here() were likewise masked by excessive stubbing in
the XenAPI migration tests. This is also addressed here.
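
The masking effect is mechanical: a stale stub raises, and a bare
except hides it. A contrived sketch (FakeSession and the method calls
are illustrative, not the real test fixtures):

class FakeSession(object):
    # The stub was updated to VDI.snapshot; VM.snapshot no longer
    # exists on the fake, so calling it raises AttributeError.
    def VDI_snapshot(self, vdi_ref):
        return 'snapshot_ref'

def snapshot_vm(session):
    try:
        return session.VM_snapshot('vm_ref')  # stale call
    except Exception:
        pass  # swallows the AttributeError, so tests keep passing

assert snapshot_vm(FakeSession()) is None  # the bug goes undetected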
Change-Id: I145030f92a75615d056a30cabcb6ca97e34a8b6c
(cherry picked from commit fb101685cc)
commit 6a17579214
parent 803a05b7b3
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -15,7 +15,6 @@
 
 """Stubouts, mocks and fixtures for the test suite"""
 
-import contextlib
 import pickle
 import random
 import sys
@@ -34,9 +33,9 @@ def stubout_firewall_driver(stubs, conn):
     def fake_none(self, *args):
         return
 
-    vmops = conn._vmops
-    stubs.Set(vmops.firewall_driver, 'prepare_instance_filter', fake_none)
-    stubs.Set(vmops.firewall_driver, 'instance_filter_exists', fake_none)
+    _vmops = conn._vmops
+    stubs.Set(_vmops.firewall_driver, 'prepare_instance_filter', fake_none)
+    stubs.Set(_vmops.firewall_driver, 'instance_filter_exists', fake_none)
 
 
 def stubout_instance_snapshot(stubs):
@@ -203,16 +202,9 @@ class FakeSessionForVMTests(fake.SessionBase):
         vm_rec = self.VM_start(_1, vm_ref, _2, _3)
         vm_rec['resident_on'] = host_ref
 
-    def VM_snapshot(self, session_ref, vm_ref, label):
-        status = "Running"
-        template_vm_ref = fake.create_vm(label, status, is_a_template=True,
-                                         is_control_domain=False)
-
+    def VDI_snapshot(self, session_ref, vm_ref, _1):
         sr_ref = "fakesr"
-        template_vdi_ref = fake.create_vdi(label, sr_ref, read_only=True)
-
-        template_vbd_ref = fake.create_vbd(template_vm_ref, template_vdi_ref)
-        return template_vm_ref
+        return fake.create_vdi('fakelabel', sr_ref, read_only=True)
 
     def SR_scan(self, session_ref, sr_ref):
         pass
@@ -320,23 +312,24 @@ class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
 
 
 def stub_out_migration_methods(stubs):
-    @contextlib.contextmanager
-    def fake_snapshot_attached_here(session, instance, vm_ref, label):
-        yield ['bar', 'foo']
+    fakesr = fake.create_sr()
 
     def fake_move_disks(self, instance, disk_info):
-        vdi_ref = fake.create_vdi(instance['name'], 'fake')
+        vdi_ref = fake.create_vdi(instance['name'], fakesr)
         vdi_rec = fake.get_record('VDI', vdi_ref)
         vdi_rec['other_config']['nova_disk_type'] = 'root'
         return {'uuid': vdi_rec['uuid'], 'ref': vdi_ref}
 
     def fake_get_vdi(session, vm_ref):
-        vdi_ref = fake.create_vdi('derp', 'herp')
+        vdi_ref_parent = fake.create_vdi('derp-parent', fakesr)
+        vdi_rec_parent = fake.get_record('VDI', vdi_ref_parent)
+        vdi_ref = fake.create_vdi('derp', fakesr,
+                sm_config={'vhd-parent': vdi_rec_parent['uuid']})
         vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
-        return vdi_ref, {'uuid': vdi_rec['uuid'], }
+        return vdi_ref, vdi_rec
 
     def fake_sr(session, *args):
-        pass
+        return fakesr
 
     def fake_get_sr_path(*args):
         return "fake"
@@ -350,8 +343,6 @@ def stub_out_migration_methods(stubs):
     stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
    stubs.Set(vm_utils, 'move_disks', fake_move_disks)
     stubs.Set(vm_utils, 'scan_default_sr', fake_sr)
-    stubs.Set(vm_utils, '_scan_sr', fake_sr)
-    stubs.Set(vm_utils, 'snapshot_attached_here', fake_snapshot_attached_here)
     stubs.Set(vm_utils, 'get_vdi_for_vm_safely', fake_get_vdi)
     stubs.Set(vm_utils, 'get_sr_path', fake_get_sr_path)
     stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -172,7 +172,7 @@ def create_vdi(name_label, sr_ref, **kwargs):
         'other_config': {},
         'location': '',
         'xenstore_data': {},
-        'sm_config': {},
+        'sm_config': {'vhd-parent': None},
         'physical_utilisation': '123',
         'managed': True,
     }
@@ -564,12 +564,18 @@ class SessionBase(object):
     def _plugin_pickle_noop(self, method, args):
         return pickle.dumps(None)
 
+    def _plugin_migration_transfer_vhd(self, method, args):
+        kwargs = pickle.loads(args['params'])['kwargs']
+        vdi_ref = self.xenapi_request('VDI.get_by_uuid',
+                                      (kwargs['vdi_uuid'], ))
+        assert vdi_ref
+        return pickle.dumps(None)
+
     _plugin_glance_upload_vhd = _plugin_pickle_noop
     _plugin_kernel_copy_vdi = _plugin_noop
     _plugin_kernel_create_kernel_ramdisk = _plugin_noop
     _plugin_kernel_remove_kernel_ramdisk = _plugin_noop
     _plugin_migration_move_vhds_into_sr = _plugin_noop
-    _plugin_migration_transfer_vhd = _plugin_pickle_noop
 
     def _plugin_xenhost_host_data(self, method, args):
         return jsonutils.dumps({'host_memory': {'total': 10,
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -518,56 +518,26 @@ def get_vdi_for_vm_safely(session, vm_ref):
 
 @contextlib.contextmanager
 def snapshot_attached_here(session, instance, vm_ref, label):
+    """Snapshot the root disk only. Return a list of uuids for the vhds
+    in the chain.
+    """
     LOG.debug(_("Starting snapshot for VM"), instance=instance)
 
     # Memorize the original_parent_uuid so we can poll for coalesce
     vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
     original_parent_uuid = _get_vhd_parent_uuid(session, vm_vdi_ref)
+    sr_ref = vm_vdi_rec["SR"]
 
+    snapshot_ref = session.call_xenapi("VDI.snapshot", vm_vdi_ref, {})
     try:
-        vdi_snapshot_recs = _vdi_snapshot_vm_base(session, instance, vm_ref)
-        sr_ref = vm_vdi_rec["SR"]
-        parent_uuid, base_uuid = _wait_for_vhd_coalesce(
-            session, instance, sr_ref, vm_vdi_ref, original_parent_uuid)
-
-        vdi_uuids = []
-        for snapshot in vdi_snapshot_recs:
-            vdi_uuids += [vdi_rec['uuid'] for vdi_rec in
-                          _walk_vdi_chain(session, snapshot['uuid'])]
-
+        snapshot_rec = session.call_xenapi("VDI.get_record", snapshot_ref)
+        _wait_for_vhd_coalesce(session, instance, sr_ref, vm_vdi_ref,
+                               original_parent_uuid)
+        vdi_uuids = [vdi_rec['uuid'] for vdi_rec in
+                     _walk_vdi_chain(session, snapshot_rec['uuid'])]
         yield vdi_uuids
     finally:
-        _destroy_snapshots(session, instance, vdi_snapshot_recs)
-
-
-def _vdi_snapshot_vm_base(session, instance, vm_ref):
-    """Make a snapshot of every non-cinder VDI and return a list
-    of the new vdi records.
-    """
-    new_vdis = []
-    try:
-        vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
-        for vbd_ref in vbd_refs:
-            oc = session.call_xenapi("VBD.get_other_config", vbd_ref)
-            if 'osvol' not in oc:
-                # This volume is not a nova/cinder volume
-                vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
-                snapshot_ref = session.call_xenapi("VDI.snapshot", vdi_ref,
-                                                   {})
-                new_vdis.append(session.call_xenapi("VDI.get_record",
-                                                    snapshot_ref))
-
-    except session.XenAPI.Failure:
-        LOG.exception(_("Failed to snapshot VDI"), instance=instance)
-        raise
-    finally:
-        return new_vdis
-
-
-def _destroy_snapshots(session, instance, vdi_snapshot_recs):
-    vdi_refs = [session.call_xenapi("VDI.get_by_uuid", vdi_rec['uuid'])
-                for vdi_rec in vdi_snapshot_recs]
-    safe_destroy_vdis(session, vdi_refs)
+        safe_destroy_vdis(session, [snapshot_ref])
 
 
 def get_sr_path(session):
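
For context, after this change the context manager yields only the root
disk's VHD chain, so the caller can transfer the chain by sequence
number. A simplified usage sketch (transfer_vhd and the label are
illustrative; the real caller lives in the migration path of vmops.py):

with vm_utils.snapshot_attached_here(
        session, instance, vm_ref, 'migration-snapshot') as vdi_uuids:
    # vdi_uuids walks the root disk's chain leaf-first, so the index
    # doubles as the sequence number the destination expects.
    for seq_num, vdi_uuid in enumerate(vdi_uuids):
        transfer_vhd(session, instance, vdi_uuid, dest, seq_num)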