Add driver option to keep disks when an instance is destroyed

Add a destroy_disks flag to the driver's destroy() method.
When it is False, the driver keeps the instance's disks.
Required for blueprint rebuild-for-ha

DocImpact

Change-Id: I38c7833114d8414dff831c9cc0c04c4610dbd33a
Co-authored-by: Oshrit Feder <oshritf@il.ibm.com>
Kravchenko Pavel 2013-01-08 21:22:35 +02:00
parent 8a812a75ce
commit 2d45a177b0
12 changed files with 161 additions and 75 deletions
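
A minimal sketch of the intended call pattern. The helper below is
hypothetical and not part of this change; only driver.destroy() and its new
destroy_disks parameter come from the patch:

    def tear_down_for_rebuild(driver, instance, network_info,
                              block_device_info=None):
        # Hypothetical caller (e.g. the evacuate/rebuild-for-ha path):
        # shut the guest down on the failed host but keep its disks so
        # the instance can be rebuilt elsewhere.
        driver.destroy(instance, network_info,
                       block_device_info=block_device_info,
                       destroy_disks=False)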


@@ -2760,6 +2760,74 @@ class LibvirtConnTestCase(test.TestCase):
instance = db.instance_create(self.context, self.test_instance)
conn.destroy(instance, {})
def test_destroy_removes_disk(self):
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'_undefine_domain')
libvirt_driver.LibvirtDriver._undefine_domain(instance)
self.mox.StubOutWithMock(shutil, "rmtree")
shutil.rmtree(os.path.join(CONF.instances_path, instance['name']))
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_cleanup_lvm')
libvirt_driver.LibvirtDriver._cleanup_lvm(instance)
# Start test
self.mox.ReplayAll()
def fake_destroy(instance):
pass
def fake_os_path_exists(path):
return True
def fake_unplug_vifs(instance, network_info):
pass
def fake_unfilter_instance(instance, network_info):
pass
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_destroy', fake_destroy)
self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
self.stubs.Set(conn.firewall_driver,
'unfilter_instance', fake_unfilter_instance)
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
conn.destroy(instance, [])
def test_destroy_not_removes_disk(self):
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'_undefine_domain')
libvirt_driver.LibvirtDriver._undefine_domain(instance)
# Start test
self.mox.ReplayAll()
def fake_destroy(instance):
pass
def fake_os_path_exists(path):
return True
def fake_unplug_vifs(instance, network_info):
pass
def fake_unfilter_instance(instance, network_info):
pass
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_destroy', fake_destroy)
self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
self.stubs.Set(conn.firewall_driver,
'unfilter_instance', fake_unfilter_instance)
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
conn.destroy(instance, [], None, False)
def test_destroy_undefines(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.destroy()


@@ -194,7 +194,8 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
def destroy(self, instance, network_info, block_device_info=None):
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance.
If the instance is not found (for example if networking failed), this
@@ -206,6 +207,7 @@ class ComputeDriver(object):
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices that should
be detached from the instance.
:param destroy_disks: Indicates if disks should be destroyed
"""
# TODO(Vek): Need to pass context in for access to auth_token
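
A sketch of how a virt driver subclass is expected to honor the flag. The
helper method names are illustrative; only the destroy() signature and the
destroy_disks semantics come from this change:

    from nova.virt import driver

    class ExampleDriver(driver.ComputeDriver):
        def destroy(self, instance, network_info, block_device_info=None,
                    destroy_disks=True):
            # Always tear the guest itself down.
            self._power_off_guest(instance)         # hypothetical helper
            # Remove local disks only when the caller asks for it, so an
            # HA rebuild on another host can reuse them.
            if destroy_disks:
                self._delete_local_disks(instance)  # hypothetical helper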


@@ -189,7 +189,8 @@ class FakeDriver(driver.ComputeDriver):
def resume(self, instance, network_info, block_device_info=None):
pass
def destroy(self, instance, network_info, block_device_info=None):
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
key = instance['name']
if key in self.instances:
del self.instances[key]


@@ -98,8 +98,9 @@ class HyperVDriver(driver.ComputeDriver):
block_device_info=None):
self._vmops.reboot(instance, network_info, reboot_type)
def destroy(self, instance, network_info=None, cleanup=True):
self._vmops.destroy(instance, network_info, cleanup)
def destroy(self, instance, network_info=None, cleanup=True,
destroy_disks=True):
self._vmops.destroy(instance, network_info, cleanup, destroy_disks)
def get_info(self, instance):
return self._vmops.get_info(instance)


@@ -448,7 +448,8 @@ class VMOps(baseops.BaseOps):
raise exception.InstanceNotFound(instance_id=instance["id"])
self._set_vm_state(instance['name'], 'Reboot')
def destroy(self, instance, network_info=None, cleanup=True):
def destroy(self, instance, network_info=None, cleanup=True,
destroy_disks=True):
"""Destroy the VM. Also destroy the associated VHD disk files"""
LOG.debug(_("Got request to destroy vm %s"), instance['name'])
vm = self._vmutils.lookup(self._conn, instance['name'])
@@ -486,17 +487,18 @@ class VMOps(baseops.BaseOps):
if not success:
raise vmutils.HyperVException(_('Failed to destroy vm %s') %
instance['name'])
#Disconnect volumes
for volume_drive in volumes_drives_list:
self._volumeops.disconnect_volume(volume_drive)
#Delete associated vhd disk files.
for disk in disk_files:
vhdfile = self._conn_cimv2.query(
"Select * from CIM_DataFile where Name = '" +
disk.replace("'", "''") + "'")[0]
LOG.debug(_("Del: disk %(vhdfile)s vm %(name)s")
% {'vhdfile': vhdfile, 'name': instance['name']})
vhdfile.Delete()
if destroy_disks:
#Disconnect volumes
for volume_drive in volumes_drives_list:
self._volumeops.disconnect_volume(volume_drive)
#Delete associated vhd disk files.
for disk in disk_files:
vhdfile = self._conn_cimv2.query(
"Select * from CIM_DataFile where Name = '" +
disk.replace("'", "''") + "'")[0]
LOG.debug(_("Del: disk %(vhdfile)s vm %(name)s")
% {'vhdfile': vhdfile, 'name': instance['name']})
vhdfile.Delete()
def pause(self, instance):
"""Pause VM instance."""


@@ -516,9 +516,10 @@ class LibvirtDriver(driver.ComputeDriver):
timer = utils.FixedIntervalLoopingCall(_wait_for_destroy)
timer.start(interval=0.5).wait()
def destroy(self, instance, network_info, block_device_info=None):
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
self._destroy(instance)
self._cleanup(instance, network_info, block_device_info)
self._cleanup(instance, network_info, block_device_info, destroy_disks)
def _undefine_domain(self, instance):
try:
@@ -551,7 +552,8 @@ class LibvirtDriver(driver.ComputeDriver):
locals(), instance=instance)
raise
def _cleanup(self, instance, network_info, block_device_info):
def _cleanup(self, instance, network_info, block_device_info,
destroy_disks):
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
try:
@@ -575,21 +577,22 @@ class LibvirtDriver(driver.ComputeDriver):
connection_info,
mount_device)
target = os.path.join(CONF.instances_path, instance['name'])
LOG.info(_('Deleting instance files %(target)s') % locals(),
instance=instance)
if os.path.exists(target):
# If we fail to get rid of the directory
# tree, this shouldn't block deletion of
# the instance as whole.
try:
shutil.rmtree(target)
except OSError, e:
LOG.error(_("Failed to cleanup directory %(target)s: %(e)s") %
locals())
if destroy_disks:
target = os.path.join(CONF.instances_path, instance['name'])
LOG.info(_('Deleting instance files %(target)s') % locals(),
instance=instance)
if os.path.exists(target):
# If we fail to get rid of the directory
# tree, this shouldn't block deletion of
# the instance as whole.
try:
shutil.rmtree(target)
except OSError, e:
LOG.error(_("Failed to cleanup directory %(target)s: %(e)s"
) % locals())
#NOTE(bfilippov): destroy all LVM disks for this instance
self._cleanup_lvm(instance)
#NOTE(bfilippov): destroy all LVM disks for this instance
self._cleanup_lvm(instance)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object"""


@@ -100,9 +100,10 @@ class PowerVMDriver(driver.ComputeDriver):
"""Create a new instance/VM/domain on powerVM."""
self._powervm.spawn(context, instance, image_meta['id'])
def destroy(self, instance, network_info, block_device_info=None):
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance."""
self._powervm.destroy(instance['name'])
self._powervm.destroy(instance['name'], destroy_disks)
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):


@@ -276,13 +276,13 @@ class PowerVMOperator(object):
LOG.info(_("Instance spawned in %s seconds") % spawn_time,
instance=instance)
def destroy(self, instance_name):
def destroy(self, instance_name, destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance.
:param instance_name: Instance name.
"""
try:
self._cleanup(instance_name)
self._cleanup(instance_name, destroy_disks)
except exception.PowerVMLPARInstanceNotFound:
LOG.warn(_("During destroy, LPAR instance '%s' was not found on "
"PowerVM system.") % instance_name)
@@ -317,7 +317,7 @@ class PowerVMOperator(object):
if previous_state == 'Running':
self.power_on(instance['name'])
def _cleanup(self, instance_name):
def _cleanup(self, instance_name, destroy_disks=True):
lpar_id = self._get_instance(instance_name)['lpar_id']
try:
vhost = self._operator.get_vhost_by_instance_id(lpar_id)
@@ -326,7 +326,7 @@ class PowerVMOperator(object):
LOG.debug(_("Shutting down the instance '%s'") % instance_name)
self._operator.stop_lpar(instance_name)
if disk_name:
if disk_name and destroy_disks:
# TODO(mrodden): we should also detach from the instance
# before we start deleting things...
self._disk_adapter.detach_volume_from_host(disk_name)


@@ -136,9 +136,10 @@ class VMWareESXDriver(driver.ComputeDriver):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info)
def destroy(self, instance, network_info, block_device_info=None):
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance."""
self._vmops.destroy(instance, network_info)
self._vmops.destroy(instance, network_info, destroy_disks)
def pause(self, instance):
"""Pause VM instance."""


@@ -539,7 +539,7 @@ class VMWareVMOps(object):
self._session._wait_for_task(instance['uuid'], reset_task)
LOG.debug(_("Did hard reboot of VM"), instance=instance)
def destroy(self, instance, network_info):
def destroy(self, instance, network_info, destroy_disks=True):
"""
Destroy a VM instance. Steps followed are:
1. Power off the VM, if it is in poweredOn state.
@@ -590,30 +590,32 @@ class VMWareVMOps(object):
# Delete the folder holding the VM related content on
# the datastore.
try:
dir_ds_compliant_path = vm_util.build_datastore_path(
datastore_name,
os.path.dirname(vmx_file_path))
LOG.debug(_("Deleting contents of the VM from "
"datastore %(datastore_name)s") %
{'datastore_name': datastore_name},
instance=instance)
delete_task = self._session._call_method(
self._session._get_vim(),
"DeleteDatastoreFile_Task",
self._session._get_vim().get_service_content().fileManager,
name=dir_ds_compliant_path)
self._session._wait_for_task(instance['uuid'], delete_task)
LOG.debug(_("Deleted contents of the VM from "
"datastore %(datastore_name)s") %
{'datastore_name': datastore_name},
instance=instance)
except Exception, excep:
LOG.warn(_("In vmwareapi:vmops:destroy, "
"got this exception while deleting"
" the VM contents from the disk: %s")
% str(excep),
instance=instance)
if destroy_disks:
try:
dir_ds_compliant_path = vm_util.build_datastore_path(
datastore_name,
os.path.dirname(vmx_file_path))
LOG.debug(_("Deleting contents of the VM from "
"datastore %(datastore_name)s") %
{'datastore_name': datastore_name},
instance=instance)
vim = self._session._get_vim()
delete_task = self._session._call_method(
vim,
"DeleteDatastoreFile_Task",
vim.get_service_content().fileManager,
name=dir_ds_compliant_path)
self._session._wait_for_task(instance['uuid'], delete_task)
LOG.debug(_("Deleted contents of the VM from "
"datastore %(datastore_name)s") %
{'datastore_name': datastore_name},
instance=instance)
except Exception, excep:
LOG.warn(_("In vmwareapi:vmops:destroy, "
"got this exception while deleting"
" the VM contents from the disk: %s")
% str(excep),
instance=instance)
except Exception, exc:
LOG.exception(exc, instance=instance)


@@ -211,9 +211,11 @@ class XenAPIDriver(driver.ComputeDriver):
"""Apply a diff to the instance metadata."""
self._vmops.change_instance_metadata(instance, diff)
def destroy(self, instance, network_info, block_device_info=None):
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance"""
self._vmops.destroy(instance, network_info, block_device_info)
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def pause(self, instance):
"""Pause VM instance"""


@@ -1067,7 +1067,8 @@ class VMOps(object):
# Destroy Rescue VM
self._session.call_xenapi("VM.destroy", rescue_vm_ref)
def destroy(self, instance, network_info, block_device_info=None):
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance.
This is the method exposed by xenapi_conn.destroy(). The rest of the
@@ -1087,10 +1088,11 @@ class VMOps(object):
self._destroy_rescue_instance(rescue_vm_ref, vm_ref)
return self._destroy(instance, vm_ref, network_info,
block_device_info=block_device_info)
block_device_info=block_device_info,
destroy_disks=destroy_disks)
def _destroy(self, instance, vm_ref, network_info=None,
block_device_info=None):
block_device_info=None, destroy_disks=True):
"""Destroys VM instance by performing:
1. A shutdown
@@ -1106,10 +1108,11 @@ class VMOps(object):
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
# Destroy VDIs
self._detach_vm_vols(instance, vm_ref, block_device_info)
self._destroy_vdis(instance, vm_ref, block_device_info)
self._destroy_kernel_ramdisk(instance, vm_ref)
# Destroy VDIs (if necessary)
if destroy_disks:
self._detach_vm_vols(instance, vm_ref, block_device_info)
self._destroy_vdis(instance, vm_ref, block_device_info)
self._destroy_kernel_ramdisk(instance, vm_ref)
vm_utils.destroy_vm(self._session, instance, vm_ref)