Stop, Rescue, and Delete should give the guest a chance to shut down

Currently in libvirt, stop, shelve, rescue, and delete simply
destroy the underlying VM. Some guest operating systems do not react
well to this type of power failure, so it would be better if these
operations followed the same approach as soft_reboot and gave the
guest a chance to shut down gracefully.  Even where a VM is being
deleted, it may be booted from a volume that will be reused on
another server.

The change is implemented by adding a clean_shutdown parameter
to the relevant methods from the compute/api layer downwards
and into the virt drivers.  A full implementation is provided in
the libvirt driver; other drivers are modified only to accept the
additional parameter.
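
For illustration only, the driver-level pattern looks roughly like the
following (a minimal sketch, not the committed code; _clean_shutdown and
_hard_power_off are hypothetical stand-ins for the real libvirt calls,
and CONF.wait_shutdown_seconds is the option described below):

    def power_off(self, instance, clean_shutdown=True):
        # Ask the guest to shut itself down first, if requested, and wait
        # up to CONF.wait_shutdown_seconds for it to stop.
        if clean_shutdown and self._clean_shutdown(
                instance, CONF.wait_shutdown_seconds):
            return
        # Guest did not stop in time (or a clean shutdown was not
        # requested), so fall back to the hard power off used today.
        self._hard_power_off(instance)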

The timer configuration value previously used by soft_reboot in
the libvirt driver is moved up to virt/driver so it can be used by
other drivers.
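
Assuming the option keeps the name and default shown in the sample
config below, the relocated option would be registered in
nova/virt/driver.py roughly like this (sketch only, help text abridged):

    from oslo.config import cfg

    driver_opts = [
        cfg.IntOpt('wait_shutdown_seconds',
                   default=120,
                   help='Number of seconds to wait for an instance to shut '
                        'down after a clean shutdown is requested'),
    ]

    CONF = cfg.CONF
    CONF.register_opts(driver_opts)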

The timer logic itself is changed from simple loop counting with
one-second sleeps to a more precise approach, since testing showed
that other calls in the loop could add up to 50% to the expected
duration.  That could stretch the default two-minute wait to three
minutes, which would not be acceptable in some environments and
breaks some of the tempest tests.
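
One way to get that precision is a deadline-based wait rather than an
iteration count, roughly along these lines (a hand-written sketch under
that assumption, not the code in this patch):

    import time

    def wait_for_shutdown(is_shutdown, timeout, poll_interval=1.0):
        """Wait up to `timeout` seconds for is_shutdown() to return True."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            if is_shutdown():
                return True
            # Never sleep past the deadline; together with the check above
            # this keeps the total wait close to `timeout` even if the
            # calls inside the loop are slow.
            time.sleep(min(poll_interval, max(0.0, deadline - time.time())))
        return is_shutdown()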

A separate config value defines the default shutdown behaviour for
delete (default False, to keep compatibility with the current
behaviour).
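
Operators who want clean shutdown on delete by default can opt in via
nova.conf (matching the option added to the sample config below):

    [DEFAULT]
    default_shutdown_on_delete=true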

This code is structured to enable a subsequent API change to add
clean/forced options to the stop and delete methods.

Also, as a minor tidy-up, the repeated definition of FakeLoopingCall
in test_libvirt is moved to be common across tests.

Partially-Implements: blueprint user-defined-shutdown
Closes-Bug: #1196924

DocImpact

Change-Id: Ie69aa2621cb52d6fefdc19f664247b069d6782ee
Phil Day 2013-07-02 15:32:57 +01:00
parent caa2775793
commit b32d01d44c
26 changed files with 704 additions and 284 deletions


@ -639,6 +639,11 @@
# number means unlimited. (integer value)
#max_local_block_devices=3
# Control what happens to the Guest OS when an instance is
# deleted. If set to true instances will be given a chance to
# shutdown before they are destroyed. (boolean value)
#default_shutdown_on_delete=false
#
# Options defined in nova.compute.flavors
@ -1932,6 +1937,13 @@
# Whether to use cow images (boolean value)
#use_cow_images=true
# Number of seconds to wait for instance to shut down after a
# request is made for clean shutdown. We continue with the
# stop if instance does not shutdown within this window. A
# value of 0 will still signal the instance but not wait for
# it to shutdown (integer value)
#wait_shutdown_seconds=120
#
# Options defined in nova.virt.firewall
@ -2527,12 +2539,6 @@
# are: sd, xvd, uvd, vd) (string value)
#disk_prefix=<None>
# Number of seconds to wait for instance to shut down after
# soft reboot request is made. We fall back to hard reboot if
# instance does not shutdown within this window. (integer
# value)
#wait_soft_reboot_seconds=120
# Set to "host-model" to clone the host CPU feature flags; to
# "host-passthrough" to use the host CPU model exactly; to
# "custom" to use a named CPU model; to "none" to not set any


@ -65,7 +65,7 @@ class CellsManager(manager.Manager):
Scheduling requests get passed to the scheduler class.
"""
RPC_API_VERSION = '1.24'
RPC_API_VERSION = '1.25'
def __init__(self, *args, **kwargs):
# Mostly for tests.
@ -216,13 +216,16 @@ class CellsManager(manager.Manager):
"""Destroy an instance at the top level cell."""
self.msg_runner.instance_destroy_at_top(ctxt, instance)
def instance_delete_everywhere(self, ctxt, instance, delete_type):
def instance_delete_everywhere(self, ctxt, instance, delete_type,
clean_shutdown=False):
"""This is used by API cell when it didn't know what cell
an instance was in, but the instance was requested to be
deleted or soft_deleted. So, we'll broadcast this everywhere.
"""
self.msg_runner.instance_delete_everywhere(ctxt, instance,
delete_type)
self.msg_runner.instance_delete_everywhere(
ctxt, instance,
delete_type,
clean_shutdown=clean_shutdown)
def instance_fault_create_at_top(self, ctxt, instance_fault):
"""Create an instance fault at the top level cell."""
@ -442,9 +445,10 @@ class CellsManager(manager.Manager):
"""Start an instance in its cell."""
self.msg_runner.start_instance(ctxt, instance)
def stop_instance(self, ctxt, instance, do_cast=True):
def stop_instance(self, ctxt, instance, clean_shutdown, do_cast=True):
"""Stop an instance in its cell."""
response = self.msg_runner.stop_instance(ctxt, instance,
clean_shutdown=clean_shutdown,
do_cast=do_cast)
if not do_cast:
return response.value_or_raise()
@ -481,13 +485,15 @@ class CellsManager(manager.Manager):
"""Resume an instance in its cell."""
self.msg_runner.resume_instance(ctxt, instance)
def terminate_instance(self, ctxt, instance):
def terminate_instance(self, ctxt, instance, clean_shutdown=False):
"""Delete an instance in its cell."""
self.msg_runner.terminate_instance(ctxt, instance)
self.msg_runner.terminate_instance(ctxt, instance,
clean_shutdown)
def soft_delete_instance(self, ctxt, instance):
def soft_delete_instance(self, ctxt, instance, clean_shutdown=False):
"""Soft-delete an instance in its cell."""
self.msg_runner.soft_delete_instance(ctxt, instance)
self.msg_runner.soft_delete_instance(ctxt, instance,
clean_shutdown)
def resize_instance(self, ctxt, instance, flavor,
extra_instance_updates):


@ -836,11 +836,13 @@ class _TargetedMessageMethods(_BaseMessageMethods):
"""Start an instance via compute_api.start()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'start')
def stop_instance(self, message, instance):
def stop_instance(self, message, instance, clean_shutdown):
"""Stop an instance via compute_api.stop()."""
do_cast = not message.need_response
return self._call_compute_api_with_obj(message.ctxt, instance,
'stop', do_cast=do_cast)
'stop',
clean_shutdown=clean_shutdown,
do_cast=do_cast)
def reboot_instance(self, message, instance, reboot_type):
"""Reboot an instance via compute_api.reboot()."""
@ -858,11 +860,13 @@ class _TargetedMessageMethods(_BaseMessageMethods):
def get_host_uptime(self, message, host_name):
return self.host_api.get_host_uptime(message.ctxt, host_name)
def terminate_instance(self, message, instance):
self._call_compute_api_with_obj(message.ctxt, instance, 'delete')
def terminate_instance(self, message, instance, clean_shutdown):
self._call_compute_api_with_obj(message.ctxt, instance, 'delete',
clean_shutdown=clean_shutdown)
def soft_delete_instance(self, message, instance):
self._call_compute_api_with_obj(message.ctxt, instance, 'soft_delete')
def soft_delete_instance(self, message, instance, clean_shutdown):
self._call_compute_api_with_obj(message.ctxt, instance, 'soft_delete',
clean_shutdown=clean_shutdown)
def pause_instance(self, message, instance):
"""Pause an instance via compute_api.pause()."""
@ -1059,7 +1063,7 @@ class _BroadcastMessageMethods(_BaseMessageMethods):
pass
def instance_delete_everywhere(self, message, instance, delete_type,
**kwargs):
clean_shutdown, **kwargs):
"""Call compute API delete() or soft_delete() in every cell.
This is used when the API cell doesn't know what cell an instance
belongs to but the instance was requested to be deleted or
@ -1068,9 +1072,11 @@ class _BroadcastMessageMethods(_BaseMessageMethods):
LOG.debug(_("Got broadcast to %(delete_type)s delete instance"),
{'delete_type': delete_type}, instance=instance)
if delete_type == 'soft':
self.compute_api.soft_delete(message.ctxt, instance)
self.compute_api.soft_delete(message.ctxt, instance,
clean_shutdown=clean_shutdown)
else:
self.compute_api.delete(message.ctxt, instance)
self.compute_api.delete(message.ctxt, instance,
clean_shutdown=clean_shutdown)
def instance_fault_create_at_top(self, message, instance_fault, **kwargs):
"""Destroy an instance from the DB if we're a top level cell."""
@ -1431,12 +1437,14 @@ class MessageRunner(object):
run_locally=False)
message.process()
def instance_delete_everywhere(self, ctxt, instance, delete_type):
def instance_delete_everywhere(self, ctxt, instance, delete_type,
clean_shutdown):
"""This is used by API cell when it didn't know what cell
an instance was in, but the instance was requested to be
deleted or soft_deleted. So, we'll broadcast this everywhere.
"""
method_kwargs = dict(instance=instance, delete_type=delete_type)
method_kwargs = dict(instance=instance, delete_type=delete_type,
clean_shutdown=clean_shutdown)
message = _BroadcastMessage(self, ctxt,
'instance_delete_everywhere',
method_kwargs, 'down',
@ -1688,17 +1696,20 @@ class MessageRunner(object):
"""Start an instance in its cell."""
self._instance_action(ctxt, instance, 'start_instance')
def stop_instance(self, ctxt, instance, do_cast=True):
def stop_instance(self, ctxt, instance, clean_shutdown, do_cast=True):
"""Stop an instance in its cell."""
extra_kwargs = {'clean_shutdown': clean_shutdown}
if do_cast:
self._instance_action(ctxt, instance, 'stop_instance')
self._instance_action(ctxt, instance, 'stop_instance',
extra_kwargs=extra_kwargs)
else:
return self._instance_action(ctxt, instance, 'stop_instance',
extra_kwargs=extra_kwargs,
need_response=True)
def reboot_instance(self, ctxt, instance, reboot_type):
"""Reboot an instance in its cell."""
extra_kwargs = dict(reboot_type=reboot_type)
extra_kwargs = {'reboot_type': reboot_type}
self._instance_action(ctxt, instance, 'reboot_instance',
extra_kwargs=extra_kwargs)
@ -1710,11 +1721,15 @@ class MessageRunner(object):
"""Resume an instance in its cell."""
self._instance_action(ctxt, instance, 'resume_instance')
def terminate_instance(self, ctxt, instance):
self._instance_action(ctxt, instance, 'terminate_instance')
def terminate_instance(self, ctxt, instance, clean_shutdown=False):
extra_kwargs = {'clean_shutdown': clean_shutdown}
self._instance_action(ctxt, instance, 'terminate_instance',
extra_kwargs=extra_kwargs)
def soft_delete_instance(self, ctxt, instance):
self._instance_action(ctxt, instance, 'soft_delete_instance')
def soft_delete_instance(self, ctxt, instance, clean_shutdown=False):
extra_kwargs = {'clean_shutdown': clean_shutdown}
self._instance_action(ctxt, instance, 'soft_delete_instance',
extra_kwargs=extra_kwargs)
def pause_instance(self, ctxt, instance):
"""Pause an instance in its cell."""


@ -85,6 +85,8 @@ class CellsAPI(rpcclient.RpcProxy):
... Havana supports message version 1.24. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.24.
1.25 - Adds clean_shutdown option to stop and delete methods
'''
BASE_RPC_API_VERSION = '1.0'
@ -157,7 +159,8 @@ class CellsAPI(rpcclient.RpcProxy):
instance_p = jsonutils.to_primitive(instance)
self.client.cast(ctxt, 'instance_destroy_at_top', instance=instance_p)
def instance_delete_everywhere(self, ctxt, instance, delete_type):
def instance_delete_everywhere(self, ctxt, instance, delete_type,
clean_shutdown=False):
"""Delete instance everywhere. delete_type may be 'soft'
or 'hard'. This is generally only used to resolve races
when API cell doesn't know to what cell an instance belongs.
@ -165,8 +168,10 @@ class CellsAPI(rpcclient.RpcProxy):
if not CONF.cells.enable:
return
instance_p = jsonutils.to_primitive(instance)
self.client.cast(ctxt, 'instance_delete_everywhere',
instance=instance_p, delete_type=delete_type)
cctxt = self.client.prepare(version='1.25')
cctxt.cast(ctxt, 'instance_delete_everywhere',
instance=instance_p, delete_type=delete_type,
clean_shutdown=clean_shutdown)
def instance_fault_create_at_top(self, ctxt, instance_fault):
"""Create an instance fault at the top."""
@ -400,17 +405,19 @@ class CellsAPI(rpcclient.RpcProxy):
cctxt = self.client.prepare(version='1.12')
cctxt.cast(ctxt, 'start_instance', instance=instance)
def stop_instance(self, ctxt, instance, do_cast=True):
def stop_instance(self, ctxt, instance, do_cast=True,
clean_shutdown=True):
"""Stop an instance in its cell.
This method takes a new-world instance object.
"""
if not CONF.cells.enable:
return
cctxt = self.client.prepare(version='1.12')
cctxt = self.client.prepare(version='1.25')
method = do_cast and cctxt.cast or cctxt.call
return method(ctxt, 'stop_instance',
instance=instance, do_cast=do_cast)
instance=instance, do_cast=do_cast,
clean_shutdown=clean_shutdown)
def cell_create(self, ctxt, values):
cctxt = self.client.prepare(version='1.13')
@ -481,25 +488,29 @@ class CellsAPI(rpcclient.RpcProxy):
cctxt = self.client.prepare(version='1.15')
cctxt.cast(ctxt, 'resume_instance', instance=instance)
def terminate_instance(self, ctxt, instance, bdms, reservations=None):
def terminate_instance(self, ctxt, instance, bdms, reservations=None,
clean_shutdown=False):
"""Delete an instance in its cell.
This method takes a new-world instance object.
"""
if not CONF.cells.enable:
return
cctxt = self.client.prepare(version='1.18')
cctxt.cast(ctxt, 'terminate_instance', instance=instance)
cctxt = self.client.prepare(version='1.25')
cctxt.cast(ctxt, 'terminate_instance', instance=instance,
clean_shutdown=clean_shutdown)
def soft_delete_instance(self, ctxt, instance, reservations=None):
def soft_delete_instance(self, ctxt, instance, reservations=None,
clean_shutdown=False):
"""Soft-delete an instance in its cell.
This method takes a new-world instance object.
"""
if not CONF.cells.enable:
return
cctxt = self.client.prepare(version='1.18')
cctxt.cast(ctxt, 'soft_delete_instance', instance=instance)
cctxt = self.client.prepare(version='1.25')
cctxt.cast(ctxt, 'soft_delete_instance', instance=instance,
clean_shutdown=clean_shutdown)
def resize_instance(self, ctxt, instance, extra_instance_updates,
scheduler_hint, flavor, reservations):


@ -109,12 +109,18 @@ compute_opts = [
'behavior of every instance having the same name, set '
'this option to "%(name)s". Valid keys for the '
'template are: name, uuid, count.'),
cfg.IntOpt('max_local_block_devices',
cfg.IntOpt('max_local_block_devices',
default=3,
help='Maximum number of devices that will result '
'in a local image being created on the hypervisor node. '
'Setting this to 0 means nova will allow only '
'boot from volume. A negative number means unlimited.'),
cfg.BoolOpt('default_shutdown_on_delete',
default=False,
help='Control what happens to the Guest OS when an instance '
'is deleted. '
'If set to true instances will be given a chance to '
'shutdown before they are destroyed.'),
]
@ -1300,7 +1306,8 @@ class API(base.Base):
auto_disk_config,
image_ref)
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
def _delete(self, context, instance, delete_type,
cb, clean_shutdown, **instance_attrs):
if instance['disable_terminate']:
LOG.info(_('instance termination disabled'),
instance=instance)
@ -1342,7 +1349,9 @@ class API(base.Base):
# which will cause a cast to the child cell. Also,
# commit reservations here early until we have a better
# way to deal with quotas with cells.
cb(context, instance, bdms, reservations=None)
cb(context, instance,
clean_shutdown=clean_shutdown,
bdms=bdms, reservations=None)
if reservations:
QUOTAS.commit(context,
reservations,
@ -1382,13 +1391,17 @@ class API(base.Base):
self._record_action_start(context, instance,
instance_actions.DELETE)
cb(context, instance, bdms, reservations=reservations)
cb(context, instance,
clean_shutdown=clean_shutdown,
bdms=bdms, reservations=reservations)
except exception.ComputeHostNotFound:
pass
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms, delete_type, cb)
self._local_delete(context, instance,
bdms=bdms, delete_type=delete_type,
cb=cb, clean_shutdown=clean_shutdown)
if reservations:
QUOTAS.commit(context,
reservations,
@ -1498,7 +1511,8 @@ class API(base.Base):
ram=-instance_memory_mb)
return reservations
def _local_delete(self, context, instance, bdms, delete_type, cb):
def _local_delete(self, context, instance, bdms, delete_type, cb,
clean_shutdown):
LOG.warning(_("instance's host %s is down, deleting from "
"database") % instance['host'], instance=instance)
instance_uuid = instance['uuid']
@ -1532,33 +1546,38 @@ class API(base.Base):
err_str = _("Ignoring volume cleanup failure due to %s")
LOG.warn(err_str % exc, instance=instance)
self.db.block_device_mapping_destroy(context, bdm['id'])
cb(context, instance, bdms, local=True)
cb(context, instance, bdms=bdms,
clean_shutdown=clean_shutdown, local=True)
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.end" % delete_type,
system_metadata=system_meta)
def _do_delete(self, context, instance, bdms, reservations=None,
local=False):
def _do_delete(self, context, instance, bdms, clean_shutdown,
reservations=None, local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations)
self.compute_rpcapi.terminate_instance(
context, instance, bdms,
clean_shutdown=clean_shutdown,
reservations=reservations)
def _do_soft_delete(self, context, instance, bdms, reservations=None,
local=False):
def _do_soft_delete(self, context, instance, bdms, clean_shutdown,
reservations=None, local=False):
if local:
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.soft_delete_instance(context, instance,
reservations=reservations)
self.compute_rpcapi.soft_delete_instance(
context, instance,
reservations=reservations,
clean_shutdown=clean_shutdown)
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@ -1566,17 +1585,24 @@ class API(base.Base):
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
def soft_delete(self, context, instance,
clean_shutdown=None):
"""Terminate an instance."""
LOG.debug(_('Going to try to soft delete instance'),
instance=instance)
if clean_shutdown is None:
clean_shutdown = CONF.default_shutdown_on_delete
self._delete(context, instance, 'soft_delete', self._do_soft_delete,
self._delete(context, instance, delete_type='soft_delete',
cb=self._do_soft_delete,
clean_shutdown=clean_shutdown,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
self._delete(context, instance, 'delete', self._do_delete,
def _delete_instance(self, context, instance, clean_shutdown):
self._delete(context, instance, delete_type='delete',
cb=self._do_delete,
clean_shutdown=clean_shutdown,
task_state=task_states.DELETING)
@wrap_check_policy
@ -1584,10 +1610,13 @@ class API(base.Base):
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
def delete(self, context, instance, clean_shutdown=None):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate instance"), instance=instance)
self._delete_instance(context, instance)
if clean_shutdown is None:
clean_shutdown = CONF.default_shutdown_on_delete
self._delete_instance(context, instance, clean_shutdown)
@wrap_check_policy
@check_instance_lock
@ -1625,11 +1654,15 @@ class API(base.Base):
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED],
must_have_launched=False)
def force_delete(self, context, instance):
def force_delete(self, context, instance,
clean_shutdown=None):
"""Force delete a previously deleted (but not reclaimed) instance."""
self._delete_instance(context, instance)
if clean_shutdown is None:
clean_shutdown = CONF.default_shutdown_on_delete
self._delete_instance(context, instance, clean_shutdown)
def force_stop(self, context, instance, do_cast=True):
def force_stop(self, context, instance, do_cast=True,
clean_shutdown=True):
LOG.debug(_("Going to try to stop instance"), instance=instance)
instance.task_state = task_states.POWERING_OFF
@ -1638,7 +1671,8 @@ class API(base.Base):
self._record_action_start(context, instance, instance_actions.STOP)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast,
clean_shutdown=clean_shutdown)
@wrap_check_policy
@check_instance_lock
@ -1647,9 +1681,9 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
vm_states.ERROR],
task_state=[None])
def stop(self, context, instance, do_cast=True):
def stop(self, context, instance, do_cast=True, clean_shutdown=True):
"""Stop an instance."""
self.force_stop(context, instance, do_cast)
self.force_stop(context, instance, do_cast, clean_shutdown)
@wrap_check_policy
@check_instance_lock
@ -2369,7 +2403,7 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED],
task_state=[None])
def shelve(self, context, instance):
def shelve(self, context, instance, clean_shutdown=True):
"""Shelve an instance.
Shuts down an instance and frees it up to be removed from the
@ -2387,10 +2421,10 @@ class API(base.Base):
'snapshot')
image_id = image_meta['id']
self.compute_rpcapi.shelve_instance(context, instance=instance,
image_id=image_id)
image_id=image_id, clean_shutdown=clean_shutdown)
else:
self.compute_rpcapi.shelve_offload_instance(context,
instance=instance)
instance=instance, clean_shutdown=clean_shutdown)
@wrap_check_policy
@check_instance_lock
@ -2482,7 +2516,8 @@ class API(base.Base):
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None):
def rescue(self, context, instance, rescue_password=None,
clean_shutdown=True):
"""Rescue the given instance."""
bdms = self.get_instance_bdms(context, instance, legacy=False)
@ -2506,7 +2541,8 @@ class API(base.Base):
self._record_action_start(context, instance, instance_actions.RESCUE)
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password)
rescue_password=rescue_password,
clean_shutdown=clean_shutdown)
@wrap_check_policy
@check_instance_lock


@ -187,25 +187,31 @@ class ComputeCellsAPI(compute_api.API):
pass
return rv
def soft_delete(self, context, instance):
self._handle_cell_delete(context, instance, 'soft_delete')
def soft_delete(self, context, instance, clean_shutdown=False):
self._handle_cell_delete(context, instance,
method_name='soft_delete',
clean_shutdown=clean_shutdown)
def delete(self, context, instance):
self._handle_cell_delete(context, instance, 'delete')
def delete(self, context, instance, clean_shutdown=False):
self._handle_cell_delete(context, instance,
method_name='delete',
clean_shutdown=clean_shutdown)
def _handle_cell_delete(self, context, instance, method_name):
def _handle_cell_delete(self, context, instance, method_name,
clean_shutdown):
if not instance['cell_name']:
delete_type = method_name == 'soft_delete' and 'soft' or 'hard'
self.cells_rpcapi.instance_delete_everywhere(context,
instance, delete_type)
instance, delete_type, clean_shutdown)
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid']))
# NOTE(danms): If we try to delete an instance with no cell,
# there isn't anything to salvage, so we can hard-delete here.
super(ComputeCellsAPI, self)._local_delete(context, instance, bdms,
method_name,
self._do_delete)
super(ComputeCellsAPI, self)._local_delete(context, instance,
bdms=bdms, delete_type=method_name,
cb=self._do_delete,
clean_shutdown=clean_shutdown)
return
method = getattr(super(ComputeCellsAPI, self), method_name)
@ -218,9 +224,10 @@ class ComputeCellsAPI(compute_api.API):
self._cast_to_cells(context, instance, 'restore')
@check_instance_cell
def force_delete(self, context, instance):
def force_delete(self, context, instance, clean_shutdown=False):
"""Force delete a previously deleted (but not reclaimed) instance."""
super(ComputeCellsAPI, self).force_delete(context, instance)
super(ComputeCellsAPI, self).force_delete(context, instance,
clean_shutdown)
self._cast_to_cells(context, instance, 'force_delete')
@check_instance_cell
@ -261,12 +268,15 @@ class ComputeCellsAPI(compute_api.API):
return self._call_to_cells(context, instance, 'get_diagnostics')
@check_instance_cell
def rescue(self, context, instance, rescue_password=None):
def rescue(self, context, instance, rescue_password=None,
clean_shutdown=True):
"""Rescue the given instance."""
super(ComputeCellsAPI, self).rescue(context, instance,
rescue_password=rescue_password)
rescue_password=rescue_password,
clean_shutdown=clean_shutdown)
self._cast_to_cells(context, instance, 'rescue',
rescue_password=rescue_password)
rescue_password=rescue_password,
clean_shutdown=clean_shutdown)
@check_instance_cell
def unrescue(self, context, instance):
@ -276,9 +286,10 @@ class ComputeCellsAPI(compute_api.API):
@wrap_check_policy
@check_instance_cell
def shelve(self, context, instance):
def shelve(self, context, instance, clean_shutdown=True):
"""Shelve the given instance."""
self._cast_to_cells(context, instance, 'shelve')
self._cast_to_cells(context, instance, 'shelve',
clean_shutdown=clean_shutdown)
@wrap_check_policy
@check_instance_cell


@ -420,7 +420,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
RPC_API_VERSION = '3.3'
RPC_API_VERSION = '3.4'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@ -1126,8 +1126,9 @@ class ComputeManager(manager.Manager):
capi = self.conductor_api
bdms = capi.block_device_mapping_get_all_by_instance(context,
instance)
self._shutdown_instance(context, instance,
bdms, requested_networks)
# Failed during spawn so little point in trying a clean shutdown
self._shutdown_instance(context, instance, bdms,
requested_networks, clean_shutdown=False)
self._cleanup_volumes(context, instance['uuid'], bdms)
except Exception:
# do not attempt retry if clean up failed:
@ -1839,7 +1840,8 @@ class ComputeManager(manager.Manager):
self._set_instance_error_state(context, instance['uuid'])
def _shutdown_instance(self, context, instance,
bdms, requested_networks=None, notify=True):
bdms, requested_networks=None, notify=True,
clean_shutdown=False):
"""Shutdown an instance on this host."""
context = context.elevated()
LOG.audit(_('%(action_str)s instance') % {'action_str': 'Terminating'},
@ -1864,7 +1866,7 @@ class ComputeManager(manager.Manager):
# want to keep ip allocated for certain failures
try:
self.driver.destroy(context, instance, network_info,
block_device_info)
block_device_info, clean_shutdown=clean_shutdown)
except exception.InstancePowerOffFailure:
# if the instance can't power off, don't release the ip
with excutils.save_and_reraise_exception():
@ -1907,7 +1909,7 @@ class ComputeManager(manager.Manager):
# NOTE(vish): bdms will be deleted on instance destroy
@hooks.add_hook("delete_instance")
def _delete_instance(self, context, instance, bdms,
def _delete_instance(self, context, instance, bdms, clean_shutdown,
reservations=None):
"""Delete an instance on this host. Commit or rollback quotas
as necessary.
@ -1941,7 +1943,8 @@ class ComputeManager(manager.Manager):
self.conductor_api.instance_info_cache_delete(context, db_inst)
self._notify_about_instance_usage(context, instance,
"delete.start")
self._shutdown_instance(context, db_inst, bdms)
self._shutdown_instance(context, db_inst, bdms,
clean_shutdown=clean_shutdown)
# NOTE(vish): We have already deleted the instance, so we have
# to ignore problems cleaning up the volumes. It
# would be nice to let the user know somehow that
@ -1986,13 +1989,16 @@ class ComputeManager(manager.Manager):
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def terminate_instance(self, context, instance, bdms, reservations):
def terminate_instance(self, context, instance, bdms, reservations,
clean_shutdown=False):
"""Terminate an instance on this host."""
@utils.synchronized(instance['uuid'])
def do_terminate_instance(instance, bdms):
def do_terminate_instance(instance, bdms, clean_shutdown):
try:
self._delete_instance(context, instance, bdms,
self._delete_instance(context, instance,
bdms=bdms,
clean_shutdown=clean_shutdown,
reservations=reservations)
except exception.InstanceTerminationFailure as error:
LOG.exception(_('Setting instance vm_state to ERROR'),
@ -2001,7 +2007,7 @@ class ComputeManager(manager.Manager):
except exception.InstanceNotFound as e:
LOG.warn(e, instance=instance)
do_terminate_instance(instance, bdms)
do_terminate_instance(instance, bdms, clean_shutdown)
# NOTE(johannes): This is probably better named power_off_instance
# so it matches the driver method, but because of other issues, we
@ -2010,10 +2016,10 @@ class ComputeManager(manager.Manager):
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def stop_instance(self, context, instance):
def stop_instance(self, context, instance, clean_shutdown=True):
"""Stopping an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_off.start")
self.driver.power_off(instance)
self.driver.power_off(instance, clean_shutdown)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.STOPPED
@ -2051,7 +2057,8 @@ class ComputeManager(manager.Manager):
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def soft_delete_instance(self, context, instance, reservations):
def soft_delete_instance(self, context, instance, reservations,
clean_shutdown=True):
"""Soft delete an instance on this host."""
if context.is_admin and context.project_id != instance['project_id']:
@ -2068,11 +2075,11 @@ class ComputeManager(manager.Manager):
"soft_delete.start")
db_inst = obj_base.obj_to_primitive(instance)
try:
self.driver.soft_delete(db_inst)
self.driver.soft_delete(db_inst, clean_shutdown)
except NotImplementedError:
# Fallback to just powering off the instance if the
# hypervisor doesn't implement the soft_delete method
self.driver.power_off(instance)
self.driver.power_off(instance, clean_shutdown)
current_power_state = self._get_power_state(context, db_inst)
instance.power_state = current_power_state
instance.vm_state = vm_states.SOFT_DELETED
@ -2634,7 +2641,8 @@ class ComputeManager(manager.Manager):
@wrap_exception()
@reverts_task_state
@wrap_instance_event
def rescue_instance(self, context, instance, rescue_password):
def rescue_instance(self, context, instance, rescue_password,
clean_shutdown=True):
"""
Rescue an instance on this host.
:param rescue_password: password to set on rescue instance
@ -2661,7 +2669,9 @@ class ComputeManager(manager.Manager):
try:
self.driver.rescue(context, instance,
network_info,
rescue_image_meta, admin_password)
rescue_image_meta,
rescue_password=admin_password,
clean_shutdown=clean_shutdown)
except Exception as e:
LOG.exception(_("Error trying to Rescue Instance"),
instance=instance)
@ -3421,7 +3431,8 @@ class ComputeManager(manager.Manager):
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def shelve_instance(self, context, instance, image_id):
def shelve_instance(self, context, instance, image_id,
clean_shutdown=True):
"""Shelve an instance.
This should be used when you want to take a snapshot of the instance.
@ -3431,6 +3442,7 @@ class ComputeManager(manager.Manager):
:param context: request context
:param instance: an Instance object
:param image_id: an image id to snapshot to.
:param clean_shutdown: give guest a chance to stop
"""
self.conductor_api.notify_usage_exists(
context, obj_base.obj_to_primitive(instance),
@ -3449,7 +3461,7 @@ class ComputeManager(manager.Manager):
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
self.driver.power_off(instance)
self.driver.power_off(instance, clean_shutdown)
current_power_state = self._get_power_state(context, instance)
self.driver.snapshot(context, instance, image_id, update_task_state)
@ -3468,12 +3480,15 @@ class ComputeManager(manager.Manager):
self._notify_about_instance_usage(context, instance, 'shelve.end')
if CONF.shelved_offload_time == 0:
self.shelve_offload_instance(context, instance)
# Note: Have already shut down the instance
self.shelve_offload_instance(context, instance,
clean_shutdown=False)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def shelve_offload_instance(self, context, instance):
def shelve_offload_instance(self, context, instance,
clean_shutdown=True):
"""Remove a shelved instance from the hypervisor.
This frees up those resources for use by other instances, but may lead
@ -3487,7 +3502,7 @@ class ComputeManager(manager.Manager):
self._notify_about_instance_usage(context, instance,
'shelve_offload.start')
self.driver.power_off(instance)
self.driver.power_off(instance, clean_shutdown)
current_power_state = self._get_power_state(context, instance)
network_info = self._get_instance_nw_info(context, instance)
@ -4954,7 +4969,11 @@ class ComputeManager(manager.Manager):
# the instance was soft deleted, so there's no need to
# pass reservations here.
try:
self._delete_instance(context, instance, bdms)
# Note(Phild): Would have performed a clean shutdown (if
# requested) at the point of the original soft delete
# so no need to do it here
self._delete_instance(context, instance, bdms,
clean_shutdown=False)
except Exception as e:
LOG.warning(_("Periodic reclaim failed to delete "
"instance: %s"),
@ -5062,8 +5081,12 @@ class ComputeManager(manager.Manager):
"DELETED but still present on host."),
instance['name'], instance=instance)
try:
# Note(phild): Could be that we missed the original
# delete request, so give the instance a chance to
# stop cleanly
self._shutdown_instance(context, instance, bdms,
notify=False)
notify=False,
clean_shutdown=True)
self._cleanup_volumes(context, instance['uuid'], bdms)
except Exception as e:
LOG.warning(_("Periodic cleanup failed to delete "


@ -215,6 +215,8 @@ class ComputeAPI(rpcclient.RpcProxy):
3.1 - Update get_spice_console() to take an instance object
3.2 - Update get_vnc_console() to take an instance object
3.3 - Update validate_console_port() to take an instance object
3.4 - Add clean shutdown option to stop, rescue, shelve, delete
and soft_delete
'''
#
@ -620,15 +622,23 @@ class ComputeAPI(rpcclient.RpcProxy):
return cctxt.call(ctxt, 'remove_volume_connection',
instance=instance_p, volume_id=volume_id)
def rescue_instance(self, ctxt, instance, rescue_password):
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.0')
def rescue_instance(self, ctxt, instance, rescue_password,
clean_shutdown=True):
instance_p = jsonutils.to_primitive(instance)
if self.client.can_send_version('3.4'):
version = '3.4'
kwargs = {'instance': instance_p,
'rescue_password': rescue_password,
'clean_shutdown': clean_shutdown}
else:
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.0')
kwargs = {'instance': instance_p,
'rescue_password': rescue_password}
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'rescue_instance',
instance=instance_p,
rescue_password=rescue_password)
version=version)
cctxt.cast(ctxt, 'rescue_instance', **kwargs)
def reset_network(self, ctxt, instance):
# NOTE(russellb) Havana compat
@ -760,13 +770,21 @@ class ComputeAPI(rpcclient.RpcProxy):
version=version)
cctxt.cast(ctxt, 'start_instance', instance=instance)
def stop_instance(self, ctxt, instance, do_cast=True):
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.29')
def stop_instance(self, ctxt, instance, do_cast=True,
clean_shutdown=True):
if self.client.can_send_version('3.4'):
version = '3.4'
kwargs = {'instance': instance,
'clean_shutdown': clean_shutdown}
else:
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.29')
kwargs = {'instance': instance}
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
rpc_method = cctxt.cast if do_cast else cctxt.call
return rpc_method(ctxt, 'stop_instance', instance=instance)
return rpc_method(ctxt, 'stop_instance', **kwargs)
def suspend_instance(self, ctxt, instance):
# NOTE(russellb) Havana compat
@ -775,15 +793,23 @@ class ComputeAPI(rpcclient.RpcProxy):
version=version)
cctxt.cast(ctxt, 'suspend_instance', instance=instance)
def terminate_instance(self, ctxt, instance, bdms, reservations=None):
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.35')
def terminate_instance(self, ctxt, instance, bdms, reservations=None,
clean_shutdown=False):
bdms_p = jsonutils.to_primitive(bdms)
if self.client.can_send_version('3.4'):
version = '3.4'
kwargs = {'instance': instance, 'bdms': bdms_p,
'reservations': reservations,
'clean_shutdown': clean_shutdown}
else:
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.35')
kwargs = {'instance': instance, 'bdms': bdms_p,
'reservations': reservations}
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'terminate_instance',
instance=instance, bdms=bdms_p,
reservations=reservations)
version=version)
cctxt.cast(ctxt, 'terminate_instance', **kwargs)
def unpause_instance(self, ctxt, instance):
# NOTE(russellb) Havana compat
@ -800,13 +826,20 @@ class ComputeAPI(rpcclient.RpcProxy):
version=version)
cctxt.cast(ctxt, 'unrescue_instance', instance=instance_p)
def soft_delete_instance(self, ctxt, instance, reservations=None):
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.35')
def soft_delete_instance(self, ctxt, instance, reservations=None,
clean_shutdown=True):
if self.client.can_send_version('3.4'):
version = '3.4'
kwargs = {'instance': instance, 'reservations': reservations,
'clean_shutdown': clean_shutdown}
else:
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.35')
kwargs = {'instance': instance, 'reservations': reservations}
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'soft_delete_instance',
instance=instance, reservations=reservations)
version=version)
cctxt.cast(ctxt, 'soft_delete_instance', **kwargs)
def restore_instance(self, ctxt, instance):
# NOTE(russellb) Havana compat
@ -816,20 +849,35 @@ class ComputeAPI(rpcclient.RpcProxy):
version=version)
cctxt.cast(ctxt, 'restore_instance', instance=instance_p)
def shelve_instance(self, ctxt, instance, image_id=None):
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.31')
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'shelve_instance',
instance=instance, image_id=image_id)
def shelve_instance(self, ctxt, instance, image_id=None,
clean_shutdown=True):
if self.client.can_send_version('3.4'):
version = '3.4'
kwargs = {'instance': instance, 'image_id': image_id,
'clean_shutdown': clean_shutdown}
else:
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.31')
kwargs = {'instance': instance, 'image_id': image_id}
def shelve_offload_instance(self, ctxt, instance):
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.31')
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'shelve_offload_instance', instance=instance)
cctxt.cast(ctxt, 'shelve_instance', **kwargs)
def shelve_offload_instance(self, ctxt, instance,
clean_shutdown=True):
if self.client.can_send_version('3.4'):
version = '3.4'
kwargs = {'instance': instance,
'clean_shutdown': clean_shutdown}
else:
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.31')
kwargs = {'instance': instance}
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'shelve_offload_instance', **kwargs)
def unshelve_instance(self, ctxt, instance, host, image=None):
# NOTE(russellb) Havana compat


@ -2293,7 +2293,7 @@ class CloudTestCase(test.TestCase):
self.stubs.Set(fake_virt.FakeDriver, 'power_on', fake_power_on)
def fake_power_off(self, instance):
def fake_power_off(self, instance, clean_shutdown=True):
virt_driver['powered_off'] = True
self.stubs.Set(fake_virt.FakeDriver, 'power_off', fake_power_off)


@ -169,11 +169,13 @@ class CellsManagerClassTestCase(test.NoDBTestCase):
'instance_delete_everywhere')
self.msg_runner.instance_delete_everywhere(self.ctxt,
'fake-instance',
'fake-type')
'fake-type',
clean_shutdown=False)
self.mox.ReplayAll()
self.cells_manager.instance_delete_everywhere(
self.ctxt, instance='fake-instance',
delete_type='fake-type')
delete_type='fake-type',
clean_shutdown=False)
def test_instance_fault_create_at_top(self):
self.mox.StubOutWithMock(self.msg_runner,
@ -644,10 +646,12 @@ class CellsManagerClassTestCase(test.NoDBTestCase):
def test_stop_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'stop_instance')
self.msg_runner.stop_instance(self.ctxt, 'fake-instance',
clean_shutdown=False,
do_cast='meow')
self.mox.ReplayAll()
self.cells_manager.stop_instance(self.ctxt,
instance='fake-instance',
clean_shutdown=False,
do_cast='meow')
def test_cell_create(self):
@ -721,14 +725,16 @@ class CellsManagerClassTestCase(test.NoDBTestCase):
def test_terminate_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'terminate_instance')
self.msg_runner.terminate_instance(self.ctxt, 'fake-instance')
self.msg_runner.terminate_instance(self.ctxt, 'fake-instance',
False)
self.mox.ReplayAll()
self.cells_manager.terminate_instance(self.ctxt,
instance='fake-instance')
def test_soft_delete_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'soft_delete_instance')
self.msg_runner.soft_delete_instance(self.ctxt, 'fake-instance')
self.msg_runner.soft_delete_instance(self.ctxt, 'fake-instance',
False)
self.mox.ReplayAll()
self.cells_manager.soft_delete_instance(self.ctxt,
instance='fake-instance')


@ -1201,12 +1201,16 @@ class CellsTargetedMethodsTestCase(test.TestCase):
self._test_instance_action_method('start', (), {}, (), {}, False)
def test_stop_instance_cast(self):
self._test_instance_action_method('stop', (), {}, (),
{'do_cast': True}, False)
kwargs = {'clean_shutdown': False}
ekwargs = {'clean_shutdown': False, 'do_cast': True}
self._test_instance_action_method('stop', (), kwargs, (),
ekwargs, False)
def test_stop_instance_call(self):
self._test_instance_action_method('stop', (), {}, (),
{'do_cast': False}, True)
kwargs = {'clean_shutdown': False}
ekwargs = {'clean_shutdown': False, 'do_cast': False}
self._test_instance_action_method('stop', (), kwargs, (),
ekwargs, True)
def test_reboot_instance(self):
kwargs = dict(reboot_type='HARD')
@ -1234,12 +1238,14 @@ class CellsTargetedMethodsTestCase(test.TestCase):
self.assertEqual(host_uptime, expected_host_uptime)
def test_terminate_instance(self):
kwargs = {'clean_shutdown': False}
self._test_instance_action_method('terminate',
(), {}, (), {}, False)
(), kwargs, (), kwargs, False)
def test_soft_delete_instance(self):
kwargs = {'clean_shutdown': False}
self._test_instance_action_method('soft_delete',
(), {}, (), {}, False)
(), kwargs, (), kwargs, False)
def test_pause_instance(self):
self._test_instance_action_method('pause', (), {}, (), {}, False)
@ -1563,7 +1569,7 @@ class CellsBroadcastMethodsTestCase(test.TestCase):
self.mox.ReplayAll()
self.src_msg_runner.instance_delete_everywhere(self.ctxt,
instance, 'hard')
instance, 'hard', clean_shutdown=False)
def test_instance_soft_delete_everywhere(self):
# Reset this, as this is a broadcast down.
@ -1582,7 +1588,7 @@ class CellsBroadcastMethodsTestCase(test.TestCase):
self.mox.ReplayAll()
self.src_msg_runner.instance_delete_everywhere(self.ctxt,
instance, 'soft')
instance, 'soft', clean_shutdown=False)
def test_instance_fault_create_at_top(self):
fake_instance_fault = {'id': 1,


@ -196,9 +196,10 @@ class CellsAPITestCase(test.NoDBTestCase):
'fake-type')
expected_args = {'instance': fake_instance,
'delete_type': 'fake-type'}
'delete_type': 'fake-type',
'clean_shutdown': False}
self._check_result(call_info, 'instance_delete_everywhere',
expected_args)
expected_args, version='1.25')
def test_instance_fault_create_at_top(self):
fake_instance_fault = {'id': 2,
@ -504,23 +505,27 @@ class CellsAPITestCase(test.NoDBTestCase):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.stop_instance(
self.fake_context, 'fake-instance', do_cast=True)
self.fake_context, 'fake-instance',
clean_shutdown=False, do_cast=True)
expected_args = {'instance': 'fake-instance',
'clean_shutdown': False,
'do_cast': True}
self._check_result(call_info, 'stop_instance',
expected_args, version='1.12')
expected_args, version='1.25')
def test_stop_instance_call(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.stop_instance(
self.fake_context, 'fake-instance', do_cast=False)
self.fake_context, 'fake-instance',
clean_shutdown=False, do_cast=False)
expected_args = {'instance': 'fake-instance',
'clean_shutdown': False,
'do_cast': False}
self._check_result(call_info, 'stop_instance',
expected_args, version='1.12')
expected_args, version='1.25')
self.assertEqual(result, 'fake_response')
def test_cell_create(self):
@ -624,18 +629,20 @@ class CellsAPITestCase(test.NoDBTestCase):
self.cells_rpcapi.terminate_instance(self.fake_context,
'fake-instance', [])
expected_args = {'instance': 'fake-instance'}
expected_args = {'instance': 'fake-instance',
'clean_shutdown': False}
self._check_result(call_info, 'terminate_instance',
expected_args, version='1.18')
expected_args, version='1.25')
def test_soft_delete_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.soft_delete_instance(self.fake_context,
'fake-instance')
expected_args = {'instance': 'fake-instance'}
expected_args = {'instance': 'fake-instance',
'clean_shutdown': False}
self._check_result(call_info, 'soft_delete_instance',
expected_args, version='1.18')
expected_args, version='1.25')
def test_resize_instance(self):
call_info = self._stub_rpc_method('cast', None)


@ -1736,7 +1736,7 @@ class ComputeTestCase(BaseTestCase):
'unrescued': False}
def fake_rescue(self, context, instance_ref, network_info, image_meta,
rescue_password):
rescue_password, clean_shutdown=True):
called['rescued'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
@ -1767,7 +1767,7 @@ class ComputeTestCase(BaseTestCase):
def test_rescue_notifications(self):
# Ensure notifications on instance rescue.
def fake_rescue(self, context, instance_ref, network_info, image_meta,
rescue_password):
rescue_password, clean_shutdown=True):
pass
self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
@ -1857,7 +1857,8 @@ class ComputeTestCase(BaseTestCase):
self.compute._get_rescue_image(
mox.IgnoreArg(), instance).AndReturn({})
nova.virt.fake.FakeDriver.rescue(
mox.IgnoreArg(), instance, [], mox.IgnoreArg(), 'password'
mox.IgnoreArg(), instance, [], mox.IgnoreArg(),
rescue_password='password', clean_shutdown=False
).AndRaise(RuntimeError("Try again later"))
self.mox.ReplayAll()
@ -1870,7 +1871,8 @@ class ComputeTestCase(BaseTestCase):
exception.InstanceNotRescuable, expected_message):
self.compute.rescue_instance(
self.context, instance=instance,
rescue_password='password')
rescue_password='password',
clean_shutdown=False)
self.assertEqual('some_random_state', instance['vm_state'])
@ -1905,7 +1907,7 @@ class ComputeTestCase(BaseTestCase):
called = {'power_off': False}
def fake_driver_power_off(self, instance):
def fake_driver_power_off(self, instance, clean_shutdown):
called['power_off'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off',
@ -3137,7 +3139,7 @@ class ComputeTestCase(BaseTestCase):
fake_cleanup_volumes)
self.compute._delete_instance(self.context, instance=instance,
bdms={})
bdms={}, clean_shutdown=False)
def test_delete_instance_keeps_net_on_power_off_fail(self):
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
@ -3146,7 +3148,8 @@ class ComputeTestCase(BaseTestCase):
self.compute.driver.destroy(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(exp)
mox.IgnoreArg(),
clean_shutdown=False).AndRaise(exp)
# mox will detect if _deallocate_network gets called unexpectedly
self.mox.ReplayAll()
instance = self._create_fake_instance()
@ -3154,7 +3157,7 @@ class ComputeTestCase(BaseTestCase):
self.compute._delete_instance,
self.context,
instance=jsonutils.to_primitive(instance),
bdms={})
bdms={}, clean_shutdown=False)
def test_delete_instance_loses_net_on_other_fail(self):
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
@ -3163,7 +3166,8 @@ class ComputeTestCase(BaseTestCase):
self.compute.driver.destroy(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(exp)
mox.IgnoreArg(),
clean_shutdown=False).AndRaise(exp)
self.compute._deallocate_network(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
@ -3173,7 +3177,7 @@ class ComputeTestCase(BaseTestCase):
self.compute._delete_instance,
self.context,
instance=jsonutils.to_primitive(instance),
bdms={})
bdms={}, clean_shutdown=False)
def test_delete_instance_deletes_console_auth_tokens(self):
instance = self._create_fake_instance_obj()
@ -3189,7 +3193,7 @@ class ComputeTestCase(BaseTestCase):
fake_delete_tokens)
self.compute._delete_instance(self.context, instance=instance,
bdms={})
bdms={}, clean_shutdown=False)
self.assertTrue(self.tokens_deleted)
@ -3208,7 +3212,7 @@ class ComputeTestCase(BaseTestCase):
fake_delete_tokens)
self.compute._delete_instance(self.context, instance=instance,
bdms={})
bdms={}, clean_shutdown=False)
self.assertTrue(self.tokens_deleted)
@ -3219,7 +3223,7 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance_obj()
def fake_delete_instance(context, instance, bdms,
reservations=None):
clean_shutdown, reservations=None):
raise exception.InstanceTerminationFailure(reason='')
self.stubs.Set(self.compute, '_delete_instance',
@ -5105,9 +5109,11 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute, "_shutdown_instance")
# Simulate an error and make sure cleanup proceeds with next instance.
self.compute._shutdown_instance(ctxt, inst1, bdms, notify=False).\
self.compute._shutdown_instance(ctxt, inst1, bdms, notify=False,
clean_shutdown=True).\
AndRaise(test.TestingException)
self.compute._shutdown_instance(ctxt, inst2, bdms, notify=False).\
self.compute._shutdown_instance(ctxt, inst2, bdms, notify=False,
clean_shutdown=True).\
AndReturn(None)
self.mox.StubOutWithMock(self.compute, "_cleanup_volumes")
@ -9129,8 +9135,9 @@ class ComputeRescheduleOrErrorTestCase(BaseTestCase):
self.compute.conductor_api,
self.instance, exc_info[0], exc_info=exc_info)
self.compute._shutdown_instance(self.context, self.instance,
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(InnerTestingException("Error"))
mox.IgnoreArg(), mox.IgnoreArg(),
clean_shutdown=False).AndRaise(
InnerTestingException("Error"))
self.compute._log_original_error(exc_info, instance_uuid)
self.mox.ReplayAll()
@ -9156,7 +9163,8 @@ class ComputeRescheduleOrErrorTestCase(BaseTestCase):
self.compute._shutdown_instance(self.context, self.instance,
mox.IgnoreArg(),
mox.IgnoreArg())
mox.IgnoreArg(),
clean_shutdown=False)
self.compute._cleanup_volumes(self.context, instance_uuid,
mox.IgnoreArg())
self.compute._reschedule(self.context, None, instance_uuid,
@ -9187,7 +9195,8 @@ class ComputeRescheduleOrErrorTestCase(BaseTestCase):
self.compute._shutdown_instance(self.context, self.instance,
mox.IgnoreArg(),
mox.IgnoreArg())
mox.IgnoreArg(),
clean_shutdown=False)
self.compute._cleanup_volumes(self.context, instance_uuid,
mox.IgnoreArg())
self.compute._reschedule(self.context, None, {}, instance_uuid,
@ -9219,7 +9228,8 @@ class ComputeRescheduleOrErrorTestCase(BaseTestCase):
self.instance, exc_info[0], exc_info=exc_info)
self.compute._shutdown_instance(self.context, self.instance,
mox.IgnoreArg(),
mox.IgnoreArg())
mox.IgnoreArg(),
clean_shutdown=False)
self.compute._cleanup_volumes(self.context, instance_uuid,
mox.IgnoreArg())
self.compute._reschedule(self.context, None, {}, instance_uuid,


@ -281,7 +281,8 @@ class _ComputeAPIUnitTestMixIn(object):
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'stop_instance')
rpcapi.stop_instance(self.context, instance, do_cast=True)
rpcapi.stop_instance(self.context, instance, do_cast=True,
clean_shutdown=True)
self.mox.ReplayAll()
@ -541,10 +542,12 @@ class _ComputeAPIUnitTestMixIn(object):
cast_reservations = reservations
if delete_type == 'soft_delete':
rpcapi.soft_delete_instance(self.context, inst,
reservations=cast_reservations)
reservations=cast_reservations,
clean_shutdown=False)
elif delete_type in ['delete', 'force_delete']:
rpcapi.terminate_instance(self.context, inst, [],
reservations=cast_reservations)
reservations=cast_reservations,
clean_shutdown=False)
if commit_quotas:
# Local delete or when we're testing API cell.
@ -612,6 +615,7 @@ class _ComputeAPIUnitTestMixIn(object):
if self.cell_type == 'api':
rpcapi.terminate_instance(self.context, inst, [],
clean_shutdown=False,
reservations=None)
else:
compute_utils.notify_about_instance_usage(mox.IgnoreArg(),
@ -641,7 +645,8 @@ class _ComputeAPIUnitTestMixIn(object):
'delete_on_termination': False}]
def _fake_do_delete(context, instance, bdms,
rservations=None, local=False):
resrvations=None, local=False,
clean_shutdown=False):
pass
inst = self._create_instance_obj()
@ -685,7 +690,8 @@ class _ComputeAPIUnitTestMixIn(object):
self.mox.ReplayAll()
self.compute_api._local_delete(self.context, inst, bdms,
'delete',
_fake_do_delete)
_fake_do_delete,
clean_shutdown=False)
def test_delete_disabled(self):
inst = self._create_instance_obj()


@ -153,7 +153,7 @@ class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
'instance_delete_everywhere')
inst = self._create_fake_instance_obj()
cells_rpcapi.instance_delete_everywhere(self.context,
inst, 'hard')
inst, 'hard', False)
self.mox.ReplayAll()
self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
lambda *a, **kw: None)
@ -165,7 +165,7 @@ class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
'instance_delete_everywhere')
inst = self._create_fake_instance_obj()
cells_rpcapi.instance_delete_everywhere(self.context,
inst, 'soft')
inst, 'soft', False)
self.mox.ReplayAll()
self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
lambda *a, **kw: None)


@ -400,7 +400,15 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_soft_delete_instance(self):
self._test_compute_api('soft_delete_instance', 'cast',
instance=self.fake_instance,
reservations=['uuid1', 'uuid2'])
reservations=['uuid1', 'uuid2'],
clean_shutdown=True,
version='3.4')
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('soft_delete_instance', 'cast',
instance=self.fake_instance,
reservations=['uuid1', 'uuid2'],
version='3.0')
# NOTE(russellb) Havana compat
self.flags(compute='havana', group='upgrade_levels')
@ -559,7 +567,13 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_rescue_instance(self):
self._test_compute_api('rescue_instance', 'cast',
instance=self.fake_instance, rescue_password='pw')
instance=self.fake_instance, rescue_password='pw',
clean_shutdown=True, version='3.4')
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('rescue_instance', 'cast',
instance=self.fake_instance, rescue_password='pw',
version='3.0')
# NOTE(russellb) Havana compat
self.flags(compute='havana', group='upgrade_levels')
@ -693,7 +707,13 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_stop_instance_cast(self):
self._test_compute_api('stop_instance', 'cast',
instance=self.fake_instance)
instance=self.fake_instance,
clean_shutdown=True, version='3.4')
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('stop_instance', 'cast',
instance=self.fake_instance,
version='3.0')
# NOTE(russellb) Havana compat
self.flags(compute='havana', group='upgrade_levels')
@ -702,7 +722,13 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_stop_instance_call(self):
self._test_compute_api('stop_instance', 'call',
instance=self.fake_instance)
instance=self.fake_instance,
clean_shutdown=True, version='3.4')
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('stop_instance', 'call',
instance=self.fake_instance,
version='3.0')
# NOTE(russellb) Havana compat
self.flags(compute='havana', group='upgrade_levels')
@ -721,7 +747,14 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_terminate_instance(self):
self._test_compute_api('terminate_instance', 'cast',
instance=self.fake_instance, bdms=[],
reservations=['uuid1', 'uuid2'])
reservations=['uuid1', 'uuid2'],
clean_shutdown=False, version='3.4')
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('terminate_instance', 'cast',
instance=self.fake_instance, bdms=[],
reservations=['uuid1', 'uuid2'],
version='3.0')
# NOTE(russellb) Havana compat
self.flags(compute='havana', group='upgrade_levels')
@ -749,7 +782,13 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_shelve_instance(self):
self._test_compute_api('shelve_instance', 'cast',
instance=self.fake_instance, image_id='image_id')
instance=self.fake_instance, image_id='image_id',
clean_shutdown=True, version='3.4')
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('shelve_instance', 'cast',
instance=self.fake_instance, image_id='image_id',
version='3.0')
# NOTE(russellb) Havana compat
self.flags(compute='havana', group='upgrade_levels')
@ -759,7 +798,12 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_shelve_offload_instance(self):
self._test_compute_api('shelve_offload_instance', 'cast',
instance=self.fake_instance)
instance=self.fake_instance,
clean_shutdown=True, version='3.4')
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('shelve_offload_instance', 'cast',
instance=self.fake_instance, version='3.0')
# NOTE(russellb) Havana compat
self.flags(compute='havana', group='upgrade_levels')
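
The RPC API tests above pin the new clean_shutdown argument to compute RPC version 3.4 and expect it to be dropped when computes are capped at 3.0. A minimal sketch of the version-guarded cast this implies, assuming the usual can_send_version pattern (the _compute_host helper and exact wiring are illustrative):

    # nova/compute/rpcapi.py -- illustrative only
    def stop_instance(self, ctxt, instance, do_cast=True, clean_shutdown=True):
        msg_args = {'instance': instance}
        if self.client.can_send_version('3.4'):
            version = '3.4'
            msg_args['clean_shutdown'] = clean_shutdown
        else:
            # Older computes do not understand the new argument.
            version = '3.0'
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        rpc_method = cctxt.cast if do_cast else cctxt.call
        return rpc_method(ctxt, 'stop_instance', **msg_args)
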

View File

@ -57,7 +57,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.compute._notify_about_instance_usage(self.context, instance,
'shelve.start')
self.compute.driver.power_off(instance)
self.compute.driver.power_off(instance, True)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
self.compute.driver.snapshot(self.context, instance, 'fake_image_id',
@ -81,7 +81,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
if CONF.shelved_offload_time == 0:
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.start')
self.compute.driver.power_off(instance)
self.compute.driver.power_off(instance, False)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
db.instance_update_and_get_original(self.context,
@ -132,7 +132,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.start')
self.compute.driver.power_off(instance)
self.compute.driver.power_off(instance, True)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
db.instance_update_and_get_original(self.context, instance['uuid'],

View File

@ -359,6 +359,14 @@ class FakeNodeDevice(object):
return self.xml
class FakeLoopingCall:
def start(self, *a, **k):
return self
def wait(self):
return None
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
@ -4308,14 +4316,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertTrue(conn.delete_instance_files(inst_obj))
def test_reboot_different_ids(self):
class FakeLoopingCall:
def start(self, *a, **k):
return self
def wait(self):
return None
self.flags(wait_soft_reboot_seconds=1, group='libvirt')
self.flags(wait_shutdown_seconds=1)
info_tuple = ('fake', 'fake', 'fake', 'also_fake')
self.reboot_create_called = False
@ -4349,14 +4350,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertTrue(self.reboot_create_called)
def test_reboot_same_ids(self):
class FakeLoopingCall:
def start(self, *a, **k):
return self
def wait(self):
return None
self.flags(wait_soft_reboot_seconds=1, group='libvirt')
self.flags(wait_shutdown_seconds=1)
info_tuple = ('fake', 'fake', 'fake', 'also_fake')
self.reboot_hard_reboot_called = False
@ -4643,6 +4637,84 @@ class LibvirtConnTestCase(test.TestCase):
_attach_pci_devices.assert_has_calls([mock.call('fake_dom',
'fake_pci_devs')])
def test_power_off(self):
info_tuple = ('fake', 'fake', 'fake', 'also_fake')
# Mock domain
mock_domain = self.mox.CreateMock(libvirt.virDomain)
# Called via _shutdown()
mock_domain.info().AndReturn(
(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
mock_domain.ID().AndReturn('some_fake_id')
mock_domain.shutdown()
mock_domain.info().AndReturn(
(libvirt_driver.VIR_DOMAIN_SHUTDOWN,) + info_tuple)
mock_domain.ID().AndReturn('some_other_fake_id')
# Called via _destroy()
mock_domain.ID().AndReturn('some_other_fake_id')
mock_domain.destroy()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock_domain
def fake_create_domain(**kwargs):
self.reboot_create_called = True
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, '_create_domain', fake_create_domain)
self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
lambda *a, **k: FakeLoopingCall())
conn.power_off(instance, clean_shutdown=True)
def test_power_off_shutdown_fails(self):
info_tuple = ('fake', 'fake', 'fake', 'also_fake')
self.flags(wait_shutdown_seconds=3)
# Mock domain
mock_domain = self.mox.CreateMock(libvirt.virDomain)
# Called via _shutdown()
mock_domain.info().AndReturn(
(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
mock_domain.ID().AndReturn('some_fake_id')
mock_domain.shutdown()
mock_domain.info().AndReturn(
(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
mock_domain.ID().AndReturn('some_fake_id')
# still running after 1 sec
mock_domain.info().AndReturn(
(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
mock_domain.ID().AndReturn('some_fake_id')
# still running after 2 secs
mock_domain.info().AndReturn(
(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
mock_domain.ID().AndReturn('some_fake_id')
# Called via _destroy()
mock_domain.ID().AndReturn('some_fake_id')
mock_domain.destroy()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock_domain
def fake_create_domain(**kwargs):
self.reboot_create_called = True
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, '_create_domain', fake_create_domain)
self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
lambda *a, **k: FakeLoopingCall())
conn.power_off(instance, clean_shutdown=True)
def test_destroy_undefines(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
@ -4704,6 +4776,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.VerifyAll()
def test_destroy_undefines_no_undefine_flags(self):
info_tuple = ('fake', 'fake', 'fake', 'also_fake')
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
@ -4808,6 +4881,45 @@ class LibvirtConnTestCase(test.TestCase):
self.assertRaises(exception.InstancePowerOffFailure,
conn.destroy, self.context, instance, [])
def test_destroy_with_shutdown(self):
info_tuple = ('fake', 'fake', 'fake', 'also_fake')
# Mock domain
mock_domain = self.mox.CreateMock(libvirt.virDomain)
# Called via _shutdown()
mock_domain.info().AndReturn(
(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
mock_domain.ID().AndReturn('some_fake_id')
mock_domain.shutdown()
mock_domain.info().AndReturn(
(libvirt_driver.VIR_DOMAIN_SHUTDOWN,) + info_tuple)
mock_domain.ID().AndReturn('some_other_fake_id')
# Called via _destroy()
mock_domain.ID().AndReturn('some_other_fake_id')
mock_domain.destroy()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock_domain
def fake_create_domain(**kwargs):
self.reboot_create_called = True
def fake_cleanup(instance, network_info, block_device_info,
destroy_discs, context):
pass
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, '_create_domain', fake_create_domain)
self.stubs.Set(conn, '_cleanup', fake_cleanup)
self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
lambda *a, **k: FakeLoopingCall())
conn.destroy(self.context, instance, [], clean_shutdown=True)
def test_private_destroy_not_found(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
@ -7084,6 +7196,9 @@ class LibvirtDriverTestCase(test.TestCase):
block_device_info=None):
return disk_info_text
def fake_shutdown(instance):
pass
def fake_destroy(instance):
pass
@ -7095,6 +7210,8 @@ class LibvirtDriverTestCase(test.TestCase):
self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
fake_get_instance_disk_info)
self.stubs.Set(self.libvirtconnection, '_shutdown',
fake_shutdown)
self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
fake_get_host_ip_addr)

View File

@ -723,7 +723,7 @@ class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
return self.ctxt
def test_force_hard_reboot(self):
self.flags(wait_soft_reboot_seconds=0, group='libvirt')
self.flags(wait_shutdown_seconds=0)
self.test_reboot()
def test_migrate_disk_and_power_off(self):

View File

@ -294,7 +294,9 @@ class BareMetalDriver(driver.ComputeDriver):
"for instance %r") % instance['uuid'])
_update_state(ctx, node, instance, state)
def destroy(self, context, instance, network_info, block_device_info=None):
def destroy(self, context, instance, network_info, block_device_info=None,
clean_shutdown=False):
# TODO(PhilD): Add support for clean_shutdown
context = nova_context.get_admin_context()
try:
@ -326,8 +328,9 @@ class BareMetalDriver(driver.ComputeDriver):
LOG.error(_("Error while recording destroy failure in "
"baremetal database: %s") % e)
def power_off(self, instance, node=None):
def power_off(self, instance, node=None, clean_shutdown=True):
"""Power off the specified instance."""
# TODO(PhilD): Add support for clean_shutdown
if not node:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
pm = get_power_manager(node=node, instance=instance)

View File

@ -49,6 +49,14 @@ driver_opts = [
cfg.BoolOpt('use_cow_images',
default=True,
help='Whether to use cow images'),
cfg.IntOpt('wait_shutdown_seconds',
default=120,
deprecated_name='libvirt_wait_soft_reboot_seconds',
help='Number of seconds to wait for instance to shut down after'
' a request is made for clean shutdown. We continue with'
' the stop if instance does not shutdown within this'
' window. A value of 0 will still signal the instance but'
' not wait for it to shutdown'),
]
CONF = cfg.CONF
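
Because the timeout now lives in nova.virt.driver rather than in the libvirt group, any driver module can consume it with a plain import_opt. A minimal sketch (the helper function is illustrative, not part of the change):

    from oslo.config import cfg

    CONF = cfg.CONF
    # The option is registered by nova.virt.driver; drivers only import it.
    CONF.import_opt('wait_shutdown_seconds', 'nova.virt.driver')

    def shutdown_timeout():
        # A value of 0 means "signal the guest but do not wait".
        return CONF.wait_shutdown_seconds
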
@ -242,7 +250,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
destroy_disks=True, clean_shutdown=False):
"""Destroy (shutdown and delete) the specified instance.
If the instance is not found (for example if networking failed), this
@ -256,6 +264,9 @@ class ComputeDriver(object):
:param block_device_info: Information about block devices that should
be detached from the instance.
:param destroy_disks: Indicates if disks should be destroyed
:param clean_shutdown: Indicates if the instance should be shutdown
first
"""
raise NotImplementedError()
@ -449,7 +460,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
rescue_password, clean_shutdown=True):
"""Rescue the specified instance."""
raise NotImplementedError()
@ -462,7 +473,7 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def power_off(self, instance):
def power_off(self, instance, clean_shutdown=True):
"""Power off the specified instance."""
raise NotImplementedError()
@ -471,7 +482,7 @@ class ComputeDriver(object):
"""Power on the specified instance."""
raise NotImplementedError()
def soft_delete(self, instance):
def soft_delete(self, instance, clean_shutdown=True):
"""Soft delete the specified instance."""
raise NotImplementedError()
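
For driver authors the contract is simply to accept the new flag and, where possible, attempt a graceful guest shutdown before forcing the operation. A hedged sketch of the shape an implementation might take; the two private helpers are assumptions, not part of the base API:

    from nova.virt import driver

    class ExampleDriver(driver.ComputeDriver):
        """Illustrative only: shows the intent of the new parameter."""

        def power_off(self, instance, clean_shutdown=True):
            if clean_shutdown:
                # Give the guest a chance to shut itself down; continue
                # with the hard power-off whether or not it responds in
                # time.
                self._attempt_graceful_shutdown(instance)
            self._force_power_off(instance)

        def _attempt_graceful_shutdown(self, instance):
            # Hypothetical helper: send an ACPI shutdown request and poll
            # the instance state for up to CONF.wait_shutdown_seconds.
            pass

        def _force_power_off(self, instance):
            # Hypothetical helper: stop the instance immediately.
            pass
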

View File

@ -154,7 +154,7 @@ class FakeDriver(driver.ComputeDriver):
pass
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
rescue_password, clean_shutdown=True):
pass
def unrescue(self, instance, network_info):
@ -178,13 +178,13 @@ class FakeDriver(driver.ComputeDriver):
block_device_info=None):
pass
def power_off(self, instance):
def power_off(self, instance, clean_shutdown=True):
pass
def power_on(self, context, instance, network_info, block_device_info):
pass
def soft_delete(self, instance):
def soft_delete(self, instance, clean_shutdown=True):
pass
def restore(self, instance):
@ -203,7 +203,7 @@ class FakeDriver(driver.ComputeDriver):
pass
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
destroy_disks=True, clean_shutdown=False):
key = instance['name']
if key in self.instances:
del self.instances[key]

View File

@ -59,7 +59,8 @@ class HyperVDriver(driver.ComputeDriver):
self._vmops.reboot(instance, network_info, reboot_type)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
destroy_disks=True, clean_shutdown=False):
# TODO(PhilD): Add support for clean_shutdown
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
@ -103,7 +104,8 @@ class HyperVDriver(driver.ComputeDriver):
def resume(self, context, instance, network_info, block_device_info=None):
self._vmops.resume(instance)
def power_off(self, instance):
def power_off(self, instance, clean_shutdown=True):
# TODO(PhilD): Add support for clean_shutdown
self._vmops.power_off(instance)
def power_on(self, context, instance, network_info,

View File

@ -82,6 +82,7 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.openstack.common import timeutils
from nova.openstack.common import xmlutils
from nova.pci import pci_manager
from nova.pci import pci_utils
@ -210,13 +211,6 @@ libvirt_opts = [
'(valid options are: sd, xvd, uvd, vd)',
deprecated_name='libvirt_disk_prefix',
deprecated_group='DEFAULT'),
cfg.IntOpt('wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.',
deprecated_name='libvirt_wait_soft_reboot_seconds',
deprecated_group='DEFAULT'),
cfg.StrOpt('cpu_mode',
help='Set to "host-model" to clone the host CPU feature flags; '
'to "host-passthrough" to use the host CPU model exactly; '
@ -259,6 +253,7 @@ CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('wait_shutdown_seconds', 'nova.virt.driver')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')
@ -890,7 +885,9 @@ class LibvirtDriver(driver.ComputeDriver):
self._destroy(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
destroy_disks=True, clean_shutdown=False):
if clean_shutdown:
self._shutdown(instance)
self._destroy(instance)
self._cleanup(context, instance, network_info, block_device_info,
destroy_disks)
@ -1909,31 +1906,45 @@ class LibvirtDriver(driver.ComputeDriver):
return self._hard_reboot(context, instance, network_info,
block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
def _shutdown(self, instance, post_function=None):
"""Attempt to shutdown the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
succeeds if the guest responds to acpi. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
:returns: True if the shutdown succeeded
"""
dom = self._lookup_by_name(instance["name"])
try:
dom = self._lookup_by_name(instance["name"])
except exception.InstanceNotFound:
# If the instance has already gone then we don't need to
# wait for it to shut down
return True
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
old_domid = dom.ID()
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
LOG.debug(_("Shuting down instance from state %s") % state,
instance=instance)
if (state == power_state.SHUTDOWN or
state == power_state.CRASHED):
LOG.info(_("Instance already shutdown."),
instance=instance)
if post_function:
post_function(dom)
return True
elif state == power_state.RUNNING:
dom.shutdown()
# NOTE(vish): This actually could take slightly longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
# NOTE(PhilD): Because the call to get_info can be slow we
# don't want to depend just on counting calls
# to sleep(1). Keep the loop count as a backstop
# to avoid an infinite while loop, but also break
# on the elapsed time.
start_time = timeutils.utcnow()
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance))
for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
for x in xrange(CONF.wait_shutdown_seconds):
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
@ -1946,18 +1957,45 @@ class LibvirtDriver(driver.ComputeDriver):
power_state.CRASHED]:
LOG.info(_("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=dom)
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running, instance)
timer.start(interval=0.5).wait()
if post_function:
post_function(dom)
return True
else:
LOG.info(_("Instance may have been rebooted during soft "
"reboot, so return now."), instance=instance)
return True
if timeutils.is_older_than(start_time,
CONF.wait_shutdown_seconds):
LOG.info(_("Instance failed to shutdown in %d seconds.") %
CONF.wait_shutdown_seconds, instance=instance)
break
greenthread.sleep(1)
return False
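
The NOTE above is the core of the timer change: counting one-second sleeps alone can overshoot the configured window when each dom.info() call is slow, so the loop also checks elapsed wall-clock time. A standalone sketch of the pattern, using plain time rather than timeutils:

    import time

    def wait_for(condition, timeout_seconds, poll_interval=1):
        # The iteration cap is only a backstop against an endless loop;
        # the elapsed-time check is what actually bounds the wait when
        # each call to condition() is slow.
        start = time.time()
        for _ in range(timeout_seconds):
            if condition():
                return True
            if time.time() - start > timeout_seconds:
                break
            time.sleep(poll_interval)
        return False
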
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
succeeds if the guest responds to acpi. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
def _reboot_after_shutdown(dom):
self._create_domain(domain=dom)
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
return self._shutdown(instance,
_reboot_after_shutdown)
def _hard_reboot(self, context, instance, network_info,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
@ -2038,8 +2076,10 @@ class LibvirtDriver(driver.ComputeDriver):
dom = self._lookup_by_name(instance['name'])
dom.resume()
def power_off(self, instance):
def power_off(self, instance, clean_shutdown=True):
"""Power off the specified instance."""
if clean_shutdown:
self._shutdown(instance)
self._destroy(instance)
def power_on(self, context, instance, network_info,
@ -2089,7 +2129,7 @@ class LibvirtDriver(driver.ComputeDriver):
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
rescue_password, clean_shutdown=True):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
@ -2123,6 +2163,8 @@ class LibvirtDriver(driver.ComputeDriver):
xml = self.to_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images,
write_to_disk=True)
if clean_shutdown:
self._shutdown(instance)
self._destroy(instance)
self._create_domain(xml)
@ -4662,7 +4704,7 @@ class LibvirtDriver(driver.ComputeDriver):
if not shared_storage:
utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
self.power_off(instance)
self.power_off(instance, clean_shutdown=True)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)

View File

@ -188,8 +188,9 @@ class VMwareESXDriver(driver.ComputeDriver):
self._vmops.reboot(instance, network_info)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
destroy_disks=True, clean_shutdown=False):
"""Destroy VM instance."""
# TODO(PhilD): Add support for clean_shutdown
self._vmops.destroy(instance, network_info, destroy_disks)
def pause(self, instance):
@ -209,16 +210,18 @@ class VMwareESXDriver(driver.ComputeDriver):
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
rescue_password, clean_shutdown=True):
"""Rescue the specified instance."""
# TODO(PhilD): Add support for clean_shutdown
self._vmops.rescue(context, instance, network_info, image_meta)
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance):
def power_off(self, instance, clean_shutdown=True):
"""Power off the specified instance."""
# TODO(PhilD): Add support for clean_shutdown
self._vmops.power_off(instance)
def power_on(self, context, instance, network_info,
@ -657,8 +660,9 @@ class VMwareVCDriver(VMwareESXDriver):
_vmops.reboot(instance, network_info)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
destroy_disks=True, clean_shutdown=False):
"""Destroy VM instance."""
# TODO(PhilD): Add support for clean_shutdown
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.destroy(instance, network_info, destroy_disks)
@ -683,8 +687,9 @@ class VMwareVCDriver(VMwareESXDriver):
_vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
rescue_password, clean_shutdown=True):
"""Rescue the specified instance."""
# TODO(PhilD): Add support for clean_shutdown
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.rescue(context, instance, network_info, image_meta)
@ -693,8 +698,9 @@ class VMwareVCDriver(VMwareESXDriver):
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.unrescue(instance)
def power_off(self, instance):
def power_off(self, instance, clean_shutdown=True):
"""Power off the specified instance."""
# TODO(PhilD): Add support for clean_shutdown
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.power_off(instance)

View File

@ -275,8 +275,9 @@ class XenAPIDriver(driver.ComputeDriver):
self._vmops.change_instance_metadata(instance, diff)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
destroy_disks=True, clean_shutdown=False):
"""Destroy VM instance."""
# TODO(PhilD): Add support for clean_shutdown
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
@ -307,8 +308,9 @@ class XenAPIDriver(driver.ComputeDriver):
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
rescue_password, clean_shutdown=True):
"""Rescue the specified instance."""
# TODO(PhilD): Add support for clean_shutdown
self._vmops.rescue(context, instance, network_info, image_meta,
rescue_password)
@ -320,8 +322,9 @@ class XenAPIDriver(driver.ComputeDriver):
"""Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance):
def power_off(self, instance, clean_shutdown=True):
"""Power off the specified instance."""
# TODO(PhilD): Add support for clean_shutdown
self._vmops.power_off(instance)
def power_on(self, context, instance, network_info,
@ -329,8 +332,9 @@ class XenAPIDriver(driver.ComputeDriver):
"""Power on the specified instance."""
self._vmops.power_on(instance)
def soft_delete(self, instance):
def soft_delete(self, instance, clean_shutdown=True):
"""Soft delete the specified instance."""
# TODO(PhilD): Add support for clean_shutdown
self._vmops.soft_delete(instance)
def restore(self, instance):