Do not use rich objects in servers update

- Use new ServerUpdateProgress tasks that support delayed
  API calls (not only checks).
  As this new task imposes restrictions on the compute plugin API,
  move all Server*Progress classes to a new, plugin-agnostic module.
  Simple unit tests for this class are added
  (a usage sketch follows this list).
- Changed logic in Nova client plugin to not use scheduler tasks.
- Moved server interface attach/detach to Nova client plugin.
- Split resize into two distinct methods, 'resize' and 'verify_resize',
  each with its own checker. This allows better handling of 'delayed'
  VM status transitions such as
  ACTIVE->[resize]->ACTIVE->RESIZE->VERIFY_RESIZE->[confirm]->
  VERIFY_RESIZE->ACTIVE.
  It might also make it easier later to plug a user notification into
  verify_resize instead of optimistically confirming the resize.
- Rebuild/resize and interface attach/detach are also made more
  resilient, tolerating intermittent API failures and allowing the
  update logic to retry the API call later.
- Instance.handle_update was refactored similarly to the Server
  resource, with the building of update tasks moved to separate
  private methods.
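
  A minimal usage sketch of the new flow (simplified; the names mirror
  the diff below, but the surrounding resource plumbing is omitted):

      # handle_update: only build progress objects, no API waiting here
      prg = progress.ServerUpdateProgress(
          self.resource_id, 'resize',
          handler_extra={'args': (flavor_id,)},
          checker_extra={'args': (flavor_id, flavor)})

      # check_update_complete: call the handler once, then poll its checker
      cp = self.client_plugin()
      if not prg.called:
          prg.called = getattr(cp, prg.handler)(*prg.handler_args,
                                                **prg.handler_kwargs)
      elif not prg.complete:
          prg.complete = getattr(cp, prg.checker)(*prg.checker_args,
                                                  **prg.checker_kwargs)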

Change-Id: Ic78adf878aac09e56f5c50052ee78bdf5b11aa21
Partial-Bug: #1393268
Pavlo Shchelokovskyy 2015-07-21 23:08:00 +03:00
parent 748e49a4f5
commit a1819ff069
7 changed files with 488 additions and 222 deletions


@ -35,7 +35,6 @@ from heat.common.i18n import _LW
from heat.engine.clients import client_plugin
from heat.engine import constraints
from heat.engine import resource
from heat.engine import scheduler
LOG = logging.getLogger(__name__)
@ -421,55 +420,88 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
status_reason=errmsg)
return False
@scheduler.wrappertask
def resize(self, server, flavor, flavor_id):
"""Resize the server and then call check_resize task to verify."""
server.resize(flavor_id)
yield self.check_resize(server, flavor, flavor_id)
def rename(self, server, name):
"""Update the name for a server."""
server.update(name)
def check_resize(self, server, flavor, flavor_id):
def resize(self, server_id, flavor_id):
"""Resize the server."""
server = self.fetch_server(server_id)
if server:
server.resize(flavor_id)
return True
else:
return False
def check_resize(self, server_id, flavor_id, flavor):
"""
Verify that a resizing server is properly resized.
If that's the case, confirm the resize; if not, raise an error.
"""
self.refresh_server(server)
server = self.fetch_server(server_id)
# resize operation is asynchronous so the server resize may not start
# when checking server status (the server may stay ACTIVE instead
# of RESIZE).
while server.status in ('RESIZE', 'ACTIVE'):
yield
self.refresh_server(server)
if not server or server.status in ('RESIZE', 'ACTIVE'):
return False
if server.status == 'VERIFY_RESIZE':
server.confirm_resize()
return True
else:
raise exception.Error(
_("Resizing to '%(flavor)s' failed, status '%(status)s'") %
dict(flavor=flavor, status=server.status))
@scheduler.wrappertask
def rebuild(self, server, image_id, password=None,
def verify_resize(self, server_id):
server = self.fetch_server(server_id)
if not server:
return False
status = self.get_status(server)
if status == 'VERIFY_RESIZE':
server.confirm_resize()
return True
else:
msg = _("Could not confirm resize of server %s") % server_id
raise resource.ResourceUnknownStatus(result=msg,
resource_status=status)
def check_verify_resize(self, server_id):
server = self.fetch_server(server_id)
if not server:
return False
status = self.get_status(server)
if status == 'ACTIVE':
return True
if status == 'VERIFY_RESIZE':
return False
else:
msg = _("Confirm resize for server %s failed") % server_id
raise resource.ResourceUnknownStatus(result=msg,
resource_status=status)
def rebuild(self, server_id, image_id, password=None,
preserve_ephemeral=False):
"""Rebuild the server and call check_rebuild to verify."""
server.rebuild(image_id, password=password,
preserve_ephemeral=preserve_ephemeral)
yield self.check_rebuild(server.id, image_id)
server = self.fetch_server(server_id)
if server:
server.rebuild(image_id, password=password,
preserve_ephemeral=preserve_ephemeral)
return True
else:
return False
def check_rebuild(self, server_id, image_id):
def check_rebuild(self, server_id):
"""
Verify that a rebuilding server is rebuilt.
Raise an error if it ends up in an ERROR state.
"""
server = self.fetch_server(server_id)
while (server is None or server.status == 'REBUILD'):
yield
server = self.fetch_server(server_id)
if server is None or server.status == 'REBUILD':
return False
if server.status == 'ERROR':
raise exception.Error(
_("Rebuilding server failed, status '%s'") % server.status)
else:
return True
def meta_serialize(self, metadata):
"""
@ -626,6 +658,22 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
'att': attach_id, 'srv': server_id})
return False
def interface_detach(self, server_id, port_id):
server = self.fetch_server(server_id)
if server:
server.interface_detach(port_id)
return True
else:
return False
def interface_attach(self, server_id, port_id=None, net_id=None, fip=None):
server = self.fetch_server(server_id)
if server:
server.interface_attach(port_id, net_id, fip)
return True
else:
return False
class ServerConstraint(constraints.BaseCustomConstraint):
@ -662,20 +710,3 @@ class NetworkConstraint(constraints.BaseCustomConstraint):
def validate_with_client(self, client, network):
client.client_plugin('nova').get_nova_network_id(network)
# NOTE(pas-ha): these Server*Progress classes are simple key-value storages
# meant to be passed between handle_* and check_*_complete,
# being mutated during subsequent check_*_complete calls.
class ServerCreateProgress(object):
def __init__(self, server_id, complete=False):
self.complete = complete
self.server_id = server_id
class ServerDeleteProgress(object):
def __init__(self, server_id, image_id=None, image_complete=True):
self.server_id = server_id
self.image_id = image_id
self.image_complete = image_complete


@ -0,0 +1,82 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Helper classes that are simple key-value storages
meant to be passed between handle_* and check_*_complete,
being mutated during subsequent check_*_complete calls.
Some of them impose restrictions on client plugin API, thus they are
put in this client-plugin-agnostic module.
"""
class ServerCreateProgress(object):
def __init__(self, server_id, complete=False):
self.complete = complete
self.server_id = server_id
class ServerUpdateProgress(ServerCreateProgress):
"""Keeps track on particular server update task
``handler`` is a method of client plugin performing
required update operation.
It must accept ``server_id`` as first positional argument and
be resilient to intermittent failures, returning ``True`` if the
API was successfully called, ``False`` otherwise.
If the result of the API call is asynchronous, the client plugin must have
a corresponding ``check_<handler>`` method
accepting ``server_id`` as first positional argument and
returning ``True`` or ``False``.
For synchronous API calls,
set ``complete`` attribute of this object to ``True``.
``*_extra`` arguments, if passed to constructor, should be dictionaries of
{'args': tuple(), 'kwargs': dict()}
structure and contain parameters with which corresponding ``handler`` and
``check_<handler>`` methods of client plugin must be called.
(``args`` is automatically prepended with ``server_id``).
Missing ``args`` or ``kwargs`` are interpreted
as empty tuple/dict respectively.
Defaults are interpreted as both ``args`` and ``kwargs`` being empty.
"""
def __init__(self, server_id, handler, complete=False, called=False,
handler_extra=None, checker_extra=None):
super(ServerUpdateProgress, self).__init__(server_id, complete)
self.called = called
self.handler = handler
self.checker = 'check_%s' % handler
# set call arguments based on the passed values and defaults
hargs = handler_extra or {}
self.handler_args = (server_id,) + (hargs.get('args') or ())
self.handler_kwargs = hargs.get('kwargs') or {}
cargs = checker_extra or {}
self.checker_args = (server_id,) + (cargs.get('args') or ())
self.checker_kwargs = cargs.get('kwargs') or {}
class ServerDeleteProgress(object):
def __init__(self, server_id, image_id=None, image_complete=True):
self.server_id = server_id
self.image_id = image_id
self.image_complete = image_complete
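
For illustration, a minimal sketch of a client-plugin handler/checker pair
that satisfies the contract documented in the ServerUpdateProgress docstring
above. ExamplePlugin, do_foo and check_do_foo are invented names (not part of
this change); fetch_server mirrors the Nova plugin helper that returns None
on intermittent API failures:

    class ExamplePlugin(object):
        """Hypothetical client plugin sketch, not part of this commit."""

        def __init__(self, nova_client):
            self.client = nova_client

        def fetch_server(self, server_id):
            # return None instead of raising so callers may retry later
            try:
                return self.client.servers.get(server_id)
            except Exception:
                return None

        def do_foo(self, server_id, some_arg):
            # handler: takes server_id first; returns True only if the
            # API call was actually issued
            server = self.fetch_server(server_id)
            if not server:
                return False
            server.do_foo(some_arg)  # hypothetical asynchronous API call
            return True

        def check_do_foo(self, server_id):
            # checker: polled via check_update_complete until it returns True
            server = self.fetch_server(server_id)
            if not server:
                return False
            return server.status == 'ACTIVE'

    # The resource side would then build:
    # prg = ServerUpdateProgress(server_id, 'do_foo',
    #                            handler_extra={'args': (some_arg,)})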


@ -22,11 +22,10 @@ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine.clients.os import cinder as cinder_cp
from heat.engine.clients.os import nova as nova_cp
from heat.engine.clients import progress
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import scheduler
cfg.CONF.import_opt('stack_scheduler_hints', 'heat.common.config')
@ -568,7 +567,7 @@ class Instance(resource.Resource):
if server is not None:
self.resource_id_set(server.id)
creator = nova_cp.ServerCreateProgress(server.id)
creator = progress.ServerCreateProgress(server.id)
attachers = []
for vol_id, device in self.volumes():
attachers.append(cinder_cp.VolumeAttachProgress(self.resource_id,
@ -633,10 +632,103 @@ class Instance(resource.Resource):
raise exception.Error(_("Instance is not ACTIVE (was: %s)") %
server.status.strip())
def _update_instance_type(self, prop_diff):
flavor = prop_diff[self.INSTANCE_TYPE]
flavor_id = self.client_plugin().get_flavor_id(flavor)
handler_args = {'args': (flavor_id,)}
checker_args = {'args': (flavor_id, flavor)}
prg_resize = progress.ServerUpdateProgress(self.resource_id,
'resize',
handler_extra=handler_args,
checker_extra=checker_args)
prg_verify = progress.ServerUpdateProgress(self.resource_id,
'verify_resize')
return prg_resize, prg_verify
def _update_network_interfaces(self, server, prop_diff):
updaters = []
new_network_ifaces = prop_diff.get(self.NETWORK_INTERFACES)
old_network_ifaces = self.properties.get(self.NETWORK_INTERFACES)
subnet_id = (
prop_diff.get(self.SUBNET_ID) or
self.properties.get(self.SUBNET_ID))
security_groups = self._get_security_groups()
# if there are entries in both old_network_ifaces and new_network_ifaces,
# remove the matching entries from both old and new ifaces
if old_network_ifaces and new_network_ifaces:
# there are four situations:
# 1.old includes new, such as: old = 2,3, new = 2
# 2.new includes old, such as: old = 2,3, new = 1,2,3
# 3.has overlaps, such as: old = 2,3, new = 1,2
# 4.different, such as: old = 2,3, new = 1,4
# detach unmatched ones in old, attach unmatched ones in new
self._remove_matched_ifaces(old_network_ifaces,
new_network_ifaces)
if old_network_ifaces:
old_nics = self._build_nics(old_network_ifaces)
for nic in old_nics:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_detach',
complete=True,
handler_extra={'args': (nic['port-id'],)})
)
if new_network_ifaces:
new_nics = self._build_nics(new_network_ifaces)
for nic in new_nics:
handler_kwargs = {'port_id': nic['port-id']}
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_attach',
complete=True,
handler_extra={'kwargs': handler_kwargs})
)
# if the interfaces do not come from the property 'NetworkInterfaces',
# the situation is somewhat more complex, so detach the old ifaces
# and then attach the new ones.
else:
if not server:
server = self.client().servers.get(self.resource_id)
interfaces = server.interface_list()
for iface in interfaces:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_detach',
complete=True,
handler_extra={'args': (iface.port_id,)})
)
# first delete the port which was implicitly created by heat
self._port_data_delete()
nics = self._build_nics(new_network_ifaces,
security_groups=security_groups,
subnet_id=subnet_id)
# if the 'SubnetId' property is empty (or None) and the
# 'NetworkInterfaces' property is empty (or None),
# _build_nics() will return nics = None; we should attach the
# first free port, according to similar behavior during
# instance creation
if not nics:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_attach', complete=True)
)
else:
for nic in nics:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_attach',
complete=True,
handler_extra={'kwargs':
{'port_id': nic['port-id']}})
)
return updaters
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if 'Metadata' in tmpl_diff:
self.metadata_set(tmpl_diff['Metadata'])
checkers = []
updaters = []
server = None
if self.TAGS in prop_diff:
server = self.client().servers.get(self.resource_id)
@ -644,90 +736,30 @@ class Instance(resource.Resource):
server, self._get_nova_metadata(prop_diff))
if self.INSTANCE_TYPE in prop_diff:
flavor = prop_diff[self.INSTANCE_TYPE]
flavor_id = self.client_plugin().get_flavor_id(flavor)
if not server:
server = self.client().servers.get(self.resource_id)
checker = scheduler.TaskRunner(self.client_plugin().resize,
server, flavor, flavor_id)
checkers.append(checker)
updaters.extend(self._update_instance_type(prop_diff))
if self.NETWORK_INTERFACES in prop_diff:
new_network_ifaces = prop_diff.get(self.NETWORK_INTERFACES)
old_network_ifaces = self.properties.get(self.NETWORK_INTERFACES)
subnet_id = (
prop_diff.get(self.SUBNET_ID) or
self.properties.get(self.SUBNET_ID))
security_groups = self._get_security_groups()
if not server:
server = self.client().servers.get(self.resource_id)
# if there is entrys in old_network_ifaces and new_network_ifaces,
# remove the same entrys from old and new ifaces
if old_network_ifaces and new_network_ifaces:
# there are four situations:
# 1.old includes new, such as: old = 2,3, new = 2
# 2.new includes old, such as: old = 2,3, new = 1,2,3
# 3.has overlaps, such as: old = 2,3, new = 1,2
# 4.different, such as: old = 2,3, new = 1,4
# detach unmatched ones in old, attach unmatched ones in new
self._remove_matched_ifaces(old_network_ifaces,
new_network_ifaces)
if old_network_ifaces:
old_nics = self._build_nics(old_network_ifaces)
for nic in old_nics:
checker = scheduler.TaskRunner(
server.interface_detach,
nic['port-id'])
checkers.append(checker)
if new_network_ifaces:
new_nics = self._build_nics(new_network_ifaces)
for nic in new_nics:
checker = scheduler.TaskRunner(
server.interface_attach,
nic['port-id'],
None, None)
checkers.append(checker)
# if the interfaces not come from property 'NetworkInterfaces',
# the situation is somewhat complex, so to detach the old ifaces,
# and then attach the new ones.
else:
interfaces = server.interface_list()
for iface in interfaces:
checker = scheduler.TaskRunner(server.interface_detach,
iface.port_id)
checkers.append(checker)
# first to delete the port which implicit-created by heat
self._port_data_delete()
nics = self._build_nics(new_network_ifaces,
security_groups=security_groups,
subnet_id=subnet_id)
# 'SubnetId' property is empty(or None) and
# 'NetworkInterfaces' property is empty(or None),
# _build_nics() will return nics = None,we should attach
# first free port, according to similar behavior during
# instance creation
if not nics:
checker = scheduler.TaskRunner(server.interface_attach,
None, None, None)
checkers.append(checker)
else:
for nic in nics:
checker = scheduler.TaskRunner(
server.interface_attach,
nic['port-id'], None, None)
checkers.append(checker)
updaters.extend(self._update_network_interfaces(server, prop_diff))
if checkers:
checkers[0].start()
return checkers
# NOTE(pas-ha) optimization is possible (starting first task
# right away), but we'd rather not, as this method already might
# have called several APIs
return updaters
def check_update_complete(self, checkers):
'''Push all checkers to completion in list order.'''
for checker in checkers:
if not checker.started():
checker.start()
if not checker.step():
def check_update_complete(self, updaters):
'''Push all updaters to completion in list order.'''
for prg in updaters:
if not prg.called:
handler = getattr(self.client_plugin(), prg.handler)
prg.called = handler(*prg.handler_args,
**prg.handler_kwargs)
return False
return True
if not prg.complete:
check_complete = getattr(self.client_plugin(), prg.checker)
prg.complete = check_complete(*prg.checker_args,
**prg.checker_kwargs)
break
return all(prg.complete for prg in updaters)
def metadata_update(self, new_metadata=None):
'''


@ -25,14 +25,13 @@ from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine.clients.os import nova as nova_cp
from heat.engine.clients import progress
from heat.engine import constraints
from heat.engine import function
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.neutron import subnet
from heat.engine.resources import stack_user
from heat.engine import scheduler
from heat.engine import support
from heat.rpc import api as rpc_api
@ -1025,7 +1024,7 @@ class Server(stack_user.StackUser):
if net is not None:
net['port'] = props['port']
def _update_flavor(self, server, prop_diff):
def _update_flavor(self, prop_diff):
flavor_update_policy = (
prop_diff.get(self.FLAVOR_UPDATE_POLICY) or
self.properties[self.FLAVOR_UPDATE_POLICY])
@ -1035,12 +1034,18 @@ class Server(stack_user.StackUser):
raise resource.UpdateReplace(self.name)
flavor_id = self.client_plugin().get_flavor_id(flavor)
if not server:
server = self.client().servers.get(self.resource_id)
return scheduler.TaskRunner(self.client_plugin().resize,
server, flavor, flavor_id)
handler_args = {'args': (flavor_id,)}
checker_args = {'args': (flavor_id, flavor)}
def _update_image(self, server, prop_diff):
prg_resize = progress.ServerUpdateProgress(self.resource_id,
'resize',
handler_extra=handler_args,
checker_extra=checker_args)
prg_verify = progress.ServerUpdateProgress(self.resource_id,
'verify_resize')
return prg_resize, prg_verify
def _update_image(self, prop_diff):
image_update_policy = (
prop_diff.get(self.IMAGE_UPDATE_POLICY) or
self.properties[self.IMAGE_UPDATE_POLICY])
@ -1048,19 +1053,20 @@ class Server(stack_user.StackUser):
raise resource.UpdateReplace(self.name)
image = prop_diff[self.IMAGE]
image_id = self.client_plugin('glance').get_image_id(image)
if not server:
server = self.client().servers.get(self.resource_id)
preserve_ephemeral = (
image_update_policy == 'REBUILD_PRESERVE_EPHEMERAL')
password = (prop_diff.get(self.ADMIN_PASS) or
self.properties[self.ADMIN_PASS])
return scheduler.TaskRunner(
self.client_plugin().rebuild, server, image_id,
password=password,
preserve_ephemeral=preserve_ephemeral)
kwargs = {'password': password,
'preserve_ephemeral': preserve_ephemeral}
prg = progress.ServerUpdateProgress(self.resource_id,
'rebuild',
handler_extra={'args': (image_id,),
'kwargs': kwargs})
return prg
def _update_networks(self, server, prop_diff):
checkers = []
updaters = []
new_networks = prop_diff.get(self.NETWORKS)
attach_first_free_port = False
if not new_networks:
@ -1076,9 +1082,12 @@ class Server(stack_user.StackUser):
# free port. so we should detach this interface.
if old_networks is None:
for iface in interfaces:
checker = scheduler.TaskRunner(server.interface_detach,
iface.port_id)
checkers.append(checker)
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_detach',
complete=True,
handler_extra={'args': (iface.port_id,)})
)
# if we have any information in networks field, we should:
# 1. find similar networks, if they exist
@ -1099,44 +1108,50 @@ class Server(stack_user.StackUser):
# will be deleted
for net in old_networks:
if net.get(self.NETWORK_PORT):
checker = scheduler.TaskRunner(server.interface_detach,
net.get(self.NETWORK_PORT))
checkers.append(checker)
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_detach',
complete=True,
handler_extra={'args':
(net.get(self.NETWORK_PORT),)})
)
handler_kwargs = {'port_id': None, 'net_id': None, 'fip': None}
# attach section similar for both variants that
# were mentioned above
for net in new_networks:
if net.get(self.NETWORK_PORT):
checker = scheduler.TaskRunner(server.interface_attach,
net.get(self.NETWORK_PORT),
None, None)
checkers.append(checker)
handler_kwargs['port_id'] = net.get(self.NETWORK_PORT)
elif net.get(self.NETWORK_ID):
checker = scheduler.TaskRunner(server.interface_attach,
None, self._get_network_id(net),
net.get('fixed_ip'))
checkers.append(checker)
handler_kwargs['net_id'] = self._get_network_id(net)
handler_kwargs['fip'] = net.get('fixed_ip')
elif net.get('uuid'):
checker = scheduler.TaskRunner(server.interface_attach,
None, net['uuid'],
net.get('fixed_ip'))
checkers.append(checker)
handler_kwargs['net_id'] = net['uuid']
handler_kwargs['fip'] = net.get('fixed_ip')
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_attach',
complete=True,
handler_extra={'kwargs': handler_kwargs})
)
# if new_networks is None, we should attach first free port,
# according to similar behavior during instance creation
if attach_first_free_port:
checker = scheduler.TaskRunner(server.interface_attach,
None, None, None)
checkers.append(checker)
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_attach',
complete=True)
)
return checkers
return updaters
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if 'Metadata' in tmpl_diff:
self.metadata_set(tmpl_diff['Metadata'])
checkers = []
updaters = []
server = None
if self.METADATA in prop_diff:
@ -1145,10 +1160,10 @@ class Server(stack_user.StackUser):
prop_diff[self.METADATA])
if self.FLAVOR in prop_diff:
checkers.append(self._update_flavor(server, prop_diff))
updaters.extend(self._update_flavor(prop_diff))
if self.IMAGE in prop_diff:
checkers.append(self._update_image(server, prop_diff))
updaters.append(self._update_image(prop_diff))
elif self.ADMIN_PASS in prop_diff:
if not server:
server = self.client().servers.get(self.resource_id)
@ -1160,23 +1175,27 @@ class Server(stack_user.StackUser):
self.client_plugin().rename(server, prop_diff[self.NAME])
if self.NETWORKS in prop_diff:
checkers.extend(self._update_networks(server, prop_diff))
updaters.extend(self._update_networks(server, prop_diff))
# Optimization: make sure the first task is started before
# check_update_complete.
if checkers:
checkers[0].start()
# NOTE(pas-ha) optimization is possible (starting first task
# right away), but we'd rather not, as this method already might
# have called several APIs
return updaters
return checkers
def check_update_complete(self, checkers):
'''Push all checkers to completion in list order.'''
for checker in checkers:
if not checker.started():
checker.start()
if not checker.step():
def check_update_complete(self, updaters):
'''Push all updaters to completion in list order.'''
for prg in updaters:
if not prg.called:
handler = getattr(self.client_plugin(), prg.handler)
prg.called = handler(*prg.handler_args,
**prg.handler_kwargs)
return False
return True
if not prg.complete:
check_complete = getattr(self.client_plugin(), prg.checker)
prg.complete = check_complete(*prg.checker_args,
**prg.checker_kwargs)
break
return all(prg.complete for prg in updaters)
def metadata_update(self, new_metadata=None):
'''
@ -1380,7 +1399,7 @@ class Server(stack_user.StackUser):
if state[0] != self.FAILED:
image_id = self.client().servers.create_image(
self.resource_id, self.physical_resource_name())
return nova_cp.ServerDeleteProgress(
return progress.ServerDeleteProgress(
self.resource_id, image_id, False)
return self.handle_delete()
@ -1399,24 +1418,24 @@ class Server(stack_user.StackUser):
except Exception as e:
self.client_plugin().ignore_not_found(e)
return
return nova_cp.ServerDeleteProgress(self.resource_id)
return progress.ServerDeleteProgress(self.resource_id)
def check_delete_complete(self, progress):
if not progress:
def check_delete_complete(self, prg):
if not prg:
return True
if not progress.image_complete:
image = self.client().images.get(progress.image_id)
if not prg.image_complete:
image = self.client().images.get(prg.image_id)
if image.status in ('DELETED', 'ERROR'):
raise exception.Error(image.status)
elif image.status == 'ACTIVE':
progress.image_complete = True
prg.image_complete = True
if not self.handle_delete():
return True
return False
return self.client_plugin().check_delete_server_complete(
progress.server_id)
prg.server_id)
def handle_suspend(self):
'''


@ -26,6 +26,7 @@ from heat.engine.clients.os import cinder
from heat.engine.clients.os import glance
from heat.engine.clients.os import neutron
from heat.engine.clients.os import nova
from heat.engine.clients import progress
from heat.engine import environment
from heat.engine import resource
from heat.engine.resources.aws.ec2 import instance as instances
@ -518,7 +519,7 @@ class InstancesTest(common.HeatTestCase):
instance = self._create_test_instance(return_server,
'test_instance_create')
creator = nova.ServerCreateProgress(instance.resource_id)
creator = progress.ServerCreateProgress(instance.resource_id)
self.m.StubOutWithMock(self.fc.servers, 'get')
return_server.status = 'BOGUS'
self.fc.servers.get(instance.resource_id).AndReturn(return_server)
@ -536,7 +537,7 @@ class InstancesTest(common.HeatTestCase):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'test_instance_create')
creator = nova.ServerCreateProgress(instance.resource_id)
creator = progress.ServerCreateProgress(instance.resource_id)
return_server.status = 'ERROR'
return_server.fault = {
'message': 'NoValidHost',
@ -562,7 +563,7 @@ class InstancesTest(common.HeatTestCase):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
'in_create')
creator = nova.ServerCreateProgress(instance.resource_id)
creator = progress.ServerCreateProgress(instance.resource_id)
return_server.status = 'ERROR'
self.m.StubOutWithMock(self.fc.servers, 'get')
@ -721,11 +722,26 @@ class InstancesTest(common.HeatTestCase):
update_template['Properties']['InstanceType'] = 'm1.small'
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get('1234').AndReturn(return_server)
def activate_status(server):
server.status = 'VERIFY_RESIZE'
return_server.get = activate_status.__get__(return_server)
def status_resize(*args):
return_server.status = 'RESIZE'
def status_verify_resize(*args):
return_server.status = 'VERIFY_RESIZE'
def status_active(*args):
return_server.status = 'ACTIVE'
self.fc.servers.get('1234').WithSideEffects(
status_active).AndReturn(return_server)
self.fc.servers.get('1234').WithSideEffects(
status_resize).AndReturn(return_server)
self.fc.servers.get('1234').WithSideEffects(
status_verify_resize).AndReturn(return_server)
self.fc.servers.get('1234').WithSideEffects(
status_verify_resize).AndReturn(return_server)
self.fc.servers.get('1234').WithSideEffects(
status_active).AndReturn(return_server)
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
self.fc.client.post_servers_1234_action(
@ -753,11 +769,18 @@ class InstancesTest(common.HeatTestCase):
update_template['Properties']['InstanceType'] = 'm1.small'
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get('1234').AndReturn(return_server)
def fail_status(server):
server.status = 'ERROR'
return_server.get = fail_status.__get__(return_server)
def status_resize(*args):
return_server.status = 'RESIZE'
def status_error(*args):
return_server.status = 'ERROR'
self.fc.servers.get('1234').AndReturn(return_server)
self.fc.servers.get('1234').WithSideEffects(
status_resize).AndReturn(return_server)
self.fc.servers.get('1234').WithSideEffects(
status_error).AndReturn(return_server)
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
self.fc.client.post_servers_1234_action(
@ -810,7 +833,7 @@ class InstancesTest(common.HeatTestCase):
update_template['Properties']['NetworkInterfaces'] = new_interfaces
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get('1234').AndReturn(return_server)
self.fc.servers.get('1234').MultipleTimes().AndReturn(return_server)
self.m.StubOutWithMock(return_server, 'interface_detach')
return_server.interface_detach(
'd1e9c73c-04fe-4e9e-983c-d5ef94cd1a46').AndReturn(None)
@ -918,15 +941,15 @@ class InstancesTest(common.HeatTestCase):
update_template['Properties']['NetworkInterfaces'] = new_interfaces
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get('1234').AndReturn(return_server)
self.fc.servers.get('1234').MultipleTimes().AndReturn(return_server)
self.m.StubOutWithMock(return_server, 'interface_detach')
return_server.interface_detach(
'ea29f957-cd35-4364-98fb-57ce9732c10d').AndReturn(None)
self.m.StubOutWithMock(return_server, 'interface_attach')
return_server.interface_attach('d1e9c73c-04fe-4e9e-983c-d5ef94cd1a46',
None, None).AndReturn(None)
None, None).InAnyOrder().AndReturn(None)
return_server.interface_attach('34b752ec-14de-416a-8722-9531015e04a5',
None, None).AndReturn(None)
None, None).InAnyOrder().AndReturn(None)
self.m.ReplayAll()
@ -956,7 +979,7 @@ class InstancesTest(common.HeatTestCase):
update_template['Properties']['NetworkInterfaces'] = new_interfaces
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get('1234').AndReturn(return_server)
self.fc.servers.get('1234').MultipleTimes().AndReturn(return_server)
self.m.StubOutWithMock(return_server, 'interface_list')
return_server.interface_list().AndReturn([iface])
self.m.StubOutWithMock(return_server, 'interface_detach')
@ -992,7 +1015,7 @@ class InstancesTest(common.HeatTestCase):
update_template['Properties']['NetworkInterfaces'] = []
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get('1234').AndReturn(return_server)
self.fc.servers.get('1234').MultipleTimes().AndReturn(return_server)
self.m.StubOutWithMock(return_server, 'interface_list')
return_server.interface_list().AndReturn([iface])
self.m.StubOutWithMock(return_server, 'interface_detach')
@ -1026,7 +1049,7 @@ class InstancesTest(common.HeatTestCase):
update_template['Properties']['SubnetId'] = subnet_id
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get('1234').AndReturn(return_server)
self.fc.servers.get('1234').MultipleTimes().AndReturn(return_server)
self.m.StubOutWithMock(return_server, 'interface_list')
return_server.interface_list().AndReturn([iface])
self.m.StubOutWithMock(return_server, 'interface_detach')


@ -0,0 +1,48 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.engine.clients import progress
from heat.tests import common
class ServerUpdateProgressObjectTests(common.HeatTestCase):
def setUp(self):
super(ServerUpdateProgressObjectTests, self).setUp()
self.server_id = '1234'
self.handler = 'test'
def _assert_common(self, prg):
self.assertEqual(self.server_id, prg.server_id)
self.assertEqual(self.handler, prg.handler)
self.assertEqual('check_%s' % self.handler, prg.checker)
self.assertFalse(prg.called)
self.assertFalse(prg.complete)
def test_extra_all_defaults(self):
prg = progress.ServerUpdateProgress(self.server_id, self.handler)
self._assert_common(prg)
self.assertEqual((self.server_id,), prg.handler_args)
self.assertEqual((self.server_id,), prg.checker_args)
self.assertEqual({}, prg.handler_kwargs)
self.assertEqual({}, prg.checker_kwargs)
def test_handler_extra_kwargs_missing(self):
handler_extra = {'args': ()}
prg = progress.ServerUpdateProgress(self.server_id, self.handler,
handler_extra=handler_extra)
self._assert_common(prg)
self.assertEqual((self.server_id,), prg.handler_args)
self.assertEqual((self.server_id,), prg.checker_args)
self.assertEqual({}, prg.handler_kwargs)
self.assertEqual({}, prg.checker_kwargs)


@ -1588,11 +1588,26 @@ class ServersTest(common.HeatTestCase):
self._stub_glance_for_update()
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get('1234').AndReturn(return_server)
def activate_status(server):
server.status = 'VERIFY_RESIZE'
return_server.get = activate_status.__get__(return_server)
def status_resize(*args):
return_server.status = 'RESIZE'
def status_verify_resize(*args):
return_server.status = 'VERIFY_RESIZE'
def status_active(*args):
return_server.status = 'ACTIVE'
self.fc.servers.get('1234').WithSideEffects(
status_active).AndReturn(return_server)
self.fc.servers.get('1234').WithSideEffects(
status_resize).AndReturn(return_server)
self.fc.servers.get('1234').WithSideEffects(
status_verify_resize).AndReturn(return_server)
self.fc.servers.get('1234').WithSideEffects(
status_verify_resize).AndReturn(return_server)
self.fc.servers.get('1234').WithSideEffects(
status_active).AndReturn(return_server)
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
self.fc.client.post_servers_1234_action(
@ -1620,11 +1635,18 @@ class ServersTest(common.HeatTestCase):
update_template['Properties']['flavor'] = 'm1.small'
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get('1234').AndReturn(return_server)
def fail_status(server):
server.status = 'ERROR'
return_server.get = fail_status.__get__(return_server)
def status_resize(*args):
return_server.status = 'RESIZE'
def status_error(*args):
return_server.status = 'ERROR'
self.fc.servers.get('1234').AndReturn(return_server)
self.fc.servers.get('1234').WithSideEffects(
status_resize).AndReturn(return_server)
self.fc.servers.get('1234').WithSideEffects(
status_error).AndReturn(return_server)
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
self.fc.client.post_servers_1234_action(
@ -1658,22 +1680,31 @@ class ServersTest(common.HeatTestCase):
self._stub_glance_for_update()
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get('1234').AndReturn(server)
# define the status transitions during server resize:
# ACTIVE(initial) -> ACTIVE -> RESIZE -> VERIFY_RESIZE
def active_status(srv):
srv.status = 'ACTIVE'
server.get = active_status.__get__(server)
def status_resize(*args):
server.status = 'RESIZE'
def resize_status(srv):
srv.status = 'RESIZE'
server.get = resize_status.__get__(server)
def status_verify_resize(*args):
server.status = 'VERIFY_RESIZE'
def verify_resize_status(srv):
srv.status = 'VERIFY_RESIZE'
server.get = verify_resize_status.__get__(server)
def status_active(*args):
server.status = 'ACTIVE'
self.fc.servers.get('1234').WithSideEffects(
status_active).AndReturn(server)
self.fc.servers.get('1234').WithSideEffects(
status_active).AndReturn(server)
self.fc.servers.get('1234').WithSideEffects(
status_resize).AndReturn(server)
self.fc.servers.get('1234').WithSideEffects(
status_verify_resize).AndReturn(server)
self.fc.servers.get('1234').WithSideEffects(
status_verify_resize).AndReturn(server)
self.fc.servers.get('1234').WithSideEffects(
status_active).AndReturn(server)
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
self.fc.client.post_servers_1234_action(