heat 7.0.0 release

meta:version: 7.0.0
 meta:diff-start: 6.0.0.0rc1
 meta:series: newton
 meta:release-type: release
 meta:announce: openstack-announce@lists.openstack.org
 meta:pypi: no
 meta:first: yes
 meta:release:Author: Doug Hellmann <doug@doughellmann.com>
 meta:release:Commit: Doug Hellmann <doug@doughellmann.com>
 meta:release:Change-Id: I74b66ffd484f2f3a2b84c39e62bcb718cef7b906
 meta:release:Code-Review+1: Ian Cordasco <sigmavirus24@gmail.com>
 meta:release:Code-Review+1: Sean McGinnis <sean.mcginnis@gmail.com>
 meta:release:Code-Review+1: Vitaly Gridnev <vgridnev@mirantis.com>
 meta:release:Code-Review+1: Rob Cresswell <robert.cresswell@outlook.com>
 meta:release:Code-Review+1: Steve Martinelli <s.martinelli@gmail.com>
 meta:release:Code-Review+1: Steve McLellan <steven.j.mclellan@gmail.com>
 meta:release:Code-Review+1: Richard Jones <r1chardj0n3s@gmail.com>
 meta:release:Code-Review+2: Davanum Srinivas (dims) <davanum@gmail.com>
 meta:release:Code-Review+2: Thierry Carrez <thierry@openstack.org>
 meta:release:Code-Review+1: Emilien Macchi <emilien@redhat.com>
 meta:release:Code-Review+1: Julien Danjou <julien@danjou.info>
 meta:release:Code-Review+1: amrith <amrith@tesora.com>
 meta:release:Code-Review+1: Graham Hayes <graham.hayes@hpe.com>
 meta:release:Code-Review+1: Jim Rollenhagen <jim@jimrollenhagen.com>
 meta:release:Code-Review+2: Doug Hellmann <doug@doughellmann.com>
 meta:release:Workflow+1: Doug Hellmann <doug@doughellmann.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJX9k2sAAoJENljH+rwzGInpGgIALJNiyDYegj+C99xU6m/3vKH
 LKFZ6e3SF+n2x55DcAnEkrx55HZTvluBHmoH0G38msxtRaTnuSPs/N+HsjVxQGYS
 Js2GbB3H9okn1ABO7aTwDcjqVR80w1Dy7CCMtw80kCj0LTrIR2OoBTcq2n1JSQEY
 vdbMPh2ikSr6joVXIKFLTScyjcYRadXYFVAIxfxduIL2XRYicYZFbbI6yeSj3HcX
 Gnw4GQHzSW2AmYwGG9swzWJtALLQqZPkNDUXizis76190ieSx0LdwfHFjU+Ba1Rc
 lif8FYCLxEZXC0klD8/vUn4uqqyNiqeAnO37UyH4aGsbTSdFDuEztVCBsf8j3HU=
 =RUxe
 -----END PGP SIGNATURE-----

Merge tag '7.0.0' into debian/newton

heat 7.0.0 release

  * New upstream release.

Change-Id: I7fe0fafa47667b52edd31d6987354c03b85cc097
This commit is contained in:
Thomas Goirand 2016-10-06 17:49:15 +02:00
commit 609540fc98
30 changed files with 546 additions and 173 deletions

6
debian/changelog vendored
View File

@ -1,3 +1,9 @@
heat (1:7.0.0-1) unstable; urgency=medium
* New upstream release.
-- Thomas Goirand <zigo@debian.org> Thu, 06 Oct 2016 17:50:16 +0200
heat (1:7.0.0~rc1-2) unstable; urgency=medium
[ Ondřej Nový ]

4
debian/control vendored
View File

@ -43,7 +43,7 @@ Build-Depends-Indep: bandit (>= 0.13.2),
python-mock (>= 2.0),
python-monascaclient (>= 1.1.0),
python-mysqldb,
python-netaddr (>= 0.7.12),
python-netaddr (>= 0.7.13),
python-neutronclient (>= 1:5.1.0),
python-novaclient (>= 2:2.29.0),
python-openstackclient (>= 2.1.0),
@ -127,7 +127,7 @@ Depends: python-aodhclient (>= 0.5.0),
python-mistralclient (>= 2.0.0),
python-monascaclient (>= 1.1.0),
python-mysqldb,
python-netaddr (>= 0.7.12),
python-netaddr (>= 0.7.13),
python-neutronclient (>= 1:5.1.0),
python-novaclient (>= 2:2.29.0),
python-openstackclient (>= 2.1.0),

View File

@ -21,7 +21,7 @@ Last-Update: 2016-07-15
-keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 # Apache-2.0
+keystonemiddleware
lxml>=2.3 # BSD
-netaddr!=0.7.16,>=0.7.12 # BSD
-netaddr!=0.7.16,>=0.7.13 # BSD
+netaddr
oslo.cache>=1.5.0 # Apache-2.0
oslo.config>=3.14.0 # Apache-2.0

View File

@ -94,8 +94,8 @@ class CheckResource(object):
latest_stack = parser.Stack.load(cnxt, stack_id=stack.id,
force_reload=True)
if traversal != latest_stack.current_traversal:
self._retrigger_check_resource(cnxt, is_update, rsrc_id,
latest_stack)
self.retrigger_check_resource(cnxt, is_update, rsrc_id,
latest_stack)
def _handle_stack_timeout(self, cnxt, stack):
failure_reason = u'Timed out'
@ -158,7 +158,7 @@ class CheckResource(object):
return False
def _retrigger_check_resource(self, cnxt, is_update, resource_id, stack):
def retrigger_check_resource(self, cnxt, is_update, resource_id, stack):
current_traversal = stack.current_traversal
graph = stack.convergence_dependencies.graph()
key = (resource_id, is_update)
@ -239,8 +239,8 @@ class CheckResource(object):
current_traversal)
return
self._retrigger_check_resource(cnxt, is_update,
resource_id, stack)
self.retrigger_check_resource(cnxt, is_update,
resource_id, stack)
else:
raise

View File

@ -37,7 +37,7 @@ class Event(object):
already in the database.
"""
self.context = context
self.stack = stack
self._stack_identifier = stack.identifier()
self.action = action
self.status = status
self.reason = reason
@ -57,7 +57,7 @@ class Event(object):
ev = {
'resource_name': self.resource_name,
'physical_resource_id': self.physical_resource_id,
'stack_id': self.stack.id,
'stack_id': self._stack_identifier.stack_id,
'resource_action': self.action,
'resource_status': self.status,
'resource_status_reason': self.reason,
@ -114,7 +114,7 @@ class Event(object):
return None
res_id = identifier.ResourceIdentifier(
resource_name=self.resource_name, **self.stack.identifier())
resource_name=self.resource_name, **self._stack_identifier)
return identifier.EventIdentifier(event_id=str(self.uuid), **res_id)
@ -127,7 +127,7 @@ class Event(object):
'payload': {
'resource_name': self.resource_name,
'physical_resource_id': self.physical_resource_id,
'stack_id': self.stack.id,
'stack_id': self._stack_identifier.stack_id,
'resource_action': self.action,
'resource_status': self.status,
'resource_status_reason': self.reason,

View File

@ -1813,14 +1813,15 @@ class Resource(object):
raise
if not updated_ok:
ex = exception.UpdateInProgress(self.name)
LOG.error(_LE(
'Error acquiring lock for resource id:%(resource_id)s with '
'atomic_key:%(atomic_key)s, '
'engine_id:%(rs_engine_id)s/%(engine_id)s') % {
'resource_id': rs.id, 'atomic_key': rs.atomic_key,
'rs_engine_id': rs.engine_id, 'engine_id': engine_id})
raise ex
LOG.info(_LI('Resource %s is locked for update; deferring'),
six.text_type(self))
LOG.debug(('Resource id:%(resource_id)s with '
'atomic_key:%(atomic_key)s, locked '
'by engine_id:%(rs_engine_id)s/%(engine_id)s') % {
'resource_id': rs.id, 'atomic_key': rs.atomic_key,
'rs_engine_id': rs.engine_id,
'engine_id': engine_id})
raise exception.UpdateInProgress(self.name)
def _release(self, engine_id):
rs = None

View File

@ -642,6 +642,19 @@ class SoftwareDeploymentGroup(resource_group.ResourceGroup):
default=0),
}
update_policy_schema = {
resource_group.ResourceGroup.ROLLING_UPDATE: properties.Schema(
properties.Schema.MAP,
schema=rolling_update_schema,
support_status=support.SupportStatus(version='7.0.0')
),
resource_group.ResourceGroup.BATCH_CREATE: properties.Schema(
properties.Schema.MAP,
schema=resource_group.ResourceGroup.batch_create_schema,
support_status=support.SupportStatus(version='7.0.0')
)
}
def get_size(self):
return len(self.properties[self.SERVERS])

View File

@ -250,7 +250,8 @@ class Port(neutron.NeutronResource):
]
),
},
)
),
update_allowed=True,
),
VNIC_TYPE: properties.Schema(
properties.Schema.STRING,

View File

@ -13,6 +13,7 @@
import itertools
import eventlet
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import netutils
@ -21,9 +22,7 @@ import retrying
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import resource
from heat.engine.resources.openstack.neutron import port as neutron_port
LOG = logging.getLogger(__name__)
@ -413,6 +412,46 @@ class ServerNetworkMixin(object):
elif not self.is_using_neutron():
self._floating_ip_nova_associate(floating_ip)
@staticmethod
def get_all_ports(server):
return itertools.chain(
server._data_get_ports(),
server._data_get_ports('external_ports')
)
def detach_ports(self, server):
existing_server_id = server.resource_id
for port in self.get_all_ports(server):
self.client_plugin().interface_detach(
existing_server_id, port['id'])
try:
if self.client_plugin().check_interface_detach(
existing_server_id, port['id']):
LOG.info(_LI('Detach interface %(port)s successful from '
'server %(server)s.')
% {'port': port['id'],
'server': existing_server_id})
except retrying.RetryError:
raise exception.InterfaceDetachFailed(
port=port['id'], server=existing_server_id)
def attach_ports(self, server):
prev_server_id = server.resource_id
for port in self.get_all_ports(server):
self.client_plugin().interface_attach(prev_server_id,
port['id'])
try:
if self.client_plugin().check_interface_attach(
prev_server_id, port['id']):
LOG.info(_LI('Attach interface %(port)s successful to '
'server %(server)s')
% {'port': port['id'],
'server': prev_server_id})
except retrying.RetryError:
raise exception.InterfaceAttachFailed(
port=port['id'], server=prev_server_id)
def prepare_ports_for_replace(self):
if not self.is_using_neutron():
return
@ -426,21 +465,7 @@ class ServerNetworkMixin(object):
for port_type, port in port_data:
data[port_type].append({'id': port['id']})
# detach the ports from the server
server_id = self.resource_id
for port_type, port in port_data:
self.client_plugin().interface_detach(server_id, port['id'])
try:
if self.client_plugin().check_interface_detach(
server_id, port['id']):
LOG.info(_LI('Detach interface %(port)s successful '
'from server %(server)s when prepare '
'for replace.')
% {'port': port['id'],
'server': server_id})
except retrying.RetryError:
raise exception.InterfaceDetachFailed(
port=port['id'], server=server_id)
self.detach_ports(self)
def restore_ports_after_rollback(self, convergence):
if not self.is_using_neutron():
@ -460,46 +485,23 @@ class ServerNetworkMixin(object):
else:
existing_server = self
port_data = itertools.chain(
existing_server._data_get_ports(),
existing_server._data_get_ports('external_ports')
)
existing_server_id = existing_server.resource_id
for port in port_data:
# detach the ports from current resource
self.client_plugin().interface_detach(
existing_server_id, port['id'])
# Wait until server will move to active state. We can't
# detach interfaces from server in BUILDING state.
# In case of convergence, the replacement resource may be
# created but never have been worked on because the rollback was
trigerred or new update was trigerred. [sic: "triggered" — typo present in the upstream comment]
if existing_server.resource_id is not None:
try:
if self.client_plugin().check_interface_detach(
existing_server_id, port['id']):
LOG.info(_LI('Detach interface %(port)s successful from '
'server %(server)s when restore after '
'rollback.')
% {'port': port['id'],
'server': existing_server_id})
except retrying.RetryError:
raise exception.InterfaceDetachFailed(
port=port['id'], server=existing_server_id)
while True:
active = self.client_plugin()._check_active(
existing_server.resource_id)
if active:
break
eventlet.sleep(1)
except exception.ResourceInError:
pass
# attach the ports for old resource
prev_port_data = itertools.chain(
prev_server._data_get_ports(),
prev_server._data_get_ports('external_ports'))
self.store_external_ports()
self.detach_ports(existing_server)
prev_server_id = prev_server.resource_id
for port in prev_port_data:
self.client_plugin().interface_attach(prev_server_id,
port['id'])
try:
if self.client_plugin().check_interface_attach(
prev_server_id, port['id']):
LOG.info(_LI('Attach interface %(port)s successful to '
'server %(server)s when restore after '
'rollback.')
% {'port': port['id'],
'server': prev_server_id})
except retrying.RetryError:
raise exception.InterfaceAttachFailed(
port=port['id'], server=prev_server_id)
self.attach_ports(prev_server)

View File

@ -524,9 +524,10 @@ class StackResource(resource.Resource):
if self.abandon_in_progress:
self.rpc_client().abandon_stack(self.context, stack_identity)
else:
self.rpc_client().delete_stack(self.context, stack_identity)
self.rpc_client().delete_stack(self.context, stack_identity,
cast=False)
except Exception as ex:
self.rpc_client().ignore_error_named(ex, 'NotFound')
self.rpc_client().ignore_error_named(ex, 'EntityNotFound')
def handle_delete(self):
return self.delete_nested()

View File

@ -79,6 +79,10 @@ cfg.CONF.import_opt('enable_stack_abandon', 'heat.common.config')
cfg.CONF.import_opt('enable_stack_adopt', 'heat.common.config')
cfg.CONF.import_opt('convergence_engine', 'heat.common.config')
# Time to wait for a stack to stop when cancelling running threads, before
# giving up on being able to start a delete.
STOP_STACK_TIMEOUT = 30
LOG = logging.getLogger(__name__)
@ -1127,12 +1131,13 @@ class EngineService(service.Service):
self.thread_group_mgr.start(current_stack.id, func)
return
# stop the running update and take the lock
# as we cancel only running update, the acquire_result is
# always some engine_id, not None
lock = stack_lock.StackLock(cnxt, current_stack.id,
self.engine_id)
engine_id = lock.try_acquire()
engine_id = lock.get_engine_id()
if engine_id is None:
LOG.debug('No lock found on stack %s', db_stack.name)
return
if cancel_with_rollback:
cancel_message = rpc_api.THREAD_CANCEL_WITH_ROLLBACK
@ -1146,7 +1151,8 @@ class EngineService(service.Service):
# Another active engine has the lock
elif service_utils.engine_alive(cnxt, engine_id):
cancel_result = self._remote_call(
cnxt, engine_id, self.listener.SEND,
cnxt, engine_id, cfg.CONF.engine_life_check_timeout,
self.listener.SEND,
stack_identity=stack_identity, message=cancel_message)
if cancel_result is None:
LOG.debug("Successfully sent %(msg)s message "
@ -1156,6 +1162,12 @@ class EngineService(service.Service):
raise exception.EventSendFailed(stack_name=current_stack.name,
engine_id=engine_id)
else:
LOG.warning(_('Cannot cancel stack %(stack_name)s: lock held by '
'unknown engine %(engine_id)s') % {
'stack_name': db_stack.name,
'engine_id': engine_id})
@context.request_context
def validate_template(self, cnxt, template, params=None, files=None,
environment_files=None, show_nested=False,
@ -1330,8 +1342,7 @@ class EngineService(service.Service):
return api.format_stack_output(outputs[output_key])
def _remote_call(self, cnxt, lock_engine_id, call, **kwargs):
timeout = cfg.CONF.engine_life_check_timeout
def _remote_call(self, cnxt, lock_engine_id, timeout, call, **kwargs):
self.cctxt = self._client.prepare(
version='1.0',
timeout=timeout,
@ -1351,6 +1362,10 @@ class EngineService(service.Service):
"""
st = self._get_stack(cnxt, stack_identity)
if (st.status == parser.Stack.COMPLETE and
st.action == parser.Stack.DELETE):
raise exception.EntityNotFound(entity='Stack', name=st.name)
LOG.info(_LI('Deleting stack %s'), st.name)
stack = parser.Stack.load(cnxt, stack=st)
self.resource_enforcer.enforce_stack(stack)
@ -1380,31 +1395,70 @@ class EngineService(service.Service):
if acquire_result == self.engine_id:
# give threads which are almost complete an opportunity to
# finish naturally before force stopping them
eventlet.sleep(0.2)
self.thread_group_mgr.stop(stack.id)
self.thread_group_mgr.send(stack.id, rpc_api.THREAD_CANCEL)
# Another active engine has the lock
elif service_utils.engine_alive(cnxt, acquire_result):
stop_result = self._remote_call(
cnxt, acquire_result, self.listener.STOP_STACK,
stack_identity=stack_identity)
if stop_result is None:
LOG.debug("Successfully stopped remote task on engine %s"
% acquire_result)
cancel_result = self._remote_call(
cnxt, acquire_result, cfg.CONF.engine_life_check_timeout,
self.listener.SEND,
stack_identity=stack_identity, message=rpc_api.THREAD_CANCEL)
if cancel_result is None:
LOG.debug("Successfully sent %(msg)s message "
"to remote task on engine %(eng)s" % {
'eng': acquire_result,
'msg': rpc_api.THREAD_CANCEL})
else:
raise exception.StopActionFailed(stack_name=stack.name,
engine_id=acquire_result)
raise exception.EventSendFailed(stack_name=stack.name,
engine_id=acquire_result)
# There may be additional resources that we don't know about
# if an update was in-progress when the stack was stopped, so
# reload the stack from the database.
st = self._get_stack(cnxt, stack_identity)
stack = parser.Stack.load(cnxt, stack=st)
self.resource_enforcer.enforce_stack(stack)
def reload():
st = self._get_stack(cnxt, stack_identity)
stack = parser.Stack.load(cnxt, stack=st)
self.resource_enforcer.enforce_stack(stack)
return stack
self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,
stack.delete)
return
def wait_then_delete(stack):
watch = timeutils.StopWatch(cfg.CONF.error_wait_time + 10)
watch.start()
while not watch.expired():
LOG.debug('Waiting for stack cancel to complete: %s' %
stack.name)
with lock.try_thread_lock() as acquire_result:
if acquire_result is None:
stack = reload()
# do the actual delete with the acquired lock
self.thread_group_mgr.start_with_acquired_lock(
stack, lock, stack.delete)
return
eventlet.sleep(1.0)
if acquire_result == self.engine_id:
# cancel didn't finish in time, attempt a stop instead
self.thread_group_mgr.stop(stack.id)
elif service_utils.engine_alive(cnxt, acquire_result):
# Another active engine has the lock
stop_result = self._remote_call(
cnxt, acquire_result, STOP_STACK_TIMEOUT,
self.listener.STOP_STACK,
stack_identity=stack_identity)
if stop_result is None:
LOG.debug("Successfully stopped remote task "
"on engine %s" % acquire_result)
else:
raise exception.StopActionFailed(
stack_name=stack.name, engine_id=acquire_result)
stack = reload()
# do the actual delete in a locked task
self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,
stack.delete)
# Cancelling the stack could take some time, so do it in a task
self.thread_group_mgr.start(stack.id, wait_then_delete,
stack)
@context.request_context
def export_stack(self, cnxt, stack_identity):
@ -1482,7 +1536,7 @@ class EngineService(service.Service):
for name in mgr.names()]
versions = []
for t in sorted(_template_classes): # Sort to ensure dates come first
if issubclass(t[1], cfntemplate.CfnTemplate):
if issubclass(t[1], cfntemplate.CfnTemplateBase):
type = 'cfn'
else:
type = 'hot'

View File

@ -927,14 +927,9 @@ class Stack(collections.Mapping):
self._send_notification_and_add_event()
if self.convergence:
# do things differently for convergence
exp_trvsl = self.current_traversal
if self.status == self.FAILED:
self.current_traversal = ''
values['current_traversal'] = self.current_traversal
updated = stack_object.Stack.select_and_update(
self.context, self.id, values,
exp_trvsl=exp_trvsl)
exp_trvsl=self.current_traversal)
return updated
@ -2024,13 +2019,17 @@ class Stack(collections.Mapping):
"""
resource_objects.Resource.purge_deleted(self.context, self.id)
exp_trvsl = self.current_traversal
if self.status == self.FAILED:
self.current_traversal = ''
prev_tmpl_id = None
if (self.prev_raw_template_id is not None and
self.status != self.FAILED):
prev_tmpl_id = self.prev_raw_template_id
self.prev_raw_template_id = None
stack_id = self.store()
stack_id = self.store(exp_trvsl=exp_trvsl)
if stack_id is None:
# Failed concurrent update
LOG.warning(_LW("Failed to store stack %(name)s with traversal ID "
@ -2042,7 +2041,7 @@ class Stack(collections.Mapping):
if prev_tmpl_id is not None:
raw_template_object.RawTemplate.delete(self.context, prev_tmpl_id)
sync_point.delete_all(self.context, self.id, self.current_traversal)
sync_point.delete_all(self.context, self.id, exp_trvsl)
if (self.action, self.status) == (self.DELETE, self.COMPLETE):
if not self.owner_id:

View File

@ -35,6 +35,10 @@ class StackLock(object):
self.listener = None
def get_engine_id(self):
"""Return the ID of the engine which currently holds the lock.
Returns None if there is no lock held on the stack.
"""
return stack_lock_object.StackLock.get_engine_id(self.context,
self.stack_id)

View File

@ -18,6 +18,7 @@ import eventlet.queue
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from oslo_utils import uuidutils
from osprofiler import profiler
from heat.common import context
@ -27,7 +28,9 @@ from heat.common.i18n import _LW
from heat.common import messaging as rpc_messaging
from heat.db import api as db_api
from heat.engine import check_resource
from heat.engine import stack as parser
from heat.engine import sync_point
from heat.objects import stack as stack_objects
from heat.rpc import api as rpc_api
from heat.rpc import worker_client as rpc_client
@ -102,14 +105,17 @@ class WorkerService(service.Service):
in_progress resources to complete normally; no worker is stopped
abruptly.
"""
reason = 'User cancelled stack %s ' % stack.action
# state_set will update the current traversal to '' for FAILED state
old_trvsl = stack.current_traversal
updated = stack.state_set(stack.action, stack.FAILED, reason)
if not updated:
LOG.warning(_LW("Failed to stop traversal %(trvsl)s of stack "
"%(name)s while cancelling the operation."),
{'name': stack.name, 'trvsl': old_trvsl})
_stop_traversal(stack)
db_child_stacks = stack_objects.Stack.get_all_by_root_owner_id(
stack.context, stack.id)
for db_child in db_child_stacks:
if db_child.status == parser.Stack.IN_PROGRESS:
child = parser.Stack.load(stack.context,
stack_id=db_child.id,
stack=db_child)
_stop_traversal(child)
def stop_all_workers(self, stack):
# stop the traversal
@ -131,6 +137,23 @@ class WorkerService(service.Service):
return True
def _retrigger_replaced(self, is_update, rsrc, stack, msg_queue):
graph = stack.convergence_dependencies.graph()
key = (rsrc.id, is_update)
if key not in graph and rsrc.replaces is not None:
# This resource replaces old one and is not needed in
# current traversal. You need to mark the resource as
# DELETED so that it gets cleaned up in purge_db.
values = {'action': rsrc.DELETE}
db_api.resource_update_and_save(stack.context, rsrc.id, values)
# The old resource might be in the graph (a rollback case);
# just re-trigger it.
key = (rsrc.replaces, is_update)
cr = check_resource.CheckResource(self.engine_id, self._rpc_client,
self.thread_group_mgr, msg_queue)
cr.retrigger_check_resource(stack.context, is_update, key[0],
stack)
@context.request_context
def check_resource(self, cnxt, resource_id, current_traversal, data,
is_update, adopt_stack_data):
@ -146,18 +169,20 @@ class WorkerService(service.Service):
if rsrc is None:
return
if current_traversal != stack.current_traversal:
LOG.debug('[%s] Traversal cancelled; stopping.', current_traversal)
return
msg_queue = eventlet.queue.LightQueue()
try:
self.thread_group_mgr.add_msg_queue(stack.id, msg_queue)
cr = check_resource.CheckResource(self.engine_id, self._rpc_client,
self.thread_group_mgr, msg_queue)
cr.check(cnxt, resource_id, current_traversal, resource_data,
is_update, adopt_stack_data, rsrc, stack)
if current_traversal != stack.current_traversal:
LOG.debug('[%s] Traversal cancelled; re-trigerring.',
current_traversal)
self._retrigger_replaced(is_update, rsrc, stack, msg_queue)
else:
cr = check_resource.CheckResource(self.engine_id,
self._rpc_client,
self.thread_group_mgr,
msg_queue)
cr.check(cnxt, resource_id, current_traversal, resource_data,
is_update, adopt_stack_data, rsrc, stack)
finally:
self.thread_group_mgr.remove_msg_queue(None,
stack.id, msg_queue)
@ -172,6 +197,36 @@ class WorkerService(service.Service):
_cancel_check_resource(stack_id, self.engine_id, self.thread_group_mgr)
def _stop_traversal(stack):
old_trvsl = stack.current_traversal
updated = _update_current_traversal(stack)
if not updated:
LOG.warning(_LW("Failed to update stack %(name)s with new "
"traversal, aborting stack cancel"),
{'name': stack.name})
return
reason = 'Stack %(action)s cancelled' % {'action': stack.action}
updated = stack.state_set(stack.action, stack.FAILED, reason)
if not updated:
LOG.warning(_LW("Failed to update stack %(name)s status"
" to %(action)_%(state)"),
{'name': stack.name, 'action': stack.action,
'state': stack.FAILED})
return
sync_point.delete_all(stack.context, stack.id, old_trvsl)
def _update_current_traversal(stack):
previous_traversal = stack.current_traversal
stack.current_traversal = uuidutils.generate_uuid()
values = {'current_traversal': stack.current_traversal}
return stack_objects.Stack.select_and_update(
stack.context, stack.id, values,
exp_trvsl=previous_traversal)
def _cancel_check_resource(stack_id, engine_id, tgm):
LOG.debug('Cancelling workers for stack [%s] in engine [%s]',
stack_id, engine_id)

View File

@ -11,7 +11,9 @@
# under the License.
import mock
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
from oslo_utils import timeutils
from heat.common import exception
from heat.common import service_utils
@ -97,8 +99,11 @@ class StackDeleteTest(common.HeatTestCase):
@mock.patch.object(parser.Stack, 'load')
@mock.patch.object(stack_lock.StackLock, 'try_acquire')
@mock.patch.object(stack_lock.StackLock, 'acquire')
def test_stack_delete_current_engine_active_lock(self, mock_acquire,
mock_try, mock_load):
@mock.patch.object(timeutils.StopWatch, 'expired')
def test_stack_delete_current_engine_active_lock(self, mock_expired,
mock_acquire, mock_try,
mock_load):
cfg.CONF.set_override('error_wait_time', 0)
self.man.start()
stack_name = 'service_delete_test_stack_current_active_lock'
stack = tools.get_stack(stack_name, self.ctx)
@ -108,27 +113,32 @@ class StackDeleteTest(common.HeatTestCase):
stack_lock_object.StackLock.create(
self.ctx, stack.id, self.man.engine_id)
# Create a fake ThreadGroup too
self.man.thread_group_mgr.groups[stack.id] = tools.DummyThreadGroup()
st = stack_object.Stack.get_by_id(self.ctx, sid)
mock_load.return_value = stack
mock_try.return_value = self.man.engine_id
mock_stop = self.patchobject(self.man.thread_group_mgr, 'stop')
mock_send = self.patchobject(self.man.thread_group_mgr, 'send')
mock_expired.side_effect = [False, True]
self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
self.man.thread_group_mgr.groups[sid].wait()
mock_load.assert_called_with(self.ctx, stack=st)
self.assertEqual(2, len(mock_load.mock_calls))
mock_try.assert_called_once_with()
mock_acquire.assert_called_once_with(True)
mock_send.assert_called_once_with(stack.id, 'cancel')
mock_stop.assert_called_once_with(stack.id)
self.assertEqual(2, len(mock_load.mock_calls))
mock_try.assert_called_with()
mock_acquire.assert_called_once_with(True)
@mock.patch.object(parser.Stack, 'load')
@mock.patch.object(stack_lock.StackLock, 'try_acquire')
@mock.patch.object(service_utils, 'engine_alive')
def test_stack_delete_other_engine_active_lock_failed(self, mock_alive,
mock_try, mock_load):
@mock.patch.object(timeutils.StopWatch, 'expired')
def test_stack_delete_other_engine_active_lock_failed(self, mock_expired,
mock_alive, mock_try,
mock_load):
cfg.CONF.set_override('error_wait_time', 0)
OTHER_ENGINE = "other-engine-fake-uuid"
self.man.start()
stack_name = 'service_delete_test_stack_other_engine_lock_fail'
@ -142,6 +152,7 @@ class StackDeleteTest(common.HeatTestCase):
mock_load.return_value = stack
mock_try.return_value = OTHER_ENGINE
mock_alive.return_value = True
mock_expired.side_effect = [False, True]
mock_call = self.patchobject(self.man, '_remote_call',
return_value=False)
@ -149,20 +160,24 @@ class StackDeleteTest(common.HeatTestCase):
ex = self.assertRaises(dispatcher.ExpectedException,
self.man.delete_stack,
self.ctx, stack.identifier())
self.assertEqual(exception.StopActionFailed, ex.exc_info[0])
self.assertEqual(exception.EventSendFailed, ex.exc_info[0])
mock_load.assert_called_once_with(self.ctx, stack=st)
mock_try.assert_called_once_with()
mock_alive.assert_called_once_with(self.ctx, OTHER_ENGINE)
mock_call.assert_called_once_with(self.ctx, OTHER_ENGINE, "stop_stack",
mock_call.assert_called_once_with(self.ctx, OTHER_ENGINE, mock.ANY,
"send",
message='cancel',
stack_identity=mock.ANY)
@mock.patch.object(parser.Stack, 'load')
@mock.patch.object(stack_lock.StackLock, 'try_acquire')
@mock.patch.object(service_utils, 'engine_alive')
@mock.patch.object(stack_lock.StackLock, 'acquire')
@mock.patch.object(timeutils.StopWatch, 'expired')
def test_stack_delete_other_engine_active_lock_succeeded(
self, mock_acquire, mock_alive, mock_try, mock_load):
self, mock_expired, mock_acquire, mock_alive, mock_try, mock_load):
cfg.CONF.set_override('error_wait_time', 0)
OTHER_ENGINE = "other-engine-fake-uuid"
self.man.start()
@ -177,6 +192,7 @@ class StackDeleteTest(common.HeatTestCase):
mock_load.return_value = stack
mock_try.return_value = OTHER_ENGINE
mock_alive.return_value = True
mock_expired.side_effect = [False, True]
mock_call = self.patchobject(self.man, '_remote_call',
return_value=None)
@ -185,18 +201,25 @@ class StackDeleteTest(common.HeatTestCase):
self.assertEqual(2, len(mock_load.mock_calls))
mock_load.assert_called_with(self.ctx, stack=st)
mock_try.assert_called_once_with()
mock_alive.assert_called_once_with(self.ctx, OTHER_ENGINE)
mock_call.assert_called_once_with(self.ctx, OTHER_ENGINE, "stop_stack",
stack_identity=mock.ANY)
mock_try.assert_called_with()
mock_alive.assert_called_with(self.ctx, OTHER_ENGINE)
mock_call.assert_has_calls([
mock.call(self.ctx, OTHER_ENGINE, mock.ANY, "send",
message='cancel',
stack_identity=mock.ANY),
mock.call(self.ctx, OTHER_ENGINE, mock.ANY, "stop_stack",
stack_identity=mock.ANY)
])
mock_acquire.assert_called_once_with(True)
@mock.patch.object(parser.Stack, 'load')
@mock.patch.object(stack_lock.StackLock, 'try_acquire')
@mock.patch.object(service_utils, 'engine_alive')
@mock.patch.object(stack_lock.StackLock, 'acquire')
@mock.patch.object(timeutils.StopWatch, 'expired')
def test_stack_delete_other_dead_engine_active_lock(
self, mock_acquire, mock_alive, mock_try, mock_load):
self, mock_expired, mock_acquire, mock_alive, mock_try, mock_load):
cfg.CONF.set_override('error_wait_time', 0)
OTHER_ENGINE = "other-engine-fake-uuid"
stack_name = 'service_delete_test_stack_other_dead_engine'
stack = tools.get_stack(stack_name, self.ctx)
@ -210,11 +233,12 @@ class StackDeleteTest(common.HeatTestCase):
mock_load.return_value = stack
mock_try.return_value = OTHER_ENGINE
mock_alive.return_value = False
mock_expired.side_effect = [False, True]
self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
self.man.thread_group_mgr.groups[sid].wait()
mock_load.assert_called_with(self.ctx, stack=st)
mock_try.assert_called_once_with()
mock_try.assert_called_with()
mock_acquire.assert_called_once_with(True)
mock_alive.assert_called_once_with(self.ctx, OTHER_ENGINE)
mock_alive.assert_called_with(self.ctx, OTHER_ENGINE)

View File

@ -482,8 +482,10 @@ resources:
stk.disable_rollback = False
stk.store()
self.man.engine_id = service_utils.generate_engine_id()
self.patchobject(stack.Stack, 'load', return_value=stk)
self.patchobject(stack_lock.StackLock, 'try_acquire',
self.patchobject(stack_lock.StackLock, 'get_engine_id',
return_value=self.man.engine_id)
self.patchobject(self.man.thread_group_mgr, 'send')
@ -500,7 +502,7 @@ resources:
stk.disable_rollback = False
stk.store()
self.patchobject(stack.Stack, 'load', return_value=stk)
self.patchobject(stack_lock.StackLock, 'try_acquire',
self.patchobject(stack_lock.StackLock, 'get_engine_id',
return_value=str(uuid.uuid4()))
self.patchobject(service_utils, 'engine_alive',
return_value=True)
@ -514,6 +516,23 @@ resources:
self.man.stack_cancel_update,
self.ctx, stk.identifier())
def test_stack_cancel_update_no_lock(self):
stack_name = 'service_update_stack_test_cancel_same_engine'
stk = tools.get_stack(stack_name, self.ctx)
stk.state_set(stk.UPDATE, stk.IN_PROGRESS, 'test_override')
stk.disable_rollback = False
stk.store()
self.patchobject(stack.Stack, 'load', return_value=stk)
self.patchobject(stack_lock.StackLock, 'get_engine_id',
return_value=None)
self.patchobject(self.man.thread_group_mgr, 'send')
self.man.stack_cancel_update(self.ctx, stk.identifier(),
cancel_with_rollback=False)
self.assertFalse(self.man.thread_group_mgr.send.called)
def test_stack_cancel_update_wrong_state_fails(self):
stack_name = 'service_update_cancel_test_stack'
stk = tools.get_stack(stack_name, self.ctx)

View File

@ -77,12 +77,12 @@ class CheckWorkflowUpdateTest(common.HeatTestCase):
for mocked in [mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid]:
self.assertFalse(mocked.called)
@mock.patch.object(worker.WorkerService, '_retrigger_replaced')
def test_stale_traversal(
self, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
self, mock_rnt, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
self.worker.check_resource(self.ctx, self.resource.id,
'stale-traversal', {}, True, None)
for mocked in [mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid]:
self.assertFalse(mocked.called)
self.assertTrue(mock_rnt.called)
def test_is_update_traversal(
self, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
@ -320,7 +320,7 @@ class CheckWorkflowUpdateTest(common.HeatTestCase):
self.assertTrue(self.stack.purge_db.called)
@mock.patch.object(check_resource.CheckResource,
'_retrigger_check_resource')
'retrigger_check_resource')
@mock.patch.object(stack.Stack, 'load')
def test_initiate_propagate_rsrc_retriggers_check_rsrc_on_new_stack_update(
self, mock_stack_load, mock_rcr, mock_cru, mock_crc, mock_pcr,
@ -368,8 +368,8 @@ class CheckWorkflowUpdateTest(common.HeatTestCase):
# A, B are predecessors to C when is_update is True
expected_predecessors = {(self.stack['A'].id, True),
(self.stack['B'].id, True)}
self.cr._retrigger_check_resource(self.ctx, self.is_update,
resC.id, self.stack)
self.cr.retrigger_check_resource(self.ctx, self.is_update,
resC.id, self.stack)
mock_pcr.assert_called_once_with(self.ctx, mock.ANY, resC.id,
self.stack.current_traversal,
mock.ANY, (resC.id, True), None,
@ -386,7 +386,7 @@ class CheckWorkflowUpdateTest(common.HeatTestCase):
[(1, False), (1, True)], [(2, False), None]])
# simulate rsrc 2 completing its update for old traversal
# and calling rcr
self.cr._retrigger_check_resource(self.ctx, True, 2, self.stack)
self.cr.retrigger_check_resource(self.ctx, True, 2, self.stack)
# Ensure that pcr was called with proper delete traversal
mock_pcr.assert_called_once_with(self.ctx, mock.ANY, 2,
self.stack.current_traversal,
@ -401,7 +401,7 @@ class CheckWorkflowUpdateTest(common.HeatTestCase):
[(1, False), (1, True)], [(2, False), (2, True)]])
# simulate rsrc 2 completing its delete for old traversal
# and calling rcr
self.cr._retrigger_check_resource(self.ctx, False, 2, self.stack)
self.cr.retrigger_check_resource(self.ctx, False, 2, self.stack)
# Ensure that pcr was called with proper delete traversal
mock_pcr.assert_called_once_with(self.ctx, mock.ANY, 2,
self.stack.current_traversal,
@ -426,7 +426,7 @@ class CheckWorkflowUpdateTest(common.HeatTestCase):
@mock.patch.object(stack.Stack, 'purge_db')
@mock.patch.object(stack.Stack, 'state_set')
@mock.patch.object(check_resource.CheckResource,
'_retrigger_check_resource')
'retrigger_check_resource')
@mock.patch.object(check_resource.CheckResource, '_trigger_rollback')
def test_handle_rsrc_failure_when_update_fails(
self, mock_tr, mock_rcr, mock_ss, mock_pdb, mock_cru, mock_crc,
@ -444,7 +444,7 @@ class CheckWorkflowUpdateTest(common.HeatTestCase):
@mock.patch.object(stack.Stack, 'purge_db')
@mock.patch.object(stack.Stack, 'state_set')
@mock.patch.object(check_resource.CheckResource,
'_retrigger_check_resource')
'retrigger_check_resource')
@mock.patch.object(check_resource.CheckResource, '_trigger_rollback')
def test_handle_rsrc_failure_when_update_fails_different_traversal(
self, mock_tr, mock_rcr, mock_ss, mock_pdb, mock_cru, mock_crc,

View File

@ -17,7 +17,10 @@ import mock
from heat.db import api as db_api
from heat.engine import check_resource
from heat.engine import stack as parser
from heat.engine import template as templatem
from heat.engine import worker
from heat.objects import stack as stack_objects
from heat.rpc import worker_client as wc
from heat.tests import common
from heat.tests import utils
@ -177,6 +180,26 @@ class WorkerServiceTest(common.HeatTestCase):
mock_ccr.assert_has_calls(calls, any_order=True)
self.assertTrue(mock_wc.called)
@mock.patch.object(worker, '_stop_traversal')
def test_stop_traversal_stops_nested_stack(self, mock_st):
mock_tgm = mock.Mock()
ctx = utils.dummy_context()
tmpl = templatem.Template.create_empty_template()
stack1 = parser.Stack(ctx, 'stack1', tmpl,
current_traversal='123')
stack1.store()
stack2 = parser.Stack(ctx, 'stack2', tmpl,
owner_id=stack1.id, current_traversal='456')
stack2.store()
_worker = worker.WorkerService('host-1', 'topic-1', 'engine-001',
mock_tgm)
_worker.stop_traversal(stack1)
self.assertEqual(2, mock_st.call_count)
call1, call2 = mock_st.call_args_list
call_args1, call_args2 = call1[0][0], call2[0][0]
self.assertEqual('stack1', call_args1.name)
self.assertEqual('stack2', call_args2.name)
@mock.patch.object(worker, '_cancel_workers')
@mock.patch.object(worker.WorkerService, 'stop_traversal')
def test_stop_all_workers_when_stack_in_progress(self, mock_st, mock_cw):
@ -220,3 +243,13 @@ class WorkerServiceTest(common.HeatTestCase):
mock_cw.assert_called_with(stack, mock_tgm, 'engine-001',
_worker._rpc_client)
self.assertFalse(stack.rollback.called)
@mock.patch.object(stack_objects.Stack, 'select_and_update')
def test_update_current_traversal(self, mock_sau):
stack = mock.MagicMock()
stack.current_traversal = 'some-thing'
old_trvsl = stack.current_traversal
worker._update_current_traversal(stack)
self.assertNotEqual(old_trvsl, stack.current_traversal)
mock_sau.assert_called_once_with(mock.ANY, stack.id, mock.ANY,
exp_trvsl=old_trvsl)

View File

@ -933,7 +933,7 @@ class UpdatePortTest(common.HeatTestCase):
return_value=fake_groups_list)
props = {'network_id': u'net1234',
'name': utils.PhysName(stack.name, 'port'),
'name': str(utils.PhysName(stack.name, 'port')),
'admin_state_up': True,
'device_owner': u'network:dhcp'}
@ -970,6 +970,15 @@ class UpdatePortTest(common.HeatTestCase):
update_props)())
update_port.assset_called_once_with(update_dict)
# check, that update does not cause of Update Replace
create_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
props)
after_props, before_props = port._prepare_update_props(update_snippet,
create_snippet)
self.assertIsNotNone(
port.update_template_diff_properties(after_props, before_props))
# update with empty prop_diff
scheduler.TaskRunner(port.handle_update, update_snippet, {}, {})()
self.assertEqual(1, update_port.call_count)

View File

@ -35,6 +35,7 @@ from heat.engine.clients.os import zaqar
from heat.engine import environment
from heat.engine import resource
from heat.engine.resources.openstack.nova import server as servers
from heat.engine.resources.openstack.nova import server_network_mixin
from heat.engine.resources import scheduler_hints as sh
from heat.engine import scheduler
from heat.engine import stack as parser
@ -4395,7 +4396,9 @@ class ServerInternalPortTest(common.HeatTestCase):
mock.call('test_server', 3344),
mock.call('test_server', 5566)])
def test_restore_ports_after_rollback(self):
@mock.patch.object(server_network_mixin.ServerNetworkMixin,
'store_external_ports')
def test_restore_ports_after_rollback(self, store_ports):
t, stack, server = self._return_template_stack_and_rsrc_defn(
'test', tmpl_server_with_network_id)
server.resource_id = 'existing_server'
@ -4403,6 +4406,8 @@ class ServerInternalPortTest(common.HeatTestCase):
external_port_ids = [{'id': 5566}]
server._data = {"internal_ports": jsonutils.dumps(port_ids),
"external_ports": jsonutils.dumps(external_port_ids)}
self.patchobject(nova.NovaClientPlugin, '_check_active')
nova.NovaClientPlugin._check_active.side_effect = [False, True]
# add data to old server in backup stack
old_server = mock.Mock()
@ -4420,6 +4425,8 @@ class ServerInternalPortTest(common.HeatTestCase):
server.restore_prev_rsrc()
self.assertEqual(2, nova.NovaClientPlugin._check_active.call_count)
# check, that ports were detached from new server
nova.NovaClientPlugin.interface_detach.assert_has_calls([
mock.call('existing_server', 1122),
@ -4432,12 +4439,16 @@ class ServerInternalPortTest(common.HeatTestCase):
mock.call('old_server', 3344),
mock.call('old_server', 5566)])
def test_restore_ports_after_rollback_attach_failed(self):
@mock.patch.object(server_network_mixin.ServerNetworkMixin,
'store_external_ports')
def test_restore_ports_after_rollback_attach_failed(self, store_ports):
t, stack, server = self._return_template_stack_and_rsrc_defn(
'test', tmpl_server_with_network_id)
server.resource_id = 'existing_server'
port_ids = [{'id': 1122}, {'id': 3344}]
server._data = {"internal_ports": jsonutils.dumps(port_ids)}
self.patchobject(nova.NovaClientPlugin, '_check_active')
nova.NovaClientPlugin._check_active.return_value = True
# add data to old server in backup stack
old_server = mock.Mock()
@ -4465,10 +4476,14 @@ class ServerInternalPortTest(common.HeatTestCase):
'(old_server)',
six.text_type(exc))
def test_restore_ports_after_rollback_convergence(self):
@mock.patch.object(server_network_mixin.ServerNetworkMixin,
'store_external_ports')
def test_restore_ports_after_rollback_convergence(self, store_ports):
t = template_format.parse(tmpl_server_with_network_id)
stack = utils.parse_stack(t)
stack.store()
self.patchobject(nova.NovaClientPlugin, '_check_active')
nova.NovaClientPlugin._check_active.return_value = True
# mock resource from previous template
prev_rsrc = stack['server']

View File

@ -375,8 +375,9 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
stack = tools.get_stack('test_stack', utils.dummy_context(),
template=tools.string_template_five,
convergence=True)
stack.status = stack.FAILED
stack.store()
stack.state_set(stack.action, stack.FAILED, 'test-reason')
stack.purge_db()
self.assertEqual('', stack.current_traversal)
@mock.patch.object(raw_template_object.RawTemplate, 'delete')

View File

@ -975,6 +975,17 @@ class StackServiceTest(common.HeatTestCase):
outputs = self.eng.list_outputs(self.ctx, mock.ANY)
self.assertEqual([], outputs)
def test_stack_delete_complete_is_not_found(self):
mock_get_stack = self.patchobject(self.eng, '_get_stack')
mock_get_stack.return_value = mock.MagicMock()
mock_get_stack.return_value.status = parser.Stack.COMPLETE
mock_get_stack.return_value.action = parser.Stack.DELETE
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.delete_stack,
'irrelevant',
'irrelevant')
self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
def test_get_environment(self):
# Setup
t = template_format.parse(tools.wp_template)

View File

@ -413,4 +413,4 @@ Outputs:
self.res.nested().identifier.return_value = stack_identity
self.res.handle_delete()
self.res.rpc_client.return_value.delete_stack.assert_called_once_with(
self.ctx, self.res.nested().identifier())
self.ctx, self.res.nested().identifier(), cast=False)

View File

@ -1021,4 +1021,5 @@ class TemplateResourceCrudTest(common.HeatTestCase):
rpcc = self.res.rpc_client.return_value
rpcc.delete_stack.assert_called_once_with(
self.ctx,
self.res.nested().identifier())
self.res.nested().identifier(),
cast=False)

View File

@ -514,7 +514,7 @@ class StackResourceTest(StackResourceBaseTest):
side_effect=exception.NotFound())
self.assertIsNone(self.parent_resource.delete_nested())
rpcc.return_value.delete_stack.assert_called_once_with(
self.parent_resource.context, mock.ANY)
self.parent_resource.context, mock.ANY, cast=False)
def test_need_update_for_nested_resource(self):
"""Test the resource with nested stack should need update.

View File

@ -411,6 +411,25 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
self._wait_for_stack_status(**kwargs)
def cancel_update_stack(self, stack_identifier,
expected_status='ROLLBACK_COMPLETE'):
stack_name = stack_identifier.split('/')[0]
self.updated_time[stack_identifier] = self.client.stacks.get(
stack_identifier, resolve_outputs=False).updated_time
self.client.actions.cancel_update(stack_name)
kwargs = {'stack_identifier': stack_identifier,
'status': expected_status}
if expected_status in ['ROLLBACK_COMPLETE']:
# To trigger rollback you would intentionally fail the stack
# Hence check for rollback failures
kwargs['failure_pattern'] = '^ROLLBACK_FAILED$'
self._wait_for_stack_status(**kwargs)
def preview_update_stack(self, stack_identifier, template,
environment=None, files=None, parameters=None,
tags=None, disable_rollback=True,

View File

@ -0,0 +1,63 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
class CancelUpdateTest(functional_base.FunctionalTestsBase):
template = '''
heat_template_version: '2013-05-23'
parameters:
InstanceType:
type: string
ImageId:
type: string
network:
type: string
resources:
port:
type: OS::Neutron::Port
properties:
network: {get_param: network}
Server:
type: OS::Nova::Server
properties:
flavor_update_policy: REPLACE
image: {get_param: ImageId}
flavor: {get_param: InstanceType}
networks:
- port: {get_resource: port}
'''
def setUp(self):
super(CancelUpdateTest, self).setUp()
if not self.conf.image_ref:
raise self.skipException("No image configured to test.")
if not self.conf.instance_type:
raise self.skipException("No flavor configured to test.")
if not self.conf.minimal_instance_type:
raise self.skipException("No minimal flavor configured to test.")
def test_cancel_update_server_with_port(self):
parameters = {'InstanceType': self.conf.minimal_instance_type,
'ImageId': self.conf.image_ref,
'network': self.conf.fixed_network_name}
stack_identifier = self.stack_create(template=self.template,
parameters=parameters)
parameters['InstanceType'] = 'm1.large'
self.update_stack(stack_identifier, self.template,
parameters=parameters,
expected_status='UPDATE_IN_PROGRESS')
self.cancel_update_stack(stack_identifier)

View File

@ -0,0 +1,42 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from heat_integrationtests.functional import functional_base
class DeleteInProgressTest(functional_base.FunctionalTestsBase):
root_template = '''
heat_template_version: 2013-05-23
resources:
rg:
type: OS::Heat::ResourceGroup
properties:
count: 125
resource_def:
type: empty.yaml
'''
empty_template = '''
heat_template_version: 2013-05-23
resources:
'''
def test_delete_nested_stacks_create_in_progress(self):
files = {'empty.yaml': self.empty_template}
identifier = self.stack_create(template=self.root_template,
files=files,
expected_status='CREATE_IN_PROGRESS')
time.sleep(20)
self._stack_delete(identifier)

View File

@ -12,7 +12,7 @@ greenlet>=0.3.2 # MIT
keystoneauth1>=2.10.0 # Apache-2.0
keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 # Apache-2.0
lxml>=2.3 # BSD
netaddr!=0.7.16,>=0.7.12 # BSD
netaddr!=0.7.16,>=0.7.13 # BSD
oslo.cache>=1.5.0 # Apache-2.0
oslo.config>=3.14.0 # Apache-2.0
oslo.concurrency>=3.8.0 # Apache-2.0

View File

@ -8,7 +8,7 @@ setenv = VIRTUAL_ENV={envdir}
OS_TEST_PATH=heat/tests
TESTR_START_DIR=heat/tests
usedevelop = True
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/newton} {opts} {packages}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =