Fix for timeouts on scale down
There were two issues: The action timeout and the wait_for_messages
timeout.
The wait_for_messages did not need a timeout here and it was causing the
bug. I added a CLI option for the timeout and set up a reasonable
default (the previous default was over 4 hours, which would exceed the
keystone session duration).
Change-Id: I232d071516d3f53f9f87143d9d65ebe3e58b0f38
Closes-Bug: 1734712
(cherry picked from commit 715324adf9)
This commit is contained in:
parent
15db65eabc
commit
25170c68c8
|
@ -46,3 +46,4 @@ PUPPET_MODULES = "/etc/puppet/modules/"
|
|||
PUPPET_BASE = "/etc/puppet/"
|
||||
# Update Queue
|
||||
UPDATE_QUEUE = 'update'
|
||||
STACK_TIMEOUT = 240
|
||||
|
|
|
@ -56,7 +56,7 @@ class TestDeleteNode(fakes.TestDeleteNode):
|
|||
# probably be fixed so that it can pass with that.
|
||||
def test_node_delete(self):
|
||||
argslist = ['instance1', 'instance2', '--templates',
|
||||
'--stack', 'overcast']
|
||||
'--stack', 'overcast', '--timeout', '90']
|
||||
verifylist = [
|
||||
('stack', 'overcast'),
|
||||
('nodes', ['instance1', 'instance2'])
|
||||
|
@ -78,7 +78,8 @@ class TestDeleteNode(fakes.TestDeleteNode):
|
|||
workflow_input={
|
||||
'container': 'overcast',
|
||||
'queue_name': 'UUID4',
|
||||
'nodes': ['instance1', 'instance2']
|
||||
'nodes': ['instance1', 'instance2'],
|
||||
'timeout': 90
|
||||
})
|
||||
|
||||
def test_node_wrong_stack(self):
|
||||
|
@ -122,7 +123,8 @@ class TestDeleteNode(fakes.TestDeleteNode):
|
|||
workflow_input={
|
||||
'container': 'overcloud',
|
||||
'queue_name': 'UUID4',
|
||||
'nodes': ['instance1', ]
|
||||
'nodes': ['instance1', ],
|
||||
'timeout': 240
|
||||
})
|
||||
|
||||
def test_node_delete_wrong_instance(self):
|
||||
|
@ -150,7 +152,8 @@ class TestDeleteNode(fakes.TestDeleteNode):
|
|||
workflow_input={
|
||||
'container': 'overcloud',
|
||||
'queue_name': 'UUID4',
|
||||
'nodes': ['wrong_instance', ]
|
||||
'nodes': ['wrong_instance', ],
|
||||
'timeout': 240
|
||||
})
|
||||
|
||||
|
||||
|
|
|
@ -61,6 +61,13 @@ class DeleteNode(command.Command):
|
|||
"Otherwise this argument will be silently ignored."),
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
'--timeout', metavar='<TIMEOUT>',
|
||||
type=int, default=constants.STACK_TIMEOUT, dest='timeout',
|
||||
help=_("Timeout in minutes to wait for the nodes to be deleted. "
|
||||
"Keep in mind that due to keystone session duration "
|
||||
"that timeout has an upper bound of 4 hours ")
|
||||
)
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
|
@ -78,7 +85,12 @@ class DeleteNode(command.Command):
|
|||
print("Deleting the following nodes from stack {stack}:\n{nodes}"
|
||||
.format(stack=stack.stack_name, nodes=nodes))
|
||||
|
||||
scale.scale_down(clients, stack.stack_name, parsed_args.nodes)
|
||||
scale.scale_down(
|
||||
clients,
|
||||
stack.stack_name,
|
||||
parsed_args.nodes,
|
||||
parsed_args.timeout
|
||||
)
|
||||
|
||||
|
||||
class ProvideNode(command.Command):
|
||||
|
|
|
@ -33,8 +33,7 @@ def delete_node(clients, **workflow_input):
|
|||
workflow_input=workflow_input
|
||||
)
|
||||
|
||||
for payload in base.wait_for_messages(workflow_client, ws, execution,
|
||||
360):
|
||||
for payload in base.wait_for_messages(workflow_client, ws, execution):
|
||||
if payload['status'] != "SUCCESS":
|
||||
raise InvalidConfiguration(payload['message'])
|
||||
|
||||
|
|
Loading…
Reference in New Issue