Implement subgraph (aka start/end) execution
Pass through subgraphs list for Astute execution. The format is a list of subgraphs with start/end. The option is passed as-is to Astute. Change-Id: I5c705076adcce4e47a62edc30d3d5a191f5ec813 Closes-bug: #1612616
This commit is contained in:
parent
7f0213610f
commit
901c9a540e
|
@ -318,6 +318,9 @@ class BaseDeploySelectedNodes(SelectedNodesBase):
|
|||
def get_graph_type(self):
    """Return the graph type requested via the query string.

    Falls back to ``None`` when the parameter is absent or empty.
    """
    requested = web.input(graph_type=None).graph_type
    return requested if requested else None
|
||||
|
||||
def get_force(self):
    """Return the boolean ``force`` flag from the request (defaults to False)."""
    raw_value = web.input(force='0').force
    return utils.parse_bool(raw_value)
|
||||
|
||||
def get_nodes(self, cluster):
|
||||
nodes_to_deploy = super(
|
||||
BaseDeploySelectedNodes, self).get_nodes(cluster)
|
||||
|
@ -346,13 +349,12 @@ class DeploySelectedNodes(BaseDeploySelectedNodes, RunMixin):
|
|||
* 404 (cluster or nodes not found in db)
|
||||
"""
|
||||
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
|
||||
force = utils.parse_bool(web.input(force='0').force)
|
||||
return self.handle_task(
|
||||
cluster=cluster,
|
||||
graph_type=self.get_graph_type(),
|
||||
force=force,
|
||||
dry_run=self.get_dry_run(),
|
||||
noop_run=self.get_noop_run()
|
||||
noop_run=self.get_noop_run(),
|
||||
force=self.get_force()
|
||||
)
|
||||
|
||||
|
||||
|
@ -371,7 +373,6 @@ class DeploySelectedNodesWithTasks(BaseDeploySelectedNodes, RunMixin):
|
|||
* 404 (cluster or nodes not found in db)
|
||||
"""
|
||||
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
|
||||
force = utils.parse_bool(web.input(force='0').force)
|
||||
|
||||
data = self.checked_data(
|
||||
self.validator.validate_deployment,
|
||||
|
@ -381,9 +382,9 @@ class DeploySelectedNodesWithTasks(BaseDeploySelectedNodes, RunMixin):
|
|||
cluster,
|
||||
deployment_tasks=data,
|
||||
graph_type=self.get_graph_type(),
|
||||
force=force,
|
||||
dry_run=self.get_dry_run(),
|
||||
noop_run=self.get_noop_run()
|
||||
noop_run=self.get_noop_run(),
|
||||
force=self.get_force()
|
||||
)
|
||||
|
||||
|
||||
|
|
|
@ -107,6 +107,20 @@ GRAPH_EXECUTE_PARAMS_SCHEMA = {
|
|||
},
|
||||
"debug": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"subgraphs": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"start": {
|
||||
"type": "string"
|
||||
},
|
||||
"end": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -106,3 +106,57 @@ class TestGraphExecutorHandler(base.BaseIntegrationTest):
|
|||
'api_version': '1'
|
||||
}]
|
||||
)
|
||||
|
||||
@mock.patch('nailgun.transactions.manager.rpc')
def test_execute_w_subgraph(self, rpc_mock):
    """Passing 'subgraphs' to the executor forwards them in tasks_metadata."""
    subgraph = {"start": "primary-database",
                "end": "keystone-db"}
    request_body = {
        "cluster": self.cluster.id,
        "graphs": [{"type": "test_graph"}],
        "debug": True,
        "noop_run": True,
        "dry_run": True,
        "subgraphs": [subgraph]
    }
    resp = self.app.post(
        reverse('GraphsExecutorHandler'),
        params=jsonutils.dumps(request_body),
        headers=self.default_headers
    )
    self.assertEqual(202, resp.status_code)

    task = objects.Task.get_by_uid(resp.json_body['id'])
    sub_task = task.subtasks[0]
    # The subgraphs list is expected to be passed through as-is to Astute.
    self.expected_metadata['subgraphs'] = [
        {"start": "primary-database",
         "end": "keystone-db"}
    ]
    expected_message = {
        'args': {
            'tasks_metadata': self.expected_metadata,
            'task_uuid': sub_task.uuid,
            'tasks_graph': {
                None: [],
                self.cluster.nodes[0].uid: [
                    {
                        'id': 'test_task',
                        'type': 'puppet',
                        'fail_on_error': True,
                        'parameters': {'cwd': '/'}
                    },
                ]
            },
            'tasks_directory': {},
            'dry_run': True,
            'noop_run': True,
            'debug': True
        },
        'respond_to': 'transaction_resp',
        'method': 'task_deploy',
        'api_version': '1'
    }
    rpc_mock.cast.assert_called_once_with('naily', [expected_message])
|
||||
|
|
|
@ -23,7 +23,6 @@ from nailgun import errors
|
|||
from nailgun import objects
|
||||
from nailgun.objects import DeploymentGraph
|
||||
from nailgun.orchestrator.task_based_deployment import TaskProcessor
|
||||
|
||||
from nailgun.test.base import BaseIntegrationTest
|
||||
from nailgun.test.base import mock_rpc
|
||||
from nailgun.utils import reverse
|
||||
|
@ -215,7 +214,13 @@ class BaseSelectedNodesTest(BaseIntegrationTest):
|
|||
|
||||
def check_deployment_call_made(self, nodes_uids, mcast):
    """Assert the deployment RPC targeted exactly the given node uids.

    NOTE(review): reconstructed from a diff hunk whose +/- markers were
    lost; the LCM branch is the new-side code per the hunk line counts.
    """
    args, _kwargs = mcast.call_args
    if objects.Release.is_lcm_supported(self.cluster.release):
        # LCM: deployed nodes are the keys of the tasks graph, minus the
        # synthetic 'master' and None entries.
        deployed_uids = list(args[1]['args']['tasks_graph'])
        deployed_uids.remove('master')
        deployed_uids.remove(None)
    else:
        deployed_uids = [node['uid'] for node in
                         args[1]['args']['deployment_info']]
    self.assertEqual(len(nodes_uids), len(deployed_uids))
    self.assertItemsEqual(nodes_uids, deployed_uids)
|
||||
|
||||
|
|
|
@ -61,6 +61,9 @@ def make_astute_message(transaction, context, graph, node_resolver):
|
|||
'failed': _get_node_attributes(graph, 'on_error'),
|
||||
'stopped': _get_node_attributes(graph, 'on_stop')
|
||||
}
|
||||
subgraphs = transaction.cache.get('subgraphs')
|
||||
if subgraphs:
|
||||
metadata['subgraphs'] = subgraphs
|
||||
objects.DeploymentHistoryCollection.create(transaction, tasks)
|
||||
|
||||
return {
|
||||
|
@ -125,7 +128,7 @@ class TransactionsManager(object):
|
|||
self.cluster_id = cluster_id
|
||||
|
||||
def execute(self, graphs, dry_run=False, noop_run=False, force=False,
|
||||
debug=False):
|
||||
debug=False, subgraphs=None):
|
||||
"""Start a new transaction with a given parameters.
|
||||
|
||||
Under the hood starting a new transaction means serialize a lot of
|
||||
|
@ -185,6 +188,7 @@ class TransactionsManager(object):
|
|||
cache['noop_run'] = noop_run
|
||||
cache['dry_run'] = dry_run
|
||||
cache['debug'] = debug
|
||||
cache['subgraphs'] = subgraphs
|
||||
|
||||
transaction.create_subtask(
|
||||
self.task_name,
|
||||
|
|
Loading…
Reference in New Issue