[APIv2] Consolidate cluster creation endpoints

Creation of a single cluster and creation of multiple clusters now
share an API endpoint (in the APIv2 case). More specifically, the
original single-cluster endpoint accepts an optional `count` parameter
in the request body, and the separate multiple-cluster endpoint has
been removed.
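
As a rough illustration of the consolidated endpoint, here is a minimal
client-side sketch in Python. The service URL, token, template and image
IDs, and all fields other than `count` and `plugin_version` are
placeholders and not taken from this patch:

import requests

SAHARA_URL = "http://controller:8386/v2"      # assumed endpoint, not from this patch
TOKEN = "<keystone-token>"                    # placeholder auth token

body = {
    "name": "demo",
    "plugin_name": "fake",
    "plugin_version": "0.1",                  # APIv2 field; renamed to hadoop_version internally
    "cluster_template_id": "<template-uuid>", # placeholder
    "default_image_id": "<image-uuid>",       # placeholder
    "count": 3,                               # new: request three clusters in one call
}

# With "count" present the request is routed to create_multiple_clusters();
# leaving it out creates a single cluster, as before.
resp = requests.post(SAHARA_URL + "/clusters",
                     headers={"X-Auth-Token": TOKEN},
                     json=body)
print(resp.status_code, resp.json())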

We can make this kind of change because APIv2 is still experimental.

Also, when creating multiple clusters, the response now contains
full details about each cluster; previously, the response contained
only the cluster IDs.
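
A rough sketch of the difference in response shape (inferred from the
updated service code and unit test in this change; IDs, names, and the
selection of fields shown are made up):

response = {
    "clusters": [
        {"cluster": {"id": "a1b2c3", "name": "demo-1",
                     "status": "Active"}},   # plus the remaining cluster fields
        {"cluster": {"id": "d4e5f6", "name": "demo-2",
                     "status": "Active"}},
    ]
}

# Previously the same call returned only the IDs:
old_response = {"clusters": ["a1b2c3", "d4e5f6"]}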

Change-Id: I90faf4956a8ea4b4ae31a29382732771fdfddecb
Story: 2002099
Task: 19777
Jeremy Freudberg 2018-06-04 15:26:18 -04:00
parent 4f074856b6
commit f96709a59e
6 changed files with 30 additions and 23 deletions


@@ -0,0 +1,5 @@
+---
+features:
+  - The experimental APIv2 supports simultaneous creation of multiple clusters
+    only through POST /v2/clusters (using the `count` parameter). The POST
+    /v2/clusters/multiple endpoint has been removed.


@@ -38,23 +38,17 @@ def clusters_list():

 @rest.post('/clusters')
 @acl.enforce("data-processing:clusters:create")
-@v.validate(v_c_schema.CLUSTER_SCHEMA_V2, v_c.check_cluster_create)
+@v.validate(v_c_schema.CLUSTER_SCHEMA_V2,
+            v_c.check_one_or_multiple_clusters_create)
 def clusters_create(data):
     # renaming hadoop_version -> plugin_version
     # this can be removed once APIv1 is deprecated
     data['hadoop_version'] = data['plugin_version']
     del data['plugin_version']
-    return u.render(api.create_cluster(data).to_wrapped_dict())
-
-
-@rest.post('/clusters/multiple')
-@acl.enforce("data-processing:clusters:create")
-@v.validate(
-    v_c_schema.MULTIPLE_CLUSTER_SCHEMA_V2, v_c.check_multiple_clusters_create)
-def clusters_create_multiple(data):
-    data['hadoop_version'] = data['plugin_version']
-    del data['plugin_version']
-    return u.render(api.create_multiple_clusters(data))
+    if data.get('count', None) is not None:
+        return u.render(api.create_multiple_clusters(data))
+    else:
+        return u.render(api.create_cluster(data).to_wrapped_dict())


 @rest.put('/clusters/<cluster_id>')


@@ -105,9 +105,9 @@ def create_multiple_clusters(values):
         cluster_dict['name'] = get_multiple_cluster_name(num_of_clusters,
                                                          cluster_name,
                                                          counter + 1)
-        cluster = _cluster_create(cluster_dict, plugin)
+        cluster = _cluster_create(cluster_dict, plugin).to_wrapped_dict()

-        clusters.append(cluster.id)
+        clusters.append(cluster)

     clusters_dict = {'clusters': clusters}
     return clusters_dict
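
For context, to_wrapped_dict() appears to wrap each cluster's dict under
its resource-name key, which is why the updated test further down reaches
the ID via ['cluster']['id']. A rough sketch of the effect (not the actual
Sahara implementation; names and IDs are made up):

# Rough sketch of the wrapping effect assumed above.
def to_wrapped_dict_sketch(cluster_dict, resource_name="cluster"):
    # {"cluster": {...all cluster fields...}} instead of a bare id string
    return {resource_name: cluster_dict}

clusters = [to_wrapped_dict_sketch({"id": "a1b2c3", "name": "demo-1"}),
            to_wrapped_dict_sketch({"id": "d4e5f6", "name": "demo-2"})]
print(clusters[0]["cluster"]["id"])   # "a1b2c3", mirroring the test change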


@@ -40,6 +40,13 @@ def check_multiple_clusters_create(data, **kwargs):
         b.check_cluster_unique_name(cluster_name)


+def check_one_or_multiple_clusters_create(data, **kwargs):
+    if data.get('count', None) is not None:
+        check_multiple_clusters_create(data, **kwargs)
+    else:
+        check_cluster_create(data, **kwargs)
+
+
 def _check_cluster_create(data):
     plugin_version = 'hadoop_version'
     if data.get('plugin_version'):


@@ -46,6 +46,12 @@ def _build_cluster_schema(api_version='v1'):
                 "type": "string",
                 "format": "uuid",
             }})
+    if api_version == 'v2':
+        cluster_schema['properties'].update({
+            "count": {
+                "type": "integer"
+            }})
+
     return cluster_schema
@@ -59,13 +65,6 @@ MULTIPLE_CLUSTER_SCHEMA['properties'].update({
     }})
 MULTIPLE_CLUSTER_SCHEMA['required'].append('count')

-MULTIPLE_CLUSTER_SCHEMA_V2 = copy.deepcopy(CLUSTER_SCHEMA_V2)
-MULTIPLE_CLUSTER_SCHEMA_V2['properties'].update({
-    "count": {
-        "type": "integer"
-    }})
-MULTIPLE_CLUSTER_SCHEMA_V2['required'].append('count')
-
 CLUSTER_UPDATE_SCHEMA = {
     "type": "object",
     "properties": {


@@ -123,8 +123,10 @@ class TestClusterApi(base.SaharaWithDbTestCase):
         MULTIPLE_CLUSTERS['count'] = 2
         clusters = api.create_multiple_clusters(MULTIPLE_CLUSTERS)
         self.assertEqual(2, check_cluster.call_count)
-        result_cluster1 = api.get_cluster(clusters['clusters'][0])
-        result_cluster2 = api.get_cluster(clusters['clusters'][1])
+        result_cluster1 = api.get_cluster(
+            clusters['clusters'][0]['cluster']['id'])
+        result_cluster2 = api.get_cluster(
+            clusters['clusters'][1]['cluster']['id'])
         self.assertEqual(c_u.CLUSTER_STATUS_ACTIVE, result_cluster1.status)
         self.assertEqual(c_u.CLUSTER_STATUS_ACTIVE, result_cluster2.status)
         expected_count = {