Add REST API endpoint for second page view

This commit adds a REST API endpoint for the second page view. It
takes in a key-value pair from the URL and returns a time series dict
of all the runs which had that key-value pair in its respective run
metadata. The datetime resolution, start date and stop date are all
adjustable with url parameters.

The second page view will use this with the key being project and the
value being whatever project the page is being generated for.

Co-Authored-By: Glauco Oliveira <gvinici@thoughtworks.com>
Co-Authored-By: Moises Trovó <mtrovo@thoughtworks.com>

Change-Id: I7837073c9029014e03b2faca642f77f997ebdf82
This commit is contained in:
Matthew Treinish 2015-09-22 00:37:08 -04:00 committed by Glauco Oliveira
parent ab3d3e7621
commit 4cc00e8999
3 changed files with 430 additions and 3 deletions

View File

@ -15,14 +15,19 @@
import ConfigParser
from dateutil import parser as date_parser
import itertools
import sys
import flask
from flask import abort
from flask.ext.jsonpify import jsonify
from flask import make_response
from operator import itemgetter
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from subunit2sql.db import api
app = flask.Flask(__name__)
engine = None
Session = None
@ -47,7 +52,7 @@ def _filter_by_date_res(date_res, sec_runs):
corr_res = run.replace(minute=0, second=0, microsecond=0)
elif date_res == 'day':
corr_res = run.date()
# Vuild runs dict with correct resolution
# Build runs dict with correct resolution
if corr_res in runs:
for local_run in sec_runs[run]:
if runs[corr_res].get(local_run, None):
@ -94,6 +99,53 @@ def get_runs_grouped_by_metadata_per_datetime(key):
return jsonify({'runs': out_runs})
def _group_runs_by_key(runs_by_time, groupby_key):
"""
Groups runs by a key.
This function assumes that your runs are already grouped by time.
"""
keyfunc = lambda c: c['metadata'][groupby_key]
grouped_runs_by = {}
for timestamp, runs_by_time in runs_by_time.iteritems():
if timestamp not in grouped_runs_by:
grouped_runs_by[timestamp] = {}
for key, val in itertools.groupby(runs_by_time, keyfunc):
grouped_runs_by[timestamp][key] = list(val)
return grouped_runs_by
def _get_runs_for_key_value_grouped_by(key, value, groupby_key,
start_date=None, stop_date=None,
date_range=None):
if date_range not in ['sec', 'min', 'hour', 'day']:
return ('Datetime resolution: %s, is not a valid'
' choice' % date_range), 400
global Session
session = Session()
runs_by_time = api.get_time_series_runs_by_key_value(key,
value,
start_date,
stop_date,
session)
# Groups runs by metadata
runs_by_groupby_key = _group_runs_by_key(runs_by_time, groupby_key)
# Group runs by the chosen data_range.
# That does not apply when you choose 'sec' since runs are already grouped
# by it.
if date_range != 'sec':
runs_by_groupby_key = _filter_by_date_res(date_range,
runs_by_groupby_key)
out_runs = {}
for run in runs_by_groupby_key:
out_runs[run.isoformat()] = runs_by_groupby_key[run]
return out_runs, 200
@app.route('/runs', methods=['GET'])
def get_runs():
global Session
@ -107,6 +159,69 @@ def get_runs():
return jsonify({'runs': runs})
def _calc_amount_of_successful_runs(runs):
"""
Calculates the amount of successful runs.
If there were any failures, then the whole run failed.
If there were no failures, then the whole run succeeded.
"""
was_run_successful = lambda x: 1 if x['fail'] == 0 else 0
successful_runs = map(was_run_successful, runs)
return sum(successful_runs)
def _calc_amount_of_failed_runs(runs, amount_of_success_runs):
"""
Calculates the amount of failed runs.
It simply subtracts the amount of runs by the amount of successful ones.
"""
total_runs = len(runs)
return total_runs - amount_of_success_runs
def _aggregate_runs(runs_by_time_delta):
    """Summarize grouped runs into per-job pass/fail/runtime stats.

    :param runs_by_time_delta: dict mapping a timestamp to a dict of
        {job_name: [run dicts]}.
    :return: {'timedelta': [...]} where each entry carries the timestamp
        and a job_data list (sorted by job name) with pass/fail counts
        and the mean run time, ordered chronologically.
    """
    aggregated_runs = []
    for timestamp, jobs in runs_by_time_delta.items():
        job_data = []
        for job_name, runs in jobs.items():
            successes = _calc_amount_of_successful_runs(runs)
            failures = _calc_amount_of_failed_runs(runs, successes)
            mean_run_time = (sum(run['run_time'] for run in runs) /
                             len(runs))
            job_data.append({'fail': failures,
                             'pass': successes,
                             'mean_run_time': mean_run_time,
                             'job_name': job_name})
        aggregated_runs.append({
            'datetime': timestamp,
            'job_data': sorted(job_data, key=itemgetter('job_name')),
        })
    aggregated_runs.sort(key=itemgetter('datetime'))
    return dict(timedelta=aggregated_runs)
@app.route('/projects/<path:project>/runs', methods=['GET'])
def get_runs_by_project(project):
    """Return aggregated per-job run stats for one project.

    Optional query parameters: start_date, stop_date and
    datetime_resolution (sec/min/hour/day, defaulting to 'day').
    """
    args = flask.request.args
    runs_by_time, status = _get_runs_for_key_value_grouped_by(
        "project",
        project,
        "build_name",
        args.get('start_date', None),
        args.get('stop_date', None),
        args.get('datetime_resolution', 'day'))
    if status != 200:
        # Propagate the validation error (e.g. a bad resolution) as-is.
        return abort(make_response(runs_by_time, status))
    return jsonify(_aggregate_runs(runs_by_time))
@app.route('/run/<string:run_id>/tests', methods=['GET'])
def get_tests_from_run(run_id):
global Session

View File

@ -24,8 +24,24 @@ from openstack_health.tests import base
# Shared fixture timestamps for the run-query tests.
timestamp_a = datetime.datetime(1914, 8, 26, 20, 0, 0)
timestamp_b = datetime.datetime(1914, 8, 26, 20, 0, 1)
# One second apart: fixtures for 'sec' resolution tests.
timestamp_s1 = datetime.datetime(1914, 8, 26, 20, 0, 0)
timestamp_s2 = datetime.datetime(1914, 8, 26, 20, 0, 1)
timestamp_s3 = datetime.datetime(1914, 8, 26, 20, 0, 2)
# One minute apart: fixtures for 'min' resolution tests.
timestamp_m1 = datetime.datetime(1914, 8, 26, 20, 0, 0)
timestamp_m2 = datetime.datetime(1914, 8, 26, 20, 1, 0)
timestamp_m3 = datetime.datetime(1914, 8, 26, 20, 2, 0)
# One hour apart: fixtures for 'hour' resolution tests.
timestamp_h1 = datetime.datetime(1914, 8, 26, 20, 0, 0)
timestamp_h2 = datetime.datetime(1914, 8, 26, 21, 0, 0)
timestamp_h3 = datetime.datetime(1914, 8, 26, 22, 0, 0)
# One day apart: fixtures for 'day' resolution tests.
timestamp_d1 = datetime.datetime(1914, 8, 26, 20, 0, 1)
timestamp_d2 = datetime.datetime(1914, 8, 27, 20, 0, 1)
class TestRestAPI(base.TestCase):
def setUp(self):
super(TestRestAPI, self).setUp()
api.app.config['TESTING'] = True
@ -138,8 +154,8 @@ class TestRestAPI(base.TestCase):
'metadata': {
'attrs': 'ab,cd',
'tags': 'worker-1',
}
}
}
})
def test_get_run_test_runs(self, api_mock):
api.Session = mock.MagicMock()
@ -285,3 +301,299 @@ class TestRestAPI(base.TestCase):
self.assertEqual(res.status_code, 400)
self.assertEqual('Datetime resolution: century, is not a valid choice',
res.data)
@mock.patch('subunit2sql.db.api.get_time_series_runs_by_key_value',
            return_value={
                timestamp_s1: [{'pass': 1,
                                'fail': 1,
                                'skip': 0,
                                'id': 'abc1',
                                'run_time': 1.0,
                                'metadata': {'build_name': 'value-1'}}],
                timestamp_s2: [{'pass': 1,
                                'fail': 1,
                                'skip': 0,
                                'id': 'abc2',
                                'run_time': 2.0,
                                'metadata': {'build_name': 'value-2'}}],
                timestamp_s3: [{'pass': 1,
                                'fail': 0,
                                'skip': 0,
                                'id': 'abc3',
                                'run_time': 3.0,
                                'metadata': {'build_name': 'value-3'}}],
            })
def test_get_runs_by_project_resolution_sec(self, api_mock):
    # Runs one second apart must stay in separate buckets at 'sec'
    # resolution; a run with any test failure aggregates to
    # pass=0/fail=1.
    api.Session = mock.MagicMock()
    query = 'datetime_resolution=sec'
    res = self.app.get('/projects/openstack/trove/runs?{0}'.format(query))
    self.assertEqual(200, res.status_code)
    expected_response_data = {'timedelta': [
        {'datetime': timestamp_s1.isoformat(),
         'job_data': [{'pass': 0,
                       'fail': 1,
                       'job_name': 'value-1',
                       'mean_run_time': 1.0}]},
        {'datetime': timestamp_s2.isoformat(),
         'job_data': [{'pass': 0,
                       'fail': 1,
                       'job_name': 'value-2',
                       'mean_run_time': 2.0}]},
        {'datetime': timestamp_s3.isoformat(),
         'job_data': [{'pass': 1,
                       'fail': 0,
                       'job_name': 'value-3',
                       'mean_run_time': 3.0}]},
    ]}
    response_data = json.loads(res.data)
    self.assertEqual(expected_response_data, response_data)
    # The route must query by the project key/value and pass through
    # unset start/stop dates.
    api_mock.assert_called_once_with('project',
                                     'openstack/trove',
                                     None,
                                     None,
                                     api.Session())
@mock.patch('subunit2sql.db.api.get_time_series_runs_by_key_value',
            return_value={
                timestamp_m1: [{'pass': 1,
                                'fail': 1,
                                'skip': 0,
                                'id': 'abc1',
                                'run_time': 1.0,
                                'metadata': {'build_name': 'value-1'}}],
                timestamp_m2: [{'pass': 1,
                                'fail': 1,
                                'skip': 0,
                                'id': 'abc2',
                                'run_time': 2.0,
                                'metadata': {'build_name': 'value-2'}}],
                timestamp_m3: [{'pass': 1,
                                'fail': 0,
                                'skip': 0,
                                'id': 'abc3',
                                'run_time': 3.0,
                                'metadata': {'build_name': 'value-3'}}],
            })
def test_get_runs_by_project_resolution_min(self, api_mock):
    # Runs one minute apart must stay in separate buckets at 'min'
    # resolution.
    api.Session = mock.MagicMock()
    query = 'datetime_resolution=min'
    res = self.app.get('/projects/openstack/trove/runs?{0}'.format(query))
    self.assertEqual(200, res.status_code)
    expected_response_data = {'timedelta': [
        {'datetime': timestamp_m1.isoformat(),
         'job_data': [{'pass': 0,
                       'fail': 1,
                       'job_name': 'value-1',
                       'mean_run_time': 1.0}]},
        {'datetime': timestamp_m2.isoformat(),
         'job_data': [{'pass': 0,
                       'fail': 1,
                       'job_name': 'value-2',
                       'mean_run_time': 2.0}]},
        {'datetime': timestamp_m3.isoformat(),
         'job_data': [{'pass': 1,
                       'fail': 0,
                       'job_name': 'value-3',
                       'mean_run_time': 3.0}]},
    ]}
    response_data = json.loads(res.data)
    self.assertEqual(expected_response_data, response_data)
    api_mock.assert_called_once_with('project',
                                     'openstack/trove',
                                     None,
                                     None,
                                     api.Session())
@mock.patch('subunit2sql.db.api.get_time_series_runs_by_key_value',
            return_value={
                timestamp_h1: [{'pass': 1,
                                'fail': 1,
                                'skip': 0,
                                'id': 'abc1',
                                'run_time': 1.0,
                                'metadata': {'build_name': 'value-1'}}],
                timestamp_h2: [{'pass': 1,
                                'fail': 1,
                                'skip': 0,
                                'id': 'abc2',
                                'run_time': 2.0,
                                'metadata': {'build_name': 'value-2'}}],
                timestamp_h3: [{'pass': 1,
                                'fail': 0,
                                'skip': 0,
                                'id': 'abc3',
                                'run_time': 3.0,
                                'metadata': {'build_name': 'value-3'}}],
            })
def test_get_runs_by_project_resolution_hour(self, api_mock):
    # Runs one hour apart must stay in separate buckets at 'hour'
    # resolution.
    api.Session = mock.MagicMock()
    query = 'datetime_resolution=hour'
    res = self.app.get('/projects/openstack/trove/runs?{0}'.format(query))
    self.assertEqual(200, res.status_code)
    expected_response_data = {'timedelta': [
        {'datetime': timestamp_h1.isoformat(),
         'job_data': [{'pass': 0,
                       'fail': 1,
                       'job_name': 'value-1',
                       'mean_run_time': 1.0}]},
        {'datetime': timestamp_h2.isoformat(),
         'job_data': [{'pass': 0,
                       'fail': 1,
                       'job_name': 'value-2',
                       'mean_run_time': 2.0}]},
        {'datetime': timestamp_h3.isoformat(),
         'job_data': [{'pass': 1,
                       'fail': 0,
                       'job_name': 'value-3',
                       'mean_run_time': 3.0}]},
    ]}
    response_data = json.loads(res.data)
    self.assertEqual(expected_response_data, response_data)
    api_mock.assert_called_once_with('project',
                                     'openstack/trove',
                                     None,
                                     None,
                                     api.Session())
@mock.patch('subunit2sql.db.api.get_time_series_runs_by_key_value',
            return_value={
                timestamp_d1: [{'pass': 1,
                                'fail': 1,
                                'skip': 0,
                                'id': 'abc1',
                                'run_time': 1.0,
                                'metadata': {'build_name': 'value-1'}},
                               {'pass': 10,
                                'fail': 0,
                                'skip': 0,
                                'id': 'abc1',
                                'run_time': 9.0,
                                'metadata': {'build_name': 'value-1'}},
                               {'pass': 2,
                                'fail': 2,
                                'skip': 0,
                                'id': 'abc2',
                                'run_time': 2.0,
                                'metadata': {'build_name': 'value-2'}}],
                timestamp_d2: [{'pass': 100,
                                'fail': 0,
                                'skip': 0,
                                'id': 'abc3',
                                'run_time': 20.0,
                                'metadata': {'build_name': 'value-3'}}]
            })
def test_get_runs_by_project_resolution_day(self, api_mock):
    # At 'day' resolution timestamps collapse to dates: the two
    # 'value-1' runs on day one average to mean_run_time 5.0 with one
    # successful and one failed run.
    api.Session = mock.MagicMock()
    query = 'datetime_resolution=day'
    res = self.app.get('/projects/openstack/trove/runs?{0}'.format(query))
    self.assertEqual(200, res.status_code)
    expected_response_data = {'timedelta': [
        {'datetime': timestamp_d1.date().isoformat(),
         'job_data': [{'pass': 1,
                       'fail': 1,
                       'job_name': 'value-1',
                       'mean_run_time': 5.0},
                      {'pass': 0,
                       'fail': 1,
                       'job_name': 'value-2',
                       'mean_run_time': 2.0},
                      ]},
        {'datetime': timestamp_d2.date().isoformat(),
         'job_data': [{'pass': 1,
                       'fail': 0,
                       'job_name': 'value-3',
                       'mean_run_time': 20.0},
                      ]}
    ]}
    response_data = json.loads(res.data)
    self.assertEqual(expected_response_data, response_data)
    api_mock.assert_called_once_with('project',
                                     'openstack/trove',
                                     None,
                                     None,
                                     api.Session())
@mock.patch('subunit2sql.db.api.get_time_series_runs_by_key_value',
            return_value={
                timestamp_d1: [{'pass': 1,
                                'fail': 1,
                                'skip': 0,
                                'id': 'abc1',
                                'run_time': 1.0,
                                'metadata': {'build_name': 'value-1'}},
                               {'pass': 10,
                                'fail': 0,
                                'skip': 0,
                                'id': 'abc1',
                                'run_time': 9.0,
                                'metadata': {'build_name': 'value-1'}},
                               {'pass': 2,
                                'fail': 2,
                                'skip': 0,
                                'id': 'abc2',
                                'run_time': 2.0,
                                'metadata': {'build_name': 'value-2'}}],
                timestamp_d2: [{'pass': 100,
                                'fail': 0,
                                'skip': 0,
                                'id': 'abc3',
                                'run_time': 20.0,
                                'metadata': {'build_name': 'value-3'}}]
            })
def test_get_runs_by_project_by_start_and_end_date(self, api_mock):
    # start_date/stop_date query parameters must be forwarded verbatim
    # to the db-layer query.
    api.Session = mock.MagicMock()
    start_date = timestamp_d1.date().isoformat()
    stop_date = timestamp_d2.date().isoformat()
    query = ('datetime_resolution=day&start_date={0}&stop_date={1}'
             .format(start_date, stop_date))
    res = self.app.get('/projects/openstack/trove/runs?{0}'.format(query))
    self.assertEqual(200, res.status_code)
    expected_response_data = {'timedelta': [
        {'datetime': timestamp_d1.date().isoformat(),
         'job_data': [{'pass': 1,
                       'fail': 1,
                       'job_name': 'value-1',
                       'mean_run_time': 5.0},
                      {'pass': 0,
                       'fail': 1,
                       'job_name': 'value-2',
                       'mean_run_time': 2.0},
                      ]},
        {'datetime': timestamp_d2.date().isoformat(),
         'job_data': [{'pass': 1,
                       'fail': 0,
                       'job_name': 'value-3',
                       'mean_run_time': 20.0},
                      ]}
    ]}
    response_data = json.loads(res.data)
    self.assertEqual(expected_response_data, response_data)
    api_mock.assert_called_once_with('project',
                                     'openstack/trove',
                                     start_date,
                                     stop_date,
                                     api.Session())
def test_get_runs_by_project_invalid_resolution(self):
    # An unknown datetime_resolution must yield a 400 response with a
    # descriptive error body.
    api.Session = mock.MagicMock()
    res = self.app.get(
        '/projects/openstack/trove/runs?datetime_resolution=century')
    self.assertEqual(res.status_code, 400)
    self.assertEqual('Datetime resolution: century, is not a valid choice',
                     res.data)

View File

@ -3,7 +3,7 @@
# process, which may cause wedges in the gate later.
pbr<2.0,>=1.6
flask
subunit2sql>=0.8.0
subunit2sql>=0.11.0
sqlalchemy
flask-jsonpify
PyMySQL>=0.6.2