Add run time graph for jobs

Motivation for this change:
  Adding the run time graph to the job's page raises user awareness
  of changes in run times among jobs. Adding the scatter graph to the
  job's page raises awareness of run time deviation.

New behavior:
  A combined line and scatter chart called Job Run Time is available
  in the job view. It gathers run_time data from the job_data objects
  provided by the timedelta entries in the response of
  /runs/key/<key>/<value>, limited by the value selected in the
  resolution dropdown.
  The original response of this API call is now wrapped in a 'data'
  property, and the additional information needed to draw the scatter
  chart on the canvas is added under a new dict property called
  'numeric', as sketched below.
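
  For illustration, a response under the new schema might look like
  the following (a minimal sketch; the job name, timestamp, and
  values are hypothetical):

    {"data": {"timedelta": [
         {"datetime": "2018-04-01T00:00:00",
          "job_data": [{"job_name": "tempest-full",
                        "pass": 10,
                        "fail": 0,
                        "mean_run_time": 4859.3}]}]},
     "numeric": {
         "tempest-full": {"2018-04-01T00:00:00": 4859.3},
         "tempest-full-avg": {"2018-04-01T00:00:00": 4859.3}}}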

NOTE:
  * The methods in run_aggregator were derived from [1] and [2]; a
    usage sketch follows this note.
  * An experiment was run to determine which implementation yields
    the lowest load time. The results for the job 'tempest-full'
    over 2-week and 1-month periods are as follows:
    - latest patch with 1 db call
        2.21s - 2 weeks
        3.40s - 1 month

    - older patch with 2 db calls
        7.86s - 2 weeks
        10.16s - 1 month
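  As a usage sketch of the run_aggregator methods (the timestamps and
  run times are illustrative; 'day' is one of the resolution dropdown
  values):

    import datetime

    from openstack_health import run_aggregator

    run_times = {
        datetime.datetime(2018, 6, 13, 7, 52, 34): {
            'tempest-dsvm-neutron-full': 5651.39},
        datetime.datetime(2018, 6, 14, 6, 18, 18): {
            'tempest-dsvm-neutron-full': 4307.42},
    }
    # Maps the build name to {ISO date: run time}; a
    # '<build_name>-avg' rolling-mean (window 10) series is included
    # once enough samples exist (all-NaN columns are dropped).
    numeric = run_aggregator.get_numeric_data(run_times, 'day')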

Completes the Queens priority "Job duration graph in o-h" from:
  https://etherpad.openstack.org/p/qa-queens-priorities

[1] https://review.openstack.org/#/c/370913/4
[2] 4db9a61471/openstack_health/test_run_aggregator.py (L70-L90)
Change-Id: Ib5196d86b6b5efa0083d4aa4dd28f1fac3493560
mccasland, trevor (tm2086) 2018-04-11 11:46:15 -05:00
parent e4dff96029
commit 741a457f72
9 changed files with 517 additions and 149 deletions

View File

@@ -137,7 +137,7 @@ function GroupedRunsController(
stop_date: viewService.periodEnd(),
datetime_resolution: viewService.resolution().key
}).then(function(response) {
vm.processData(response.data, vm.searchJob);
vm.processData(response.data.data, vm.searchJob);
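// response.data is the HTTP body; the extra .data unwraps the new
// 'data' property the API now adds around the original payload.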
vm.loaded = true;
});
healthService.getRecentGroupedRuns(vm.runMetadataKey, vm.name).then(function(response) {

View File

@@ -16,6 +16,7 @@ function JobController(
vm.name = decodeURIComponent(jobName);
vm.recentRuns = [];
vm.loaded = false;
vm.runTimeLoaded = false;
vm.hold = 0;
pageTitleService.update('Job: ' + vm.name);
@@ -152,6 +153,45 @@ function JobController(
});
};
vm.processRunData = function(data) {
if (!data.data.hasOwnProperty('timedelta')) {
return;
}
var jobName = data.data.timedelta[0].job_data[0].job_name;
var jobRunTimeEntries = [];
var jobMeanRunTimeEntries = [];
var runTimeObjs = data.numeric[jobName];
var runTimeAvgObjs = data.numeric[jobName + '-avg'];
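// The 'numeric' payload maps the job name to raw run times and
// '<job name>-avg' to the rolling mean, each keyed by ISO date.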
for (var date in runTimeObjs) {
// parse dates and create data series
if (runTimeObjs.hasOwnProperty(date)) {
var runTime = runTimeObjs[date];
date = new Date(date).getTime();
if (!isNaN(runTime)) {
jobRunTimeEntries.push({
x: date,
y: parseFloat(runTime)
});
}
}
}
for (var date in runTimeAvgObjs) {
// parse dates and create data series
if (runTimeAvgObjs.hasOwnProperty(date)) {
var runTimeAvg = runTimeAvgObjs[date];
date = new Date(date).getTime();
if (!isNaN(runTimeAvg)) {
jobMeanRunTimeEntries.push({
x: date,
y: parseFloat(runTimeAvg)
});
}
}
}
vm.jobMeanRunTimeEntries = jobMeanRunTimeEntries;
vm.jobRunTimeEntries = jobRunTimeEntries;
};
vm.loadData = function() {
if (vm.hold > 0) {
return;
@@ -165,6 +205,14 @@ function JobController(
vm.processData(response.data, vm.searchTest);
vm.loaded = true;
});
healthService.getRunsForRunMetadataKey('build_name', vm.name, {
start_date: viewService.periodStart(),
stop_date: viewService.periodEnd(),
datetime_resolution: viewService.resolution().key
}).then(function(response) {
vm.processRunData(response.data);
vm.runTimeLoaded = true;
});
healthService.getRecentGroupedRuns('build_name', vm.name).then(function(response) {
vm.recentRuns = response.data;
});
@@ -181,7 +229,7 @@ function JobController(
});
$scope.$on('view:period', function(event, corrected) {
if (vm.loaded && !corrected) {
if (vm.loaded && vm.runTimeLoaded && !corrected) {
vm.loadData();
}
});

View File

@@ -14,6 +14,45 @@
<fresh-check></fresh-check>
</div>
</div>
<div class="row">
<div class="col-lg-12">
<div class="panel panel-default">
<div class="panel-heading">
<h3 class="panel-title">Job Run Time</h3>
</div>
<div class="panel-body">
<chart width="100%" height="250px">
<chart-axis name="x" type="time" path=".x" opposes="y"
align="bottom" orient="horizontal" draw="true"
granular-format="%x %X"></chart-axis>
<chart-axis name="y" type="linear" path=".y" opposes="x"
align="left" orient="vertical" draw="true" grid="true"
granular-format=".1f| sec"></chart-axis>
<chart-dataset name="jobmeanruntime"
title="Mean Run Time"
data="job.jobMeanRunTimeEntries"></chart-dataset>
<chart-canvas-line dataset="jobmeanruntime"
axes="x y"
stroke="black"
line-width="2"></chart-canvas-line>
<chart-dataset name="jobruntime"
title="Run Time"
data="job.jobRunTimeEntries"></chart-dataset>
<chart-canvas-scatter dataset="jobruntime"
axes="x y"
fill="rgba(0,0,255,0.5)"
radius="2"></chart-canvas-scatter>
<chart-tooltip primary="x" secondary="y"></chart-tooltip>
</chart>
</div>
</div>
</div>
</div>
<div class="row">
<div class="col-lg-12">
<div class="panel panel-default">

View File

@@ -18,6 +18,7 @@ from contextlib import contextmanager
import datetime
from dateutil import parser as date_parser
import itertools
import numpy
import os
from six.moves import configparser as ConfigParser
from six.moves.urllib import parse
@@ -40,7 +41,7 @@ from sqlalchemy.orm import sessionmaker
from subunit2sql.db import api
from openstack_health import distributed_dbm
from openstack_health.run_aggregator import RunAggregator
from openstack_health import run_aggregator
from openstack_health import test_run_aggregator
try:
@@ -253,7 +254,8 @@ def get_runs_grouped_by_metadata_per_datetime(key):
return ('Datetime resolution: %s, is not a valid'
' choice' % datetime_resolution), 400
runs = RunAggregator(sec_runs).aggregate(datetime_resolution)
runs = run_aggregator.RunAggregator(sec_runs).aggregate(
datetime_resolution)
return jsonify({'runs': runs})
@@ -371,17 +373,34 @@ def get_runs_by_run_metadata_key(run_metadata_key, value):
start_date,
stop_date,
session))
# Prepare run_times to be consumed when producing the 'numeric' data.
run_times = {}
for run_at, run_data in runs.items():
for run in run_data:
if run['fail'] > 0 or run['pass'] == 0:
continue
build_name = run['metadata']['build_name']
# Use setdefault to avoid a KeyError when two different build_names
# share the same run_at timestamp.
run_times.setdefault(run_at, {}).setdefault(
build_name, []).append(run['run_time'])
# If there is more than one run with the same run_at time and
# build_name, just average the results.
for run_at, run_time_data in run_times.items():
for build_name, times in run_time_data.items():
run_times[run_at][build_name] = numpy.mean(times)
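# e.g. two passing runs of one build_name at the same run_at with
# run_times 4.0 and 2.0 collapse to a single 3.0 entry.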
numeric = run_aggregator.get_numeric_data(
run_times, datetime_resolution)
# Group runs by metadata.
group_by = "build_name"
runs_by_build_name = _group_runs_by_key(runs, group_by)
# Group runs by the chosen data_range.
# That does not apply when you choose 'sec' since runs are already
# grouped by it.
aggregated_runs = \
RunAggregator(runs_by_build_name).aggregate(datetime_resolution)
return jsonify(_aggregate_runs(aggregated_runs))
aggregated_runs = run_aggregator.RunAggregator(
runs_by_build_name).aggregate(datetime_resolution)
data = _aggregate_runs(aggregated_runs)
return jsonify({'numeric': numeric, 'data': data})
@app.route('/runs/key/<path:run_metadata_key>/<path:value>/recent',

View File

@@ -11,14 +11,44 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from dateutil import parser
import pandas as pd
from openstack_health.base_aggregator import BaseAggregator
from openstack_health import base_aggregator as base
class RunAggregator(BaseAggregator):
def format_output_numeric_dicts(df):
numeric_dict = {}
temp_dict = df.to_dict()
for build_name in temp_dict:
numeric_dict[build_name] = {}
for run_at in temp_dict[build_name]:
numeric_dict[build_name][
run_at.isoformat()] = temp_dict[build_name][run_at]
return numeric_dict
def get_numeric_data(run_times_time_series, sample_rate):
temp_dict = {}
sample_rate = base.resample_matrix[sample_rate]
for run_at, run in run_times_time_series.items():
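# Each run maps build_name to an averaged run_time; take the first
# (typically the only) entry for this timestamp.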
build_name, run_time = list(run.items())[0]
if build_name in temp_dict:
temp_dict[build_name][run_at] = run_time
else:
temp_dict[build_name] = {run_at: run_time}
df = pd.DataFrame(temp_dict)
numeric_df = df.resample(sample_rate).mean()
temp_numeric_df = numeric_df.interpolate(method='time', limit=10)
for build_name in numeric_df:
numeric_df[build_name + '-avg'] = temp_numeric_df[build_name].rolling(
10).mean()
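# The '-avg' series is a 10-sample rolling mean over the
# time-interpolated data; it backs the mean line on the chart.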
numeric_df = numeric_df.dropna(how='all', axis=1)
return format_output_numeric_dicts(numeric_df)
class RunAggregator(base.BaseAggregator):
def __init__(self, runs):
self.runs = runs

View File

@@ -421,23 +421,23 @@ class TestRestAPI(base.TestCase):
self.assertEqual(200, res.status_code)
expected_response_data = {'timedelta': [
{'datetime': timestamp_s1.isoformat(),
'job_data': [{'pass': 0,
'fail': 1,
'job_name': 'value-1',
'mean_run_time': 1.0}]},
{'datetime': timestamp_s2.isoformat(),
'job_data': [{'pass': 0,
'fail': 1,
'job_name': 'value-2',
'mean_run_time': 2.0}]},
{'datetime': timestamp_s3.isoformat(),
'job_data': [{'pass': 1,
'fail': 0,
'job_name': 'value-3',
'mean_run_time': 3.0}]},
]}
expected_response_data = {u'data': {u'timedelta': [
{u'datetime': u'%s' % timestamp_s1.isoformat(),
u'job_data': [{u'pass': 0,
u'fail': 1,
u'job_name': u'value-1',
u'mean_run_time': 1.0}]},
{u'datetime': u'%s' % timestamp_s2.isoformat(),
u'job_data': [{u'pass': 0,
u'fail': 1,
u'job_name': u'value-2',
u'mean_run_time': 2.0}]},
{u'datetime': u'%s' % timestamp_s3.isoformat(),
u'job_data': [{u'pass': 1,
u'fail': 0,
u'job_name': u'value-3',
u'mean_run_time': 3.0}]},
]}, u'numeric': {u'value-3': {u'%s' % timestamp_s3.isoformat(): 3.0}}}
response_data = json.loads(res.data.decode('utf-8'))
self.assertEqual(expected_response_data, response_data)
@@ -466,7 +466,7 @@
'skip': 0,
'id': 'abc3',
'run_time': 3.0,
'metadata': {'build_name': 'value-3'}}],
'metadata': {'build_name': 'value-3'}}]
})
def test_get_runs_by_project_resolution_min(self, api_mock):
query = 'datetime_resolution=min'
@@ -475,23 +475,23 @@
self.assertEqual(200, res.status_code)
expected_response_data = {'timedelta': [
{'datetime': timestamp_m1.isoformat(),
'job_data': [{'pass': 0,
'fail': 1,
'job_name': 'value-1',
'mean_run_time': 1.0}]},
{'datetime': timestamp_m2.isoformat(),
'job_data': [{'pass': 0,
'fail': 1,
'job_name': 'value-2',
'mean_run_time': 2.0}]},
{'datetime': timestamp_m3.isoformat(),
'job_data': [{'pass': 1,
'fail': 0,
'job_name': 'value-3',
'mean_run_time': 3.0}]},
]}
expected_response_data = {u'data': {u'timedelta': [
{u'datetime': u'%s' % timestamp_m1.isoformat(),
u'job_data': [{u'pass': 0,
u'fail': 1,
u'job_name': u'value-1',
u'mean_run_time': 1.0}]},
{u'datetime': u'%s' % timestamp_m2.isoformat(),
u'job_data': [{u'pass': 0,
u'fail': 1,
u'job_name': u'value-2',
u'mean_run_time': 2.0}]},
{u'datetime': u'%s' % timestamp_m3.isoformat(),
u'job_data': [{u'pass': 1,
u'fail': 0,
u'job_name': u'value-3',
u'mean_run_time': 3.0}]},
]}, u'numeric': {u'value-3': {u'%s' % timestamp_m3.isoformat(): 3.0}}}
response_data = json.loads(res.data.decode('utf-8'))
self.assertEqual(expected_response_data, response_data)
@@ -520,7 +520,7 @@
'skip': 0,
'id': 'abc3',
'run_time': 3.0,
'metadata': {'build_name': 'value-3'}}],
'metadata': {'build_name': 'value-3'}}]
})
def test_get_runs_by_project_resolution_hour(self, api_mock):
query = 'datetime_resolution=hour'
@@ -529,23 +529,23 @@
self.assertEqual(200, res.status_code)
expected_response_data = {'timedelta': [
{'datetime': timestamp_h1.isoformat(),
'job_data': [{'pass': 0,
'fail': 1,
'job_name': 'value-1',
'mean_run_time': 1.0}]},
{'datetime': timestamp_h2.isoformat(),
'job_data': [{'pass': 0,
'fail': 1,
'job_name': 'value-2',
'mean_run_time': 2.0}]},
{'datetime': timestamp_h3.isoformat(),
'job_data': [{'pass': 1,
'fail': 0,
'job_name': 'value-3',
'mean_run_time': 3.0}]},
]}
expected_response_data = {u'data': {u'timedelta': [
{u'datetime': u'%s' % timestamp_h1.isoformat(),
u'job_data': [{u'pass': 0,
u'fail': 1,
u'job_name': u'value-1',
u'mean_run_time': 1.0}]},
{u'datetime': u'%s' % timestamp_h2.isoformat(),
u'job_data': [{u'pass': 0,
u'fail': 1,
u'job_name': u'value-2',
u'mean_run_time': 2.0}]},
{u'datetime': u'%s' % timestamp_h3.isoformat(),
u'job_data': [{u'pass': 1,
u'fail': 0,
u'job_name': u'value-3',
u'mean_run_time': 3.0}]},
]}, u'numeric': {u'value-3': {u'%s' % timestamp_h3.isoformat(): 3.0}}}
response_data = json.loads(res.data.decode('utf-8'))
self.assertEqual(expected_response_data, response_data)
@@ -589,26 +589,37 @@
self.assertEqual(200, res.status_code)
expected_response_data = {'timedelta': [
{'datetime': timestamp_d1.date().isoformat(),
'job_data': [{'pass': 1,
'fail': 1,
'job_name': 'value-1',
'mean_run_time': 5.0},
{'pass': 0,
'fail': 1,
'job_name': 'value-2',
'mean_run_time': 2.0},
]},
{'datetime': timestamp_d2.date().isoformat(),
'job_data': [{'pass': 1,
'fail': 0,
'job_name': 'value-3',
'mean_run_time': 20.0},
]}
]}
expected_response_data = {u'data': {u'timedelta': [
{u'datetime': u'%s' % timestamp_d1.date().isoformat(),
u'job_data': [{u'pass': 1,
u'fail': 1,
u'job_name': u'value-1',
u'mean_run_time': 5.0},
{u'pass': 0,
u'fail': 1,
u'job_name': u'value-2',
u'mean_run_time': 2.0}]},
{u'datetime': u'%s' % timestamp_d2.date().isoformat(),
u'job_data': [{u'pass': 1,
u'fail': 0,
u'job_name': u'value-3',
u'mean_run_time': 20.0}]}
]}, u'numeric': {u'value-1': {u'%s' % timestamp_d1.isoformat(): 9.0,
u'%s' % timestamp_d2.isoformat():
numpy.NaN},
u'value-3': {u'%s' % timestamp_d1.isoformat():
numpy.NaN,
u'%s' % timestamp_d2.isoformat():
20.0}}}
response_data = json.loads(res.data.decode('utf-8'))
# numpy.NaN == numpy.NaN is False; a KeyError here means the
# dicts are not equal.
for project, item in list(expected_response_data['numeric'].items()):
for date, run_time in list(item.items()):
if (numpy.isnan(run_time) and
numpy.isnan(response_data['numeric'][project][date])):
del expected_response_data['numeric'][project][date]
del response_data['numeric'][project][date]
self.assertEqual(expected_response_data, response_data)
api_mock.assert_called_once_with('project',
'openstack/trove',
@@ -653,26 +664,36 @@
self.assertEqual(200, res.status_code)
expected_response_data = {'timedelta': [
{'datetime': timestamp_d1.date().isoformat(),
'job_data': [{'pass': 1,
'fail': 1,
'job_name': 'value-1',
'mean_run_time': 5.0},
{'pass': 0,
'fail': 1,
'job_name': 'value-2',
'mean_run_time': 2.0},
]},
{'datetime': timestamp_d2.date().isoformat(),
'job_data': [{'pass': 1,
'fail': 0,
'job_name': 'value-3',
'mean_run_time': 20.0},
]}
]}
expected_response_data = {u'data': {u'timedelta': [
{u'datetime': u'%s' % timestamp_d1.date().isoformat(),
u'job_data': [{u'pass': 1,
u'fail': 1,
u'job_name': u'value-1',
u'mean_run_time': 5.0},
{u'pass': 0,
u'fail': 1,
u'job_name': u'value-2',
u'mean_run_time': 2.0}]},
{u'datetime': u'%s' % timestamp_d2.date().isoformat(),
u'job_data': [{u'pass': 1,
u'fail': 0,
u'job_name': u'value-3',
u'mean_run_time': 20.0}]}
]}, u'numeric': {u'value-1': {u'%s' % timestamp_d1.isoformat(): 9.0,
u'%s' % timestamp_d2.isoformat():
numpy.NaN},
u'value-3': {u'%s' % timestamp_d1.isoformat():
numpy.NaN,
u'%s' % timestamp_d2.isoformat(): 20.0}}}
response_data = json.loads(res.data.decode('utf-8'))
# numpy.NaN == numpy.NaN is False; a KeyError here means the
# dicts are not equal.
for project, item in expected_response_data['numeric'].items():
for date, run_time in list(item.items()):
if (numpy.isnan(run_time) and
numpy.isnan(response_data['numeric'][project][date])):
del expected_response_data['numeric'][project][date]
del response_data['numeric'][project][date]
self.assertEqual(expected_response_data, response_data)
api_mock.assert_called_once_with('project',
'openstack/trove',
@@ -687,6 +708,68 @@
self.assertEqual('Datetime resolution: century, is not a valid choice',
res.data.decode('utf-8'))
@mock.patch('subunit2sql.db.api.get_time_series_runs_by_key_value',
return_value={
timestamp_d1: [{'pass': 1,
'fail': 0,
'skip': 0,
'id': 'abc1',
'run_time': 4.0,
'metadata': {
'build_name':
'tempest-dsvm-neutron-full'}},
{'pass': 10,
'fail': 1,
'skip': 0,
'id': 'abc1',
'run_time': 9.0,
'metadata': {
'build_name':
'tempest-dsvm-neutron-full'}},
{'pass': 2,
'fail': 0,
'skip': 0,
'id': 'abc2',
'run_time': 2.0,
'metadata': {
'build_name':
'tempest-dsvm-neutron-full'}}],
timestamp_d2: [{'pass': 100,
'fail': 0,
'skip': 0,
'id': 'abc3',
'run_time': 20.0,
'metadata': {
'build_name':
'tempest-dsvm-neutron-full'}}]
})
def test_get_runs_by_build_name_and_same_run_at_times(self, api_mock):
start_date = timestamp_d1.date().isoformat()
stop_date = timestamp_d2.date().isoformat()
query = ('datetime_resolution=day&start_date={0}&stop_date={1}'
.format(start_date, stop_date))
res = self.app.get('/runs/key/build_name/tempest-dsvm-neutron-full?{0}'
.format(query))
self.assertEqual(200, res.status_code)
expected_response_data = {u'data': {u'timedelta': [
{u'datetime': u'%s' % timestamp_d1.date().isoformat(),
u'job_data': [{u'pass': 2,
u'fail': 1,
u'job_name': u'tempest-dsvm-neutron-full',
u'mean_run_time': 5.0}]},
{u'datetime': u'%s' % timestamp_d2.date().isoformat(),
u'job_data': [{u'pass': 1,
u'fail': 0,
u'job_name': u'tempest-dsvm-neutron-full',
u'mean_run_time': 20.0}]}]},
u'numeric': {
u'tempest-dsvm-neutron-full': {
u'%s' % timestamp_d1.isoformat(): 3.0,
u'%s' % timestamp_d2.isoformat(): 20.0}}}
response_data = json.loads(res.data.decode('utf-8'))
self.maxDiff = None
self.assertDictEqual(expected_response_data, response_data)
@mock.patch('openstack_health.api._check_db_availability',
return_value=False)
@mock.patch('openstack_health.api._check_er_availability',

View File

@@ -13,11 +13,103 @@
# under the License.
import datetime
import numpy as np
from openstack_health.run_aggregator import RunAggregator
from openstack_health import run_aggregator
from openstack_health.tests import base
class TestRunAggregatorGetNumericData(base.TestCase):
def setUp(self):
super(TestRunAggregatorGetNumericData, self).setUp()
self.runs = {
datetime.datetime(2018, 6, 22, 1, 22, 45): {
'tempest-dsvm-neutron-full': 4495.36
},
datetime.datetime(2018, 6, 20, 17, 34, 30): {
'tempest-dsvm-neutron-full': 4133.03
},
datetime.datetime(2018, 6, 25, 1, 13, 22): {
'tempest-dsvm-neutron-full': 6047.95
},
datetime.datetime(2018, 6, 22, 21, 44, 44): {
'tempest-dsvm-neutron-full': 6689.23
},
datetime.datetime(2018, 6, 18, 5, 48, 36): {
'tempest-dsvm-neutron-full': 4183.85
},
datetime.datetime(2018, 6, 23, 3, 44, 18): {
'tempest-dsvm-neutron-full': 6150.95
},
datetime.datetime(2018, 6, 19, 10, 7, 35): {
'tempest-dsvm-neutron-full': 4545.41
},
datetime.datetime(2018, 6, 13, 7, 52, 34): {
'tempest-dsvm-neutron-full': 5651.39
},
datetime.datetime(2018, 6, 14, 6, 18, 18): {
'tempest-dsvm-neutron-full': 4307.42
},
datetime.datetime(2018, 6, 13, 13, 44, 25): {
'tempest-dsvm-neutron-full': 5131.0
},
datetime.datetime(2018, 6, 14, 3, 52, 24): {
'tempest-dsvm-neutron-full': 5228.78
}
}
def test_get_numeric_data(self):
self.maxDiff = None
expected = {
'tempest-dsvm-neutron-full': {
'2018-06-13T00:00:00': 5391.195,
'2018-06-14T00:00:00': 4768.1,
'2018-06-15T00:00:00': np.nan,
'2018-06-16T00:00:00': np.nan,
'2018-06-17T00:00:00': np.nan,
'2018-06-18T00:00:00': 4183.85,
'2018-06-19T00:00:00': 4545.41,
'2018-06-20T00:00:00': 4133.03,
'2018-06-21T00:00:00': np.nan,
'2018-06-22T00:00:00': 5592.295,
'2018-06-23T00:00:00': 6150.95,
'2018-06-24T00:00:00': np.nan,
'2018-06-25T00:00:00': 6047.95
},
'tempest-dsvm-neutron-full-avg': {
'2018-06-13T00:00:00': np.nan,
'2018-06-14T00:00:00': np.nan,
'2018-06-15T00:00:00': np.nan,
'2018-06-16T00:00:00': np.nan,
'2018-06-17T00:00:00': np.nan,
'2018-06-18T00:00:00': np.nan,
'2018-06-19T00:00:00': np.nan,
'2018-06-20T00:00:00': np.nan,
'2018-06-21T00:00:00': np.nan,
'2018-06-22T00:00:00': 4690.44675,
'2018-06-23T00:00:00': 4766.42225,
'2018-06-24T00:00:00': 4899.55725,
'2018-06-25T00:00:00': 5042.148499999999
}
}
actual = run_aggregator.get_numeric_data(self.runs, 'day')
self.assertItemsEqual(expected, actual)
self.assertItemsEqual(
expected['tempest-dsvm-neutron-full'].keys(),
actual['tempest-dsvm-neutron-full'].keys())
self.assertItemsEqual(
expected['tempest-dsvm-neutron-full-avg'].keys(),
actual['tempest-dsvm-neutron-full-avg'].keys())
# np.nan == np.nan is False; remove the entries with np.nan values.
# If a KeyError is raised, then expected does not equal actual.
for key in expected:
for date, run_time in list(expected[key].items()):
if np.isnan(run_time) and np.isnan(actual[key][date]):
del actual[key][date]
del expected[key][date]
self.assertDictEqual(expected, actual)
class TestRunAggregator(base.TestCase):
def setUp(self):
super(TestRunAggregator, self).setUp()
@@ -38,7 +130,7 @@ class TestRunAggregator(base.TestCase):
}
def test_that_runs_will_be_aggregated_by_seconds_and_project(self):
aggregator = RunAggregator(self.runs)
aggregator = run_aggregator.RunAggregator(self.runs)
aggregated_runs = aggregator.aggregate(datetime_resolution='sec')
expected_response = {
@@ -70,7 +162,7 @@ class TestRunAggregator(base.TestCase):
self.assertItemsEqual(expected_response, aggregated_runs)
def test_that_runs_will_be_aggregated_by_minute_and_project(self):
aggregator = RunAggregator(self.runs)
aggregator = run_aggregator.RunAggregator(self.runs)
aggregated_runs = aggregator.aggregate(datetime_resolution='min')
expected_response = {
@@ -87,7 +179,7 @@
self.assertItemsEqual(expected_response, aggregated_runs)
def test_that_runs_will_be_aggregated_by_hour_and_project(self):
aggregator = RunAggregator(self.runs)
aggregator = run_aggregator.RunAggregator(self.runs)
aggregated_runs = aggregator.aggregate(datetime_resolution='hour')
expected_response = {
@@ -104,7 +196,7 @@
self.assertItemsEqual(expected_response, aggregated_runs)
def test_that_runs_will_be_aggregated_by_day_and_project(self):
aggregator = RunAggregator(self.runs)
aggregator = run_aggregator.RunAggregator(self.runs)
aggregated_runs = aggregator.aggregate(datetime_resolution='day')
expected_response = {

View File

@@ -33,43 +33,45 @@ describe('GroupedRunsController', function() {
function mockHealthService() {
var expectedResponse = {
timedelta: [
{
datetime: '2014-11-19T00:00:00.000Z',
job_data: [
{
fail: 1,
job_name: 'gate-grenade-dsvm',
mean_run_time: 1154.6675000000002,
pass: 27
},
{
fail: 0,
job_name: 'gate-tempest-dsvm-full',
mean_run_time: 4366.415384615385,
pass: 13
},
{
fail: 0,
job_name: 'gate-tempest-dsvm-neutron-full',
mean_run_time: 5170.95,
pass: 12
},
{
fail: 1,
job_name: 'gate-tempest-dsvm-neutron-heat-slow',
mean_run_time: 273.05544444444445,
pass: 17
},
{
fail: 0,
job_name: 'gate-tempest-dsvm-postgres-full',
mean_run_time: 4439.482857142857,
pass: 14
}
]
}
]
data: {
timedelta: [
{
datetime: '2014-11-19T00:00:00.000Z',
job_data: [
{
fail: 1,
job_name: 'gate-grenade-dsvm',
mean_run_time: 1154.6675000000002,
pass: 27
},
{
fail: 0,
job_name: 'gate-tempest-dsvm-full',
mean_run_time: 4366.415384615385,
pass: 13
},
{
fail: 0,
job_name: 'gate-tempest-dsvm-neutron-full',
mean_run_time: 5170.95,
pass: 12
},
{
fail: 1,
job_name: 'gate-tempest-dsvm-neutron-heat-slow',
mean_run_time: 273.05544444444445,
pass: 17
},
{
fail: 0,
job_name: 'gate-tempest-dsvm-postgres-full',
mean_run_time: 4439.482857142857,
pass: 14
}
]
}
]
}
};
var endpoint = API_ROOT +

View File

@@ -34,7 +34,7 @@ describe('JobController', function() {
}));
function mockHealthService() {
var expectedResponse = {
var testRunsResponse = {
tests: {
'2014-11-19T01:00:00.000Z': {
'tempest.api.compute.admin.test_fixed_ips:FixedIPsTestJson.test_list_fixed_ip_details': {
@@ -58,14 +58,69 @@
}
}
};
var endpoint = API_ROOT +
var endpointTestRuns = API_ROOT +
'/build_name/gate-tempest-dsvm-neutron-full/test_runs?' +
'callback=JSON_CALLBACK&' +
'datetime_resolution=hour&' +
'start_date=' + DEFAULT_START_DATE.toISOString() + '&' +
'stop_date=' + DEFAULT_END_DATE.toISOString();
$httpBackend.expectJSONP(endpoint).respond(200, expectedResponse);
$httpBackend.expectJSONP(endpointTestRuns).respond(200, testRunsResponse);
var runMetaResponse = {
data: {
timedelta: [
{
'datetime': '2015-10-23T20:00:00',
'job_data': [
{
'fail': 0,
'job_name': 'gate-tempest-dsvm-neutron-src-taskflow',
'mean_run_time': 4859.3,
'pass': 1
}
]
},
{
'datetime': '2015-11-10T23:00:00',
'job_data': [
{
'fail': 0,
'job_name': 'gate-tempest-dsvm-neutron-src-taskflow',
'mean_run_time': 6231.47,
'pass': 1
}
]
}
]
},
numeric: {
'tempest-dsvm-neutron-full': {
'2015-10-23T20:00:00': 4859.3,
'2015-10-23T21:00:00': NaN,
'2015-10-23T22:00:00': NaN,
'2015-10-23T23:00:00': NaN,
'2015-11-10T23:00:00': 6231.47
},
'tempest-dsvm-neutron-full-avg': {
'2015-10-23T20:00:00': 4859.3,
'2015-10-23T21:00:00': NaN,
'2015-10-23T22:00:00': NaN,
'2015-10-23T23:00:00': NaN,
'2015-11-10T23:00:00': 6231.47
}
}
};
var endpoint = API_ROOT +
'/runs/key/build_name/gate-tempest-dsvm-neutron-full?' +
'callback=JSON_CALLBACK&' +
'datetime_resolution=hour&' +
'start_date=' + DEFAULT_START_DATE.toISOString() + '&' +
'stop_date=' + DEFAULT_END_DATE.toISOString();
$httpBackend.expectJSONP(endpoint).respond(200, runMetaResponse);
var recentResponse = [
{
@@ -102,6 +157,7 @@ describe('JobController', function() {
'/runs/key/build_name/gate-tempest-dsvm-neutron-full/recent?callback=JSON_CALLBACK';
$httpBackend.expectJSONP(endpointRecent)
.respond(200, recentResponse);
}
function mockConfigService() {
@@ -119,7 +175,6 @@
viewService: viewService
});
$httpBackend.flush();
expect(jobController.passes).toEqual([{
x: 1416358800000,
y: 52