Add failed runs list to test page

This commit adds a new table to the per test page which shows all the
runs which have failed in the current view. This is useful for
debugging spikes in failures on the pass/fail graph.

Change-Id: Idc8c5d41467ac0302a4e8a256e64af1bb73cae24
This commit is contained in:
Matthew Treinish 2016-08-08 15:23:42 -04:00
parent 1dd17aa6ef
commit e86564d6a0
No known key found for this signature in database
GPG Key ID: FD12A0F214C9E177
6 changed files with 149 additions and 17 deletions

View File

@ -133,6 +133,7 @@ function TestController(
{key: 'Run Time (sec.)', values: runTimeEntries, color: 'blue'},
{key: 'Avg. Run Time (sec.)', values: avgRunTimeEntries, color: 'black'}
];
vm.recentRuns = data.failed_runs;
};
vm.loadData = function() {

View File

@ -43,7 +43,10 @@ table.default-cols {
}
tbody {
td {
td.nowrap{
white-space: nowrap;
}
td:not(.nowrap) {
white-space: normal !important;
word-wrap: break-word;
max-width: 650px;

View File

@ -73,4 +73,50 @@
</div>
</div>
</div>
<!-- Recent Failures: lists every failed run in the current view,
     fed by testCtrl.recentRuns (set from the API's failed_runs). -->
<div class="row">
  <div class="col-lg-12">
    <div class="panel panel-default">
      <div class="panel-heading">
        <h3 class="panel-title">Recent Failures</h3>
      </div>
      <div class="table-responsive">
        <table table-sort data="testCtrl.recentRuns"
               class="table table-hover default-cols">
          <thead>
            <tr>
              <th sort-field="job_name">Job Name</th>
              <th sort-field="provider">Node Provider</th>
              <th sort-default sort-field="run_at">Run At</th>
              <th sort-field="artifacts">Link</th>
              <th sort-field="bugs">Likely Bugs</th>
            </tr>
          </thead>
          <tbody>
            <tr table-ref="table" ng-repeat="run in table.dataSorted">
              <td><a ui-sref="job({ jobName: run.job_name })">{{ run.job_name }}</a></td>
              <td>{{ run.provider }}</td>
              <td class="nowrap">{{ run.run_at | date:'M/d/yyyy HH:mm' }}</td>
              <td>
                <a target="_blank" href="{{ run.artifacts }}">
                  {{ run.artifacts }}
                  <fa name="external-link"></fa>
                </a>
              </td>
              <td>
                <!-- BUG FIX: the ng-repeat variable is "run", not "value";
                     "value.bugs" was always undefined so the bug links never
                     rendered. Also test .length: the backend sends an empty
                     list (not a falsy value) when no bugs matched, and an
                     empty array is truthy in JavaScript. -->
                <span ng-if="run.bugs.length">
                  <a ng-repeat="bug in run.bugs"
                     href="https://launchpad.net/bugs/{{bug}}"
                     uib-tooltip="Launchpad Bugs: {{bug}}"
                     target="_blank">{{bug}} <fa name="external-link"></fa>
                  </a>
                </span>
                <span ng-if="!run.bugs.length">-</span>
              </td>
            </tr>
          </tbody>
        </table>
      </div>
    </div>
  </div>
</div>
</div>

View File

@ -660,20 +660,95 @@ def get_test_runs_for_test(test_id):
' choice' % datetime_resolution)
status_code = 400
return abort(make_response(message, status_code))
with session_scope() as session:
db_test_runs = api.get_test_runs_by_test_test_id(test_id,
session=session,
start_date=start_date,
stop_date=stop_date)
if not db_test_runs:
# NOTE(mtreinish) if no data is returned from the DB just return an
# empty set response, the test_run_aggregator function assumes data
# is present.
return jsonify({'numeric': {}, 'data': {}})
test_runs =\
test_run_aggregator.convert_test_runs_list_to_time_series_dict(
db_test_runs, datetime_resolution)
return jsonify(test_runs)
bug_dict = {}
query_threads = []
def _populate_bug_dict(change_dict):
    """Classify each run described in *change_dict* and record its bugs.

    Runs on a worker thread. For every run uuid in change_dict the
    elastic-recheck classifier is queried with that run's change
    number, patchset number, and short build uuid; the resulting bug
    list is stored in the enclosing scope's ``bug_dict`` keyed by the
    run uuid.
    """
    for run_uuid, info in change_dict.items():
        bug_dict[run_uuid] = classifier.classify(
            info['change_num'], info['patch_num'], info['short_uuid'])
@region.cache_on_arguments()
def _get_data(test_id, start_date, stop_date):
    """Build the time-series and failed-runs payload for one test.

    Returns a plain JSON-serializable dict in *all* branches so that
    (a) the dogpile cache stores data rather than a Flask Response
    object, and (b) the caller can safely pass the result to
    jsonify(). The previous empty-DB branch returned jsonify(...)
    directly, which would be jsonify()'d a second time by the caller
    and would poison the cache with a Response.

    NOTE(review): datetime_resolution is read from the enclosing
    scope, so it is NOT part of the cache key; two requests for the
    same test/date range with different resolutions could share a
    cache entry — confirm and consider passing it as an argument.
    """
    with session_scope() as session:
        db_test_runs = api.get_test_runs_by_test_test_id(
            test_id, session=session, start_date=start_date,
            stop_date=stop_date)
        if not db_test_runs:
            # NOTE(mtreinish) if no data is returned from the DB just
            # return an empty set response, the test_run_aggregator
            # function assumes data is present.
            # failed_runs is a list (not {}) to match the non-empty
            # branch and the unit-test expectations.
            return {'numeric': {}, 'data': {}, 'failed_runs': []}
        test_runs =\
            test_run_aggregator.convert_test_runs_list_to_time_series_dict(
                db_test_runs, datetime_resolution)
        failed_run_ids = [
            x.run_id for x in db_test_runs if x.status == 'fail']
        failed_runs = api.get_runs_by_ids(failed_run_ids, session=session)
        job_names = {}
        providers = {}
        failed_uuids = [x.uuid for x in failed_runs]
        split_uuids = []
        if len(failed_uuids) <= 10:
            # Few failures: one classifier lookup thread per run.
            split_uuids = [[x] for x in failed_uuids]
        else:
            # Many failures: batch uuids in groups of 10 per thread to
            # bound the number of threads spawned.
            for i in range(0, len(failed_uuids), 10):
                split_uuids.append(failed_uuids[i:i + 10])
        for uuids in split_uuids:
            change_dict = {}
            for uuid in uuids:
                metadata = api.get_run_metadata(uuid, session=session)
                short_uuid = None
                change_num = None
                patch_num = None
                for meta in metadata:
                    if meta.key == 'build_short_uuid':
                        short_uuid = meta.value
                    elif meta.key == 'build_change':
                        change_num = meta.value
                    elif meta.key == 'build_patchset':
                        patch_num = meta.value
                    elif meta.key == 'build_name':
                        job_names[uuid] = meta.value
                    elif meta.key == 'node_provider':
                        providers[uuid] = meta.value
                # NOTE(mtreinish): If the required metadata fields
                # aren't present skip ES lookup
                if not short_uuid or not change_num or not patch_num:
                    continue
                # classifier is module-level and only read here, so no
                # "global" declaration is needed (the original
                # re-declared it inside this loop on every iteration).
                if classifier:
                    change_dict[uuid] = {
                        'change_num': change_num,
                        'patch_num': patch_num,
                        'short_uuid': short_uuid,
                    }
            query_thread = threading.Thread(
                target=_populate_bug_dict, args=[change_dict])
            query_threads.append(query_thread)
            query_thread.start()
        output = []
        for thread in query_threads:
            thread.join()
        for run in failed_runs:
            output.append({
                'provider': providers.get(run.uuid),
                'job_name': job_names.get(run.uuid),
                'run_at': run.run_at.isoformat(),
                'artifacts': run.artifacts,
                # Runs that had no classifier result get an empty list.
                'bugs': bug_dict.get(run.uuid, []),
            })
        test_runs['failed_runs'] = output
        return test_runs
results = _get_data(test_id, start_date, stop_date)
return jsonify(results)
def main():

View File

@ -748,6 +748,13 @@ class TestRestAPI(base.TestCase):
stop_time=timestamp_b,
stop_time_microsecond=0)])
def test_get_test_runs_for_test(self, api_mock):
setup_mock = mock.patch('openstack_health.api.setup')
setup_mock.start()
self.addCleanup(setup_mock.stop)
api.classifier = None
api.region = mock.MagicMock()
api.region.cache_on_arguments = mock.MagicMock()
api.region.cache_on_arguments.return_value = lambda x: x
res = self.app.get('/test_runs/fake.test.id')
self.assertEqual(200, res.status_code)
exp_result = {'data': {
@ -759,7 +766,7 @@ class TestRestAPI(base.TestCase):
'avg_run_time': numpy.NaN,
'run_time': 1.0,
'std_dev_run_time': numpy.NaN
}}
}}, 'failed_runs': []
}
response_data = json.loads(res.data.decode('utf-8'))
numpy.testing.assert_equal(exp_result, response_data)

View File

@ -5,7 +5,7 @@ pbr>=1.6 # Apache-2.0
Flask<1.0,>=0.11 # BSD
pymemcache >= 1.3.5 # Apache-2.0
dogpile.cache>=0.6.0 # BSD
subunit2sql>=1.6.0 # Apache-2.0
subunit2sql>=1.7.0 # Apache-2.0
SQLAlchemy<1.1.0,>=1.0.10 # MIT
flask-jsonpify>=1.5.0 # MIT
PyMySQL>=0.6.2 # MIT License