Add new graph type for variance of individual tests

This commit adds a new graph type which is used to show run time
variance for individual tests. It gets all the run times for each
test and then generates a box and whisker plot from this data.

Change-Id: Ia44ea36486296fb0db705b1c55cd2338c204dc91
Matthew Treinish 2015-10-02 14:59:12 -04:00
parent c7c7f83f45
commit 7f87b412bc
5 changed files with 169 additions and 1 deletion
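In miniature, the new graph type boils down to: collect the per-test duration lists, pour them into a pandas DataFrame, and draw one box-and-whisker per test column. Below is a minimal, self-contained sketch of that technique with synthetic data; the test names, durations, and output path are illustrative, not from the commit (the real graph is produced through the subunit2sql-graph command added in the release note below).

import matplotlib
matplotlib.use('Agg')  # render off-screen, as a CLI tool would
import matplotlib.pyplot as plt
import pandas as pd

# Synthetic per-test durations in seconds; lists may differ in length.
run_times = {
    'test_a': [3.0, 2.8, 3.4, 2.9],
    'test_b': [1.1, 1.0],
}
# Wrapping each list in a Series lets pandas NaN-pad the shorter columns.
df = pd.DataFrame({k: pd.Series(v) for k, v in run_times.items()})
ax = df.plot(kind='box', rot=90)  # one box-and-whisker per test column
ax.set_title('Run time variance for individual tests')
ax.set_ylabel('Time (sec.)')
plt.tight_layout()
plt.savefig('test_run_time.png', dpi=300)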

@@ -0,0 +1,8 @@
---
features:
  - |
    A new subunit2sql-graph type, `test_run_time`, is added. This will graph
    the run time variance for the specified tests as box and whisker plots.
  - |
    A new DB API function, get_run_times_all_test_runs(), was added. This
    will return all the individual durations for successful test executions.
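A sketch of how the new DB API function might be called, assuming a database connection has already been configured; the values shown in the trailing comment are illustrative:

from subunit2sql.db import api

# Assumes the subunit2sql DB connection is already configured via oslo.config.
session = api.get_session()
run_times = api.get_run_times_all_test_runs(tests=['test_a', 'test_b'],
                                            session=session)
# Maps each test_id to the durations (in seconds) of its successful runs,
# e.g. {'test_a': [3.0, 2.9, 3.2], 'test_b': [1.1]}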

@@ -24,6 +24,7 @@ import subunit2sql.analysis.failures
 import subunit2sql.analysis.run_failure_rate
 import subunit2sql.analysis.run_time
 import subunit2sql.analysis.run_time_meta
+import subunit2sql.analysis.test_run_time
 from subunit2sql import shell

 CONF = cfg.CONF

@@ -57,7 +58,7 @@ def add_command_parsers(subparsers):
     graph_commands = {}
     # Put commands from in-tree commands on init list
     for command in ['failures', 'run_time', 'agg_count', 'dailycount',
-                    'run_failure_rate', 'run_time_meta']:
+                    'run_failure_rate', 'run_time_meta', 'test_run_time']:
         graph_commands[command] = getattr(subunit2sql.analysis, command)
     # Load any installed out of tree commands on the init list
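Each name on that list resolves to a module under subunit2sql.analysis that exposes the same two hooks the new test_run_time module (below) provides: set_cli_opts(parser) to register arguments and generate_series() to render the graph. A hypothetical skeleton of such a command module, with illustrative module and option names:

# my_graph.py -- hypothetical graph command module skeleton
from oslo_config import cfg

CONF = cfg.CONF


def set_cli_opts(parser):
    # Register this command's CLI arguments on the subparser.
    parser.add_argument('--example-opt', default=None,
                        help='An illustrative option')


def generate_series():
    # Runs after CONF is populated; read options from CONF.command.*
    # and write the rendered graph to CONF.output.
    pass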

@@ -0,0 +1,66 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

import matplotlib
import matplotlib.pyplot as plt
from oslo_config import cfg
import pandas as pd

from subunit2sql.db import api

CONF = cfg.CONF

matplotlib.style.use('ggplot')


def set_cli_opts(parser):
    parser.add_argument('test_ids', nargs='*',
                        help='Test ids to graph the run time for, if none '
                             'are specified all tests will be used')


def generate_series():
    session = api.get_session()
    test_ids = None
    if CONF.command.test_ids:
        test_ids = CONF.command.test_ids
    if CONF.start_date:
        start_date = datetime.datetime.strptime(CONF.start_date, '%Y-%m-%d')
    else:
        start_date = None
    if CONF.stop_date:
        stop_date = datetime.datetime.strptime(CONF.stop_date, '%Y-%m-%d')
    else:
        stop_date = None
    run_times = api.get_run_times_all_test_runs(tests=test_ids,
                                                start_date=start_date,
                                                stop_date=stop_date,
                                                session=session)
    df = pd.DataFrame(dict(
        [(k, pd.Series(v)) for k, v in run_times.items()]))
    if not CONF.title:
        title = "Run time variance for individual tests"
    else:
        title = CONF.title
    # NOTE(mtreinish): Decrease label font size for the worst case where we
    # have tons of tests
    matplotlib.rcParams['xtick.labelsize'] = '3'
    plt.figure()
    plt.title(title)
    df.plot(kind='box', rot=90)
    plt.ylabel('Time (sec.)')
    plt.tight_layout()
    plt.savefig(CONF.output, dpi=900)
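One detail worth calling out in generate_series(): the duration lists usually have different lengths, and pd.DataFrame raises a ValueError when built directly from a dict of unequal-length lists. Wrapping each list in a pd.Series first lets pandas align the columns on the index and pad the shorter ones with NaN, which the box-plot statistics then ignore. A quick illustration:

import pandas as pd

run_times = {'test_a': [3.0, 2.8, 3.1], 'test_b': [1.2]}
df = pd.DataFrame({k: pd.Series(v) for k, v in run_times.items()})
print(df)
#    test_a  test_b
# 0     3.0     1.2
# 1     2.8     NaN
# 2     3.1     NaN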

@@ -1220,6 +1220,60 @@ def get_ids_for_all_tests(session=None):
    return db_utils.model_query(models.Test, session).values(models.Test.id)


def get_run_times_all_test_runs(tests=None, start_date=None, stop_date=None,
                                session=None):
    """Return all the individual duration times for each test_run

    This function will return a dictionary where each key is a test_id and
    the value is a list of all the durations for each run of that test.

    :param list tests: The list of test_ids to get results for; if none are
                       specified all tests will be used
    :param str start_date: The date to use as the start date for results
    :param str stop_date: The date to use as the cutoff date for results
    :param session: Optional session object; if one isn't provided a new
                    session will be acquired for the duration of this
                    operation
    :return run_times: All the durations for test_runs grouped by test_id
    :rtype: dict
    """
    session = session or get_session()
    run_times_query = db_utils.model_query(models.TestRun, session).filter(
        models.TestRun.status == 'success').join(
            models.Test, models.TestRun.test_id == models.Test.id)
    if tests:
        run_times_query = run_times_query.filter(
            models.Test.test_id.in_(tests))
    if start_date:
        run_times_query = run_times_query.filter(
            models.TestRun.start_time >= start_date)
    if stop_date:
        run_times_query = run_times_query.filter(
            models.TestRun.start_time <= stop_date)
    run_times = run_times_query.values(models.Test.test_id,
                                       models.TestRun.start_time,
                                       models.TestRun.start_time_microsecond,
                                       models.TestRun.stop_time,
                                       models.TestRun.stop_time_microsecond)
    run_times_dict = {}
    for run_time in run_times:
        test_id = run_time[0]
        if run_time[1]:
            start_time = run_time[1]
            start_time = start_time.replace(microsecond=run_time[2])
        else:
            continue
        if run_time[3]:
            stop_time = run_time[3]
            stop_time = stop_time.replace(microsecond=run_time[4])
        else:
            continue
        duration = read_subunit.get_duration(start_time, stop_time)
        if test_id in run_times_dict:
            run_times_dict[test_id].append(duration)
        else:
            run_times_dict[test_id] = [duration]
    return run_times_dict


def get_run_times_grouped_by_run_metadata_key(key, start_date=None,
                                              stop_date=None, session=None,
                                              match_key=None,
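Because the schema stores second-resolution timestamps and their microsecond components in separate columns, the loop above reattaches the microseconds with datetime.replace() before computing the delta. A sketch of that arithmetic, assuming (consistent with the unit tests below) that read_subunit.get_duration returns the delta in seconds:

import datetime

start = datetime.datetime(2015, 10, 2, 14, 59, 12)
stop = datetime.datetime(2015, 10, 2, 14, 59, 15)
# Reattach the separately stored microsecond columns.
start = start.replace(microsecond=500000)
stop = stop.replace(microsecond=750000)
duration = (stop - start).total_seconds()  # 3.25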

@@ -248,6 +248,45 @@ class TestDatabaseAPI(base.TestCase):
        id_value = api.get_id_from_test_id('fake_test')
        self.assertEqual(test_a.id, id_value)

    def test_get_run_times_all_test_runs(self):
        timestamp_a = datetime.datetime.utcnow()
        timestamp_b = timestamp_a + datetime.timedelta(seconds=3)
        run_a = api.create_run()
        run_b = api.create_run()
        test_a = api.create_test('test_a')
        test_b = api.create_test('test_b')
        api.create_test_run(test_a.id, run_a.id, 'success', timestamp_a,
                            timestamp_b)
        api.create_test_run(test_a.id, run_b.id, 'success', timestamp_a,
                            timestamp_b)
        api.create_test_run(test_b.id, run_b.id, 'success', timestamp_a,
                            timestamp_b)
        res = api.get_run_times_all_test_runs()
        expected_dict = {
            'test_a': [3, 3],
            'test_b': [3]
        }
        self.assertEqual(expected_dict, res)

    def test_get_run_times_all_test_runs_with_tests_filter(self):
        timestamp_a = datetime.datetime.utcnow()
        timestamp_b = timestamp_a + datetime.timedelta(seconds=3)
        run_a = api.create_run()
        run_b = api.create_run()
        test_a = api.create_test('test_a')
        test_b = api.create_test('test_b')
        api.create_test_run(test_a.id, run_a.id, 'success', timestamp_a,
                            timestamp_b)
        api.create_test_run(test_a.id, run_b.id, 'success', timestamp_a,
                            timestamp_b)
        api.create_test_run(test_b.id, run_b.id, 'success', timestamp_a,
                            timestamp_b)
        res = api.get_run_times_all_test_runs(tests=['test_a'])
        expected_dict = {
            'test_a': [3, 3],
        }
        self.assertEqual(expected_dict, res)

    def test_get_test_runs_by_run_id(self):
        run_b = api.create_run()
        run_a = api.create_run()