Add ability to grab stream processing variables from kwargs

This commit allows 'run_at', 'artifacts', 'run_id', 'run_meta',
and 'test_attr_prefix' to be passed into the process_results
function, allowing programmatic use of subunit2sql without having
to access and update CONF for every run that gets processed.

Change-Id: Ie5db660a2b1a8162403b0475d13686e80b1b9cc6
Megan Guiney 2018-02-09 12:00:35 -08:00
parent 02a25056c4
commit fe52f342cc
3 changed files with 100 additions and 10 deletions
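
For context, here is a minimal sketch of the programmatic entry point this commit enables. The kwargs and the results-dict shape are taken from the diff and tests below; the surrounding values (URL, run id, metadata) are placeholders, and the database connection is assumed to already be configured via CONF and the api module:

# Minimal sketch of calling process_results() directly. In real usage the
# results dict comes from subunit2sql's subunit-stream parsing; the shape
# here mirrors the fake results used in the tests below.
from subunit2sql import shell

results = {
    'run_time': 42.0,  # popped off by process_results before iterating tests
    'test1': {'status': 'success', 'start_time': 0, 'end_time': 1,
              'metadata': None, 'attachments': None},
}

# Each kwarg overrides the matching CONF entry for this run only.
db_run = shell.process_results(results,
                               run_at='2018-04-03 00:01:00.000',
                               artifacts='http://logs.example.org/run/1',
                               run_id='my-unique-run-id',
                               run_meta={'build_queue': 'gate'})
print(db_run.id)  # process_results now returns the created run row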


@@ -0,0 +1,5 @@
+---
+features:
+  - add the ability to pass in the arguments 'run_at', 'artifacts', 'run_id',
+    'run_meta', and 'test_attr_prefix' from a programmatic call, rather than
+    from CONF or via the command line.


@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import copy
+import datetime
 import sys
 
 from dateutil import parser as date_parser
@@ -140,19 +141,46 @@ def _get_test_attrs_list(attrs):
     return None
 
 
-def process_results(results):
+def _override_conf(value, value_name):
+    if (not value) and hasattr(CONF, value_name):
+        return CONF[value_name]
+    else:
+        return value
+
+
+def process_results(results, run_at=None, artifacts=None, run_id=None,
+                    run_meta=None, test_attr_prefix=None):
+    """Insert converted subunit data into the database.
+
+    Allows for run-specific information to be passed in via kwargs;
+    checks CONF if no run-specific information is supplied.
+
+    :param results: subunit stream to be inserted
+    :param run_at: Optional time at which the run was started.
+    :param artifacts: Link to any artifacts from the test run.
+    :param run_id: The run id for the new run. Must be unique.
+    :param run_meta: Metadata corresponding to the new run.
+    :param test_attr_prefix: Optional test attribute prefix.
+    """
+    run_at = _override_conf(run_at, 'run_at')
+    artifacts = _override_conf(artifacts, 'artifacts')
+    run_id = _override_conf(run_id, 'run_id')
+    run_meta = _override_conf(run_meta, 'run_meta')
+    test_attr_prefix = _override_conf(test_attr_prefix, 'test_attr_prefix')
+
+    if run_at:
+        if not isinstance(run_at, datetime.datetime):
+            run_at = date_parser.parse(run_at)
+    else:
+        run_at = None
     session = api.get_session()
     run_time = results.pop('run_time')
     totals = get_run_totals(results)
-    if CONF.run_at:
-        run_at = date_parser.parse(CONF.run_at)
-    else:
-        run_at = None
     db_run = api.create_run(totals['skips'], totals['fails'],
-                            totals['success'], run_time, CONF.artifacts,
-                            id=CONF.run_id, run_at=run_at, session=session)
-    if CONF.run_meta:
-        api.add_run_metadata(CONF.run_meta, db_run.id, session)
+                            totals['success'], run_time, artifacts,
+                            id=run_id, run_at=run_at, session=session)
+    if run_meta:
+        api.add_run_metadata(run_meta, db_run.id, session)
     for test in results:
         db_test = api.get_test_by_test_id(test, session)
         if not db_test:
@@ -181,7 +209,7 @@ def process_results(results):
                                            results[test]['end_time'],
                                            session)
             if results[test]['metadata']:
-                if CONF.test_attr_prefix:
+                if test_attr_prefix:
                     attrs = results[test]['metadata'].get('attrs')
                     test_attr_list = _get_test_attrs_list(attrs)
                     test_metadata = api.get_test_metadata(db_test.id, session)
@@ -204,6 +232,7 @@ def process_results(results):
             api.add_test_run_attachments(results[test]['attachments'],
                                          test_run.id, session)
     session.close()
+    return db_run
 
 
 def get_extensions():

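A note on precedence: _override_conf lets an explicit (truthy) kwarg win over the CONF entry, and falls back to CONF only when the kwarg is unset or falsy — which also means a caller cannot pass, say, an empty string to suppress a configured value. Below is a standalone sketch of that behaviour; FakeConf is a plain stand-in class invented for illustration (the real helper indexes the oslo.config CONF object, so the sketch uses getattr instead):

# Stand-in for oslo.config's CONF, for illustration only.
class FakeConf(object):
    run_id = 'conf-run-id'


def override_conf(value, value_name, conf=FakeConf):
    # Explicit truthy values win; otherwise fall back to the conf object.
    if (not value) and hasattr(conf, value_name):
        return getattr(conf, value_name)
    return value


print(override_conf('kwarg-run-id', 'run_id'))  # -> 'kwarg-run-id'
print(override_conf(None, 'run_id'))            # -> 'conf-run-id'
print(override_conf(None, 'artifacts'))         # -> None (no such conf entry)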

@@ -246,6 +246,62 @@ class TestProcessResults(base.TestCase):
             fixtures.MockPatch('subunit2sql.shell.get_run_totals',
                                return_value=self.fake_totals)).mock
 
+    def test_process_results_no_conf(self):
+        fake_run_time = 'run time'
+        # Set up a common fake DB test
+        fake_db_test = mock.Mock(name='db test')
+        fake_db_test.id = 'test id'
+        fake_db_test.run_count = 3
+        fake_db_test.success = 2
+        fake_db_test.failure = 1
+
+        # Set up results
+        fake_results = dict(test1={'status': 'success', 'start_time': 0,
+                                   'end_time': 1, 'metadata': None,
+                                   'attachments': None},
+                            test2={'status': 'fail', 'start_time': 0,
+                                   'end_time': 2, 'metadata': None,
+                                   'attachments': None},
+                            test3={'status': 'skip', 'start_time': 0,
+                                   'end_time': 3, 'metadata': None,
+                                   'attachments': None})
+        fake_results_cpy = copy.deepcopy(fake_results)
+        fake_results_cpy['run_time'] = fake_run_time
+
+        # Create mocked DB resources to use in testing
+        fake_db_run_id = 'run id'
+        fake_db_run = mock.Mock(name='db run')
+        fake_db_run.id = fake_db_run_id
+        self.db_api_mock.create_run.return_value = fake_db_run
+        self.db_api_mock.get_test_by_test_id.return_value = fake_db_test
+
+        # Define run-specific values to pass directly to process_results
+        fake_run_at = '2018-04-03 00:01:00.000'
+        fake_artifacts = 'artifacts'
+        fake_run_id = 'run_id'
+        fake_run_meta = {'run_meta': 'value'}
+        # Run process_results
+        shell.process_results(fake_results_cpy,
+                              run_at=fake_run_at,
+                              artifacts=fake_artifacts,
+                              run_id=fake_run_id,
+                              run_meta=fake_run_meta)
+        # Check that we look up all tests in the DB
+        expected_test_by_id_calls = [mock.call(x, self.fake_session) for x in
+                                     fake_results]
+        self.db_api_mock.get_test_by_test_id.assert_has_calls(
+            expected_test_by_id_calls, any_order=True)
+        # Check that a test_run for each test is created in the DB
+        expected_create_test_run_calls = [
+            mock.call(fake_db_test.id, fake_db_run_id,
+                      fake_results[x]['status'],
+                      fake_results[x]['start_time'],
+                      fake_results[x]['end_time'], self.fake_session)
+            for x in fake_results]
+        self.db_api_mock.create_test_run.assert_has_calls(
+            expected_create_test_run_calls, any_order=True)
+        self.fake_session.close.assert_called_once()
+
     def test_process_results_no_results(self):
         fake_run_time = 'run time'
         fake_results = dict(run_time=fake_run_time)
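
One assertion the new test stops short of, and which a follow-up could add inside test_process_results_no_conf above (purely hypothetical here, assuming self.fake_totals is a dict with 'skips', 'fails' and 'success' keys), is checking that the kwargs actually reach the DB layer:

# Hypothetical extra check, not part of this commit: verify the kwargs
# were forwarded to api.create_run instead of being read from CONF.
# run_at is parsed into a datetime before the call, hence mock.ANY.
self.db_api_mock.create_run.assert_called_once_with(
    self.fake_totals['skips'], self.fake_totals['fails'],
    self.fake_totals['success'], fake_run_time, fake_artifacts,
    id=fake_run_id, run_at=mock.ANY, session=self.fake_session)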