Merged common/unused files with regular runner and small bug fixes

Change-Id: I039522788bb4b171111ef904f519e73af8a1f56e
Nathan Buckner 2015-05-13 18:06:41 -05:00
parent 8c234e2f3f
commit 02a97f1c2a
15 changed files with 277 additions and 1044 deletions

View File

@ -11,9 +11,13 @@
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from traceback import print_exc
from warnings import warn
import argparse
import os
from warnings import warn
import sys
from cafe.common.reporting.cclogging import \
get_object_namespace, getLogger, setup_new_cchandler, log_info_block
from cafe.common.reporting.metrics import \
@ -185,3 +189,27 @@ def print_mug(name, brewing_from):
print(border)
print(mug)
print(border)
def print_exception(file_=None, method=None, value=None, exception=None):
"""
Prints exceptions in a standard format to stderr.
"""
print("{0}".format("=" * 70), file=sys.stderr)
if file_:
print("{0}:".format(file_), file=sys.stderr, end=" ")
if method:
print("{0}:".format(method), file=sys.stderr, end=" ")
if value:
print("{0}:".format(value), file=sys.stderr, end=" ")
if exception:
print("{0}:".format(exception), file=sys.stderr, end=" ")
print("\n{0}".format("-" * 70), file=sys.stderr)
if exception is not None:
print_exc(file=sys.stderr)
print(file=sys.stderr)
def get_error(exception=None):
"""Gets errno from exception or returns one"""
return getattr(exception, "errno", 1)
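A usage sketch for the two helpers above (the call site is hypothetical; the cafe.drivers.base import path matches the import change further down in this diff):

from cafe.drivers.base import print_exception, get_error

try:
    open("/does/not/exist")
except IOError as exception:
    # Writes a "=" banner, then "runner.py: run: bad path: <exception>:"
    # and the traceback, all to stderr.
    print_exception("runner.py", "run", "bad path", exception)
    # IOError carries errno, so exit with it; exceptions without errno -> 1.
    exit(get_error(exception))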

View File

@ -17,10 +17,11 @@ import argparse
import errno
import importlib
import os
import re
import sys
from cafe.configurator.managers import EngineConfigManager
from cafe.drivers.unittest.common import print_exception, get_error
from cafe.drivers.base import print_exception, get_error
from cafe.engine.config import EngineConfig
@ -140,6 +141,22 @@ class TagAction(argparse.Action):
setattr(namespace, self.dest, values)
class RegexAction(argparse.Action):
"""
Processes regex option.
"""
def __call__(self, parser, namespace, values, option_string=None):
regex_list = []
for regex in values:
try:
regex_list.append(re.compile(regex))
except re.error as exception:
parser.error(
"RegexAction: Invalid regex {0} reason: {1}".format(
regex, exception))
setattr(namespace, self.dest, regex_list)
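A standalone sketch of the action in use (this parser wiring is illustrative; the real flag registration appears below):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--regex-list", action=RegexAction, nargs="+", default=[])

# Valid patterns come back pre-compiled:
args = parser.parse_args(["--regex-list", r"NoDataGenerator.*fail"])
assert args.regex_list[0].search("NoDataGenerator.test_fail")
# An invalid pattern such as "(" aborts via parser.error() with
# "RegexAction: Invalid regex ( reason: ...".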
class VerboseAction(argparse.Action):
"""
Custom action that sets VERBOSE environment variable.
@ -158,7 +175,7 @@ class ArgumentParser(argparse.ArgumentParser):
usage_string = """
cafe-runner <config> <testrepos>... [--fail-fast]
[--supress-load-tests] [--dry-run]
[--data-directory=DATA_DIRECTORY] [--dotpath-regex=REGEX...]
[--data-directory=DATA_DIRECTORY] [--regex-list=REGEX...]
[--file] [--parallel=(class|test)] [--result=(json|xml)]
[--result-directory=RESULT_DIRECTORY] [--tags=TAG...]
[--verbose=VERBOSE]
@ -222,11 +239,15 @@ class ArgumentParser(argparse.ArgumentParser):
help="Data directory override")
self.add_argument(
"--dotpath-regex", "-d",
"--regex-list", "-d",
action=RegexAction,
nargs="+",
default=[],
metavar="REGEX",
help="Package Filter")
help="Filter by regex against dotpath down to test level"
"Example: tests.repo.cafe_tests.NoDataGenerator.test_fail"
"Example: 'NoDataGenerator\.*fail'"
"Takes in a list and matches on any")
self.add_argument(
"--file", "-F",

View File

@ -12,20 +12,21 @@
# under the License.
from itertools import product
import json
from string import ascii_letters, digits
import json
ALLOWED_FIRST_CHAR = "_{0}".format(ascii_letters)
ALLOWED_OTHER_CHARS = "{0}{1}".format(ALLOWED_FIRST_CHAR, digits)
class _Dataset(object):
"""Defines a set of data to be used as input for a data driven test.
data_dict should be a dictionary with keys matching the keyword
arguments defined in test method that consumes the dataset.
name should be a string describing the dataset.
This class should not be accessed directly. Use or extend DatasetList.
"""
def __init__(self, name, data_dict, tags=None):
"""Defines a set of data to be used as input for a data driven test.
data_dict should be a dictionary with keys matching the keyword
arguments defined in test method that consumes the dataset.
name should be a string describing the dataset.
"""
self.name = name
self.data = data_dict
self.metadata = {'tags': tags or []}
@ -58,7 +59,6 @@ class DatasetList(list):
raise TypeError(
"extend() argument must be type DatasetList, not {0}".format(
type(dataset_list)))
super(DatasetList, self).extend(dataset_list)
def extend_new_datasets(self, dataset_list):
@ -66,19 +66,17 @@ class DatasetList(list):
self.extend(dataset_list)
def apply_test_tags(self, *tags):
"""Applys tags to all tests in dataset list"""
for dataset in self:
dataset.apply_test_tags(tags)
def dataset_names(self):
"""Gets a list of dataset names from dataset list"""
return [ds.name for ds in self]
def dataset_name_map(self):
name_map = {}
count = 0
for ds in self:
name_map[count] = ds.name
count += 1
return name_map
"""Creates a dictionary with key=count and value=dataset name"""
return {count: ds.name for count, ds in enumerate(self)}
def merge_dataset_tags(self, *dataset_lists):
local_name_map = self.dataset_name_map()
@ -118,11 +116,14 @@ class DatasetListCombiner(DatasetList):
"""
def __init__(self, *datasets):
for data in product(*datasets):
super(DatasetListCombiner, self).__init__()
for dataset_list in product(*datasets):
tmp_dic = {}
[tmp_dic.update(d.data) for d in data]
self.append_new_dataset(
"_".join([x.name for x in data]), tmp_dic)
names = []
for dataset in dataset_list:
tmp_dic.update(dataset.data)
names.append(dataset.name)
self.append_new_dataset("_".join(names), tmp_dic)
class DatasetGenerator(DatasetList):
@ -133,6 +134,7 @@ class DatasetGenerator(DatasetList):
"""
def __init__(self, list_of_dicts, base_dataset_name=None):
super(DatasetGenerator, self).__init__()
count = 0
for kwdict in list_of_dicts:
test_name = "{0}_{1}".format(base_dataset_name or "dataset", count)
@ -146,6 +148,7 @@ class TestMultiplier(DatasetList):
"""
def __init__(self, num_range):
super(TestMultiplier, self).__init__()
for num in range(num_range):
name = "{0}".format(num)
self.append_new_dataset(name, dict())
@ -161,6 +164,7 @@ class DatasetFileLoader(DatasetList):
load order, so that not all datasets need to be named.
"""
def __init__(self, file_object):
super(DatasetFileLoader, self).__init__()
content = json.loads(str(file_object.read()))
count = 0
for dataset in content:
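A sketch of the JSON input the loader consumes (format per the module docstring visible in the removed copy later in this diff); an unnamed dataset falls back to its load-order index:

import io

payload = io.StringIO(
    u'[{"name": "small", "data": {"size": 1}},'
    u' {"data": {"size": 100}}]')
datasets = DatasetFileLoader(payload)
# datasets.dataset_names() -> ["small", "1"]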

View File

@ -10,33 +10,31 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import re
import six
from six.moves import zip_longest
from importlib import import_module
from types import FunctionType
from unittest import TestCase
from warnings import warn, simplefilter
import inspect
import re
from cafe.common.reporting import cclogging
from cafe.drivers.unittest.datasets import DatasetList
TAGS_DECORATOR_TAG_LIST_NAME = "__test_tags__"
TAGS_DECORATOR_ATTR_DICT_NAME = "__test_attrs__"
DATA_DRIVEN_TEST_ATTR = "__data_driven_test_data__"
DATA_DRIVEN_TEST_PREFIX = "ddtest_"
TAGS_DECORATOR_ATTR_DICT_NAME = "__test_attrs__"
TAGS_DECORATOR_TAG_LIST_NAME = "__test_tags__"
PARALLEL_TAGS_LIST_ATTR = "__parallel_test_tags__"
class DataDrivenFixtureError(Exception):
"""Error if you apply DataDrivenClass to class that isn't a TestCase"""
pass
def _add_tags(func, tags):
if not getattr(func, TAGS_DECORATOR_TAG_LIST_NAME, None):
setattr(func, TAGS_DECORATOR_TAG_LIST_NAME, [])
func.__test_tags__ = list(set(func.__test_tags__).union(set(tags)))
def _add_tags(func, tags, attr):
if not getattr(func, attr, None):
setattr(func, attr, [])
setattr(func, attr, list(set(getattr(func, attr)).union(set(tags))))
return func
@ -52,8 +50,15 @@ def tags(*tags, **attrs):
cafe-runner at run time
"""
def decorator(func):
func = _add_tags(func, tags)
"""Calls _add_tags/_add_attrs to add tags to a func"""
func = _add_tags(func, tags, TAGS_DECORATOR_TAG_LIST_NAME)
func = _add_attrs(func, attrs)
# add tags for parallel runner
func = _add_tags(func, tags, PARALLEL_TAGS_LIST_ATTR)
func = _add_tags(
func, ["{0}={1}".format(k, v) for k, v in attrs.items()],
PARALLEL_TAGS_LIST_ATTR)
return func
return decorator
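A hypothetical decorated test showing where the tags land:

from unittest import TestCase

class ExampleTests(TestCase):

    @tags("smoke", execution="fast")
    def test_example(self):
        pass

# ExampleTests.test_example.__test_tags__ contains "smoke";
# __parallel_test_tags__ additionally contains "execution=fast",
# which is what the parallel suite builder matches on.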
@ -61,11 +66,19 @@ def tags(*tags, **attrs):
def data_driven_test(*dataset_sources, **kwargs):
"""Used to define the data source for a data driven test in a
DataDrivenFixture decorated Unittest TestCase class"""
def decorator(func):
# dataset_source checked for backward compatibility
combined_lists = kwargs.get("dataset_source") or []
"""Combines and stores DatasetLists in __data_driven_test_data__"""
dep_message = "DatasetList object required for data_generator"
combined_lists = kwargs.get("dataset_source") or DatasetList()
for key, value in kwargs.items():
if key != "dataset_source" and isinstance(value, DatasetList):
value.apply_test_tags(key)
elif not isinstance(value, DatasetList):
warn(dep_message, DeprecationWarning)
combined_lists += value
for dataset_list in dataset_sources:
if not isinstance(dataset_list, DatasetList):
warn(dep_message, DeprecationWarning)
combined_lists += dataset_list
setattr(func, DATA_DRIVEN_TEST_ATTR, combined_lists)
return func
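In use (a sketch; the method lives in a DataDrivenFixture-decorated TestCase, names hypothetical): positional DatasetLists are combined as-is, keyword lists are first tagged with their keyword, and any non-DatasetList triggers the deprecation warning above:

positive = DatasetGenerator([{"value": 1}, {"value": 2}])

@data_driven_test(positive)
def ddtest_value_is_positive(self, value=0):
    self.assertGreater(value, 0)
# The combined datasets are stashed on the function under
# __data_driven_test_data__ for DataDrivenFixture to expand.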
@ -75,6 +88,9 @@ def data_driven_test(*dataset_sources, **kwargs):
def DataDrivenClass(*dataset_lists):
"""Use data driven class decorator. designed to be used on a fixture"""
def decorator(cls):
"""Creates classes with variables named after datasets.
Names of classes are equal to (class_name without fixture) + ds_name
"""
module = import_module(cls.__module__)
cls = DataDrivenFixture(cls)
class_name = re.sub("fixture", "", cls.__name__, flags=re.IGNORECASE)
@ -93,66 +109,52 @@ def DataDrivenClass(*dataset_lists):
def DataDrivenFixture(cls):
"""Generates new unittest test methods from methods defined in the
decorated class"""
def create_func(original_test, new_name, kwargs):
"""Creates a function to add to class for ddtests"""
def new_test(self):
"""Docstring gets replaced by test docstring"""
func = getattr(self, original_test.__name__)
func(**kwargs)
new_test.__name__ = new_name
new_test.__doc__ = original_test.__doc__
return new_test
if not issubclass(cls, TestCase):
raise DataDrivenFixtureError
test_case_attrs = dir(cls)
for attr_name in test_case_attrs:
for attr_name in dir(cls):
if attr_name.startswith(DATA_DRIVEN_TEST_PREFIX) is False:
# Not a data driven test, skip it
continue
original_test = getattr(cls, attr_name, None).__func__
test_data = getattr(original_test, DATA_DRIVEN_TEST_ATTR, None)
if test_data is None:
# no data was provided to the datasource decorator or this is not a
# data driven test, skip it.
original_test = getattr(cls, attr_name, None)
if not callable(original_test):
continue
test_data = getattr(original_test, DATA_DRIVEN_TEST_ATTR, [])
for dataset in test_data:
# Name the new test based on original and dataset names
base_test_name = str(original_test.__name__)[
int(len(DATA_DRIVEN_TEST_PREFIX)):]
new_test_name = "test_{0}_{1}".format(
base_test_name, dataset.name)
base_test_name = attr_name[int(len(DATA_DRIVEN_TEST_PREFIX)):]
new_test_name = "test_{0}_{1}".format(base_test_name, dataset.name)
# Create a new test from the old test
new_test = FunctionType(
six.get_function_code(original_test),
six.get_function_globals(original_test),
name=new_test_name)
new_test = create_func(original_test, new_test_name, dataset.data)
# Copy over any other attributes the original test had (mainly to
# support test tag decorator)
for attr in list(set(dir(original_test)) - set(dir(new_test))):
setattr(new_test, attr, getattr(original_test, attr))
# Change the new test's default keyword values to the appropriate
# new data as defined by the datasource decorator
args, _, _, defaults = inspect.getargspec(original_test)
# Self doesn't have a default, so we need to remove it
args.remove('self')
# Make sure we take into account required arguments
kwargs = dict(
zip_longest(
args[::-1], list(defaults or ())[::-1], fillvalue=None))
kwargs.update(dataset.data)
# Make sure the updated values are in the correct order
new_default_values = [kwargs[arg] for arg in args]
setattr(new_test, "func_defaults", tuple(new_default_values))
for key, value in vars(original_test).items():
if key != DATA_DRIVEN_TEST_ATTR:
setattr(new_test, key, value)
# Set dataset tags and attrs
new_test = _add_tags(new_test, dataset.metadata.get('tags', []))
new_test = _add_tags(
new_test, dataset.metadata.get('tags', []),
TAGS_DECORATOR_TAG_LIST_NAME)
new_test = _add_tags(
new_test, dataset.metadata.get('tags', []),
PARALLEL_TAGS_LIST_ATTR)
# Add the new test to the decorated TestCase
setattr(cls, new_test_name, new_test)
return cls
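End to end, a hypothetical fixture and the tests it grows:

from unittest import TestCase

@DataDrivenFixture
class CalculatorTests(TestCase):

    @data_driven_test(DatasetGenerator([{"value": 1}, {"value": 2}]))
    def ddtest_value_is_positive(self, value=0):
        self.assertGreater(value, 0)

# dir(CalculatorTests) now also lists test_value_is_positive_dataset_0
# and test_value_is_positive_dataset_1; each generated test calls the
# original ddtest_ method with its dataset's kwargs via create_func.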
@ -216,6 +218,7 @@ class memoized(object):
return self.func.__doc__
def _start_logging(self, log_file_name):
"""Starts logging"""
setattr(self.func, '_log_handler', cclogging.setup_new_cchandler(
log_file_name))
setattr(self.func, '_log', cclogging.getLogger(''))
@ -230,4 +233,5 @@ class memoized(object):
self.__name__))
def _stop_logging(self):
self.func._log.removeHandler(self.func._log_handler)
"""Stop logging"""
self.func._log.removeHandler(self.func._log_handler)

View File

@ -11,6 +11,11 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
@summary: Base Classes for Test Fixtures
@note: Corresponds DIRECTLY TO A unittest.TestCase
@see: http://docs.python.org/library/unittest.html#unittest.TestCase
"""
import os
import re
import six
@ -22,18 +27,17 @@ from cafe.drivers.base import FixtureReporter
class BaseTestFixture(unittest.TestCase):
"""
Base class that all cafe unittest test fixtures should inherit from
.. seealso:: http://docs.python.org/library/unittest.html#unittest.TestCase
@summary: This should be used as the base class for any unittest tests,
meant to be used instead of unittest.TestCase.
@see: http://docs.python.org/library/unittest.html#unittest.TestCase
"""
__test__ = True
def shortDescription(self):
"""
Returns a formatted description of the test
@summary: Returns a formatted description of the test
"""
short_desc = None
if os.environ.get("VERBOSE", None) == "true" and self._testMethodDoc:
@ -42,6 +46,9 @@ class BaseTestFixture(unittest.TestCase):
return short_desc
def logDescription(self):
"""
@summary: Returns a formatted description from the _testMethodDoc
"""
log_desc = None
if self._testMethodDoc:
log_desc = "\n{0}".format(
@ -51,22 +58,24 @@ class BaseTestFixture(unittest.TestCase):
@classmethod
def assertClassSetupFailure(cls, message):
"""
Use this if you need to fail from a Test Fixture's setUpClass()
@summary: Use this if you need to fail from a Test Fixture's
setUpClass() method
"""
cls.fixture_log.error("FATAL: %s:%s" % (cls.__name__, message))
cls.fixture_log.error("FATAL: %s:%s", cls.__name__, message)
raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))
@classmethod
def assertClassTeardownFailure(cls, message):
"""
Use this if you need to fail from a Test Fixture's tearDownClass()
@summary: Use this if you need to fail from a Test Fixture's
tearDownClass() method
"""
cls.fixture_log.error("FATAL: %s:%s" % (cls.__name__, message))
cls.fixture_log.error("FATAL: %s:%s", cls.__name__, message)
raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))
@classmethod
def setUpClass(cls):
"""@summary: Adds logging/reporting to Unittest setUpClass"""
super(BaseTestFixture, cls).setUpClass()
cls._reporter = FixtureReporter(cls)
cls.fixture_log = cls._reporter.logger.log
@ -75,13 +84,14 @@ class BaseTestFixture(unittest.TestCase):
@classmethod
def tearDownClass(cls):
"""@summary: Adds stop reporting to Unittest setUpClass"""
cls._reporter.stop()
# Call super teardown after to avoid tearing down the class before we
# can run our own tear down stuff.
super(BaseTestFixture, cls).tearDownClass()
def setUp(self):
"""@summary: Logs test metrics"""
self.shortDescription()
self._reporter.start_test_metrics(
self.__class__.__name__, self._testMethodName,
@ -94,7 +104,6 @@ class BaseTestFixture(unittest.TestCase):
better pattern or working with the result object directly.
This is related to the todo in L{TestRunMetrics}
"""
if sys.version_info < (3, 4):
if six.PY2:
report = self._resultForDoCleanups
@ -114,7 +123,7 @@ class BaseTestFixture(unittest.TestCase):
self._reporter.stop_test_metrics(self._testMethodName,
'Passed')
else:
for method, errors in self._outcome.errors:
for method, _ in self._outcome.errors:
if self._test_name_matches_result(self._testMethodName,
method):
self._reporter.stop_test_metrics(self._testMethodName,
@ -125,11 +134,9 @@ class BaseTestFixture(unittest.TestCase):
# Continue inherited tearDown()
super(BaseTestFixture, self).tearDown()
def _test_name_matches_result(self, name, test_result):
"""
Checks if a test result matches a specific test name.
"""
@staticmethod
def _test_name_matches_result(name, test_result):
"""@summary: Checks if a test result matches a specific test name."""
if sys.version_info < (3, 4):
# Try to get the result portion of the tuple
try:
@ -147,17 +154,14 @@ class BaseTestFixture(unittest.TestCase):
@classmethod
def _do_class_cleanup_tasks(cls):
"""
Runs the tasks designated by the use of addClassCleanup
"""
"""@summary: Runs class cleanup tasks added during testing"""
for func, args, kwargs in reversed(cls._class_cleanup_tasks):
cls.fixture_log.debug(
"Running class cleanup task: {0}({1}, {2})".format(
func.__name__,
", ".join([str(arg) for arg in args]),
", ".join(["{0}={1}".format(
str(k), str(kwargs[k])) for k in kwargs])))
"Running class cleanup task: %s(%s, %s)",
func.__name__,
", ".join([str(arg) for arg in args]),
", ".join(["{0}={1}".format(
str(k), str(kwargs[k])) for k in kwargs]))
try:
func(*args, **kwargs)
except Exception as exception:
@ -166,17 +170,15 @@ class BaseTestFixture(unittest.TestCase):
cls.fixture_log.exception(exception)
cls.fixture_log.error(
"classTearDown failure: Exception occured while trying to"
" execute class teardown task: {0}({1}, {2})".format(
func.__name__,
", ".join([str(arg) for arg in args]),
", ".join(["{0}={1}".format(
str(k), str(kwargs[k])) for k in kwargs])))
" execute class teardown task: %s(%s, %s)",
func.__name__,
", ".join([str(arg) for arg in args]),
", ".join(["{0}={1}".format(
str(k), str(kwargs[k])) for k in kwargs]))
@classmethod
def addClassCleanup(cls, function, *args, **kwargs):
"""
Provides an addCleanup-like method that can be used in classmethods
"""@summary: Named to match unittest's addCleanup.
ClassCleanup tasks run if setUpClass fails, or after tearDownClass.
(They don't depend on tearDownClass running)
"""
@ -186,15 +188,16 @@ class BaseTestFixture(unittest.TestCase):
class BaseBurnInTestFixture(BaseTestFixture):
"""
Base test fixture that allows for Burn-In tests
@summary: Base test fixture that allows for Burn-In tests
"""
@classmethod
def setUpClass(cls):
"""@summary: inits burning testing variables"""
super(BaseBurnInTestFixture, cls).setUpClass()
cls.test_list = []
cls.iterations = 0
@classmethod
def addTest(cls, test_case):
"""@summary: Adds a test case"""
cls.test_list.append(test_case)

View File

@ -11,101 +11,78 @@
# License for the specific language governing permissions and limitations
# under the License.
from unittest.suite import _ErrorHolder
import json
class SummarizeResults(object):
def __init__(self, result_dict, master_testsuite,
execution_time):
for keys, values in list(result_dict.items()):
setattr(self, keys, values)
self.master_testsuite = master_testsuite
"""Reads in vars dict from suite and builds a Summarized results obj"""
def __init__(self, result_dict, tests, execution_time):
self.execution_time = execution_time
self.all_tests = tests
self.failures = result_dict.get("failures", [])
self.skipped = result_dict.get("skipped", [])
self.errors = result_dict.get("errors", [])
self.tests_run = result_dict.get("testsRun", 0)
def get_passed_tests(self):
all_tests = []
failed_tests = []
skipped_tests = []
errored_tests = []
setup_errored_classes = []
setup_errored_tests = []
passed_obj_list = []
for test in vars(self.master_testsuite).get('_tests'):
all_tests.append(test)
for failed_test in self.failures:
failed_tests.append(failed_test[0])
for skipped_test in self.skipped:
skipped_tests.append(skipped_test[0])
for errored_test in self.errors:
if errored_test[0].__class__.__name__ != '_ErrorHolder':
errored_tests.append(errored_test[0])
else:
setup_errored_classes.append(
str(errored_test[0]).split(".")[-1].rstrip(')'))
if len(setup_errored_classes) != 0:
for item_1 in all_tests:
for item_2 in setup_errored_classes:
if item_2 == item_1.__class__.__name__:
setup_errored_tests.append(item_1)
"""Gets a list of results objects for passed tests"""
errored_tests = [
t[0] for t in self.errors if not isinstance(t[0], _ErrorHolder)]
setup_errored_classes = [
str(t[0]).split(".")[-1].rstrip(')')
for t in self.errors if isinstance(t[0], _ErrorHolder)]
setup_errored_tests = [
t for t in self.all_tests
if t.__class__.__name__ in setup_errored_classes]
passed_tests = list(set(all_tests) - set(failed_tests) -
set(skipped_tests) - set(errored_tests) -
set(setup_errored_tests))
passed_tests = list(
set(self.all_tests) -
set([test[0] for test in self.failures]) -
set([test[0] for test in self.skipped]) -
set(errored_tests) - set(setup_errored_tests))
for passed_test in passed_tests:
passed_obj = Result(passed_test.__class__.__name__,
vars(passed_test).get('_testMethodName'))
passed_obj_list.append(passed_obj)
return passed_obj_list
def get_skipped_tests(self):
skipped_obj_list = []
for item in self.skipped:
skipped_obj = Result(item[0].__class__.__name__,
vars(item[0]).get('_testMethodName'),
skipped_msg=item[1])
skipped_obj_list.append(skipped_obj)
return skipped_obj_list
def get_errored_tests(self):
errored_obj_list = []
for item in self.errors:
if item[0].__class__.__name__ is not '_ErrorHolder':
errored_obj = Result(item[0].__class__.__name__,
vars(item[0]).get('_testMethodName'),
error_trace=item[1])
else:
errored_obj = Result(str(item[0]).split(" ")[0],
str(item[0]).split(".")[-1].rstrip(')'),
error_trace=item[1])
errored_obj_list.append(errored_obj)
return errored_obj_list
def parse_failures(self):
failure_obj_list = []
for failure in self.failures:
failure_obj = Result(failure[0].__class__.__name__,
vars(failure[0]).get('_testMethodName'),
failure[1])
failure_obj_list.append(failure_obj)
return failure_obj_list
return [self._create_result(t) for t in passed_tests]
def summary_result(self):
summary_res = {'tests': str(self.testsRun),
'errors': str(len(self.errors)),
'failures': str(len(self.failures)),
'skipped': str(len(self.skipped))}
return summary_res
"""Returns a dictionary containing counts of tests and statuses"""
return {
'tests': self.tests_run,
'errors': len(self.errors),
'failures': len(self.failures),
'skipped': len(self.skipped)}
def gather_results(self):
executed_tests = (self.get_passed_tests() + self.parse_failures() +
self.get_errored_tests() + self.get_skipped_tests())
"""Gets a result obj for all tests ran and failed setup classes"""
return (
self.get_passed_tests() +
[self._create_result(t, "failures") for t in self.failures] +
[self._create_result(t, "errored") for t in self.errors] +
[self._create_result(t, "skipped") for t in self.skipped])
return executed_tests
@staticmethod
def _create_result(test, type_="passed"):
"""Creates a Result object from a test and type of test"""
msg_type = {"failures": "failure_trace", "skipped": "skipped_msg",
"errored": "error_trace"}
if type_ == "passed":
dic = {"test_method_name": getattr(test, '_testMethodName', ""),
"test_class_name": test.__class__.__name__}
elif (type_ in ["failures", "skipped", "errored"] and
not isinstance(test[0], _ErrorHolder)):
dic = {"test_method_name": getattr(test[0], '_testMethodName', ""),
"test_class_name": test[0].__class__.__name__,
msg_type.get(type_, "error_trace"): test[1]}
else:
dic = {"test_method_name": str(test[0]).split(" ")[0],
"test_class_name": str(test[0]).split(".")[-1].rstrip(')'),
msg_type.get(type_, "error_trace"): test[1]}
return Result(**dic)
class Result(object):
"""Result object used to create the json and xml results"""
def __init__(
self, test_class_name, test_method_name, failure_trace=None,
skipped_msg=None, error_trace=None):
@ -117,7 +94,4 @@ class Result(object):
self.error_trace = error_trace
def __repr__(self):
values = []
for prop in self.__dict__:
values.append("%s: %s" % (prop, self.__dict__[prop]))
return dict('{' + ', '.join(values) + '}')
return json.dumps(self.__dict__)
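Note the replaced __repr__ above would have raised a TypeError (dict() does not accept a string); with json.dumps a Result now serializes directly, e.g. (values hypothetical):

result = Result("CalculatorTests", "test_value_is_positive_dataset_0")
repr(result)
# '{"test_class_name": "CalculatorTests", "test_method_name":
#   "test_value_is_positive_dataset_0", "failure_trace": null,
#   "skipped_msg": null, "error_trace": null}'  (key order may vary)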

View File

@ -16,23 +16,23 @@ from __future__ import print_function
from inspect import isclass, ismethod
import importlib
import pkgutil
import re
import unittest
import uuid
from cafe.drivers.unittest.common import print_exception, get_error
from cafe.drivers.base import print_exception, get_error
from cafe.drivers.unittest.suite import OpenCafeUnittestTestSuite
from cafe.drivers.unittest.decorators import TAGS_LIST_ATTR
from cafe.drivers.unittest.decorators import PARALLEL_TAGS_LIST_ATTR
class SuiteBuilder(object):
"""Builds suites for OpenCafe Unittest Runner"""
def __init__(
self, testrepos, tags=None, all_tags=False, dotpath_regex=None,
self, testrepos, tags=None, all_tags=False, regex_list=None,
file_=None, dry_run=False, exit_on_error=False):
self.testrepos = testrepos
self.tags = tags or []
self.all_tags = all_tags
self.regex_list = dotpath_regex or []
self.regex_list = regex_list or []
self.exit_on_error = exit_on_error
self.dry_run = dry_run
# dict format {"ubroast.test.test1.TestClass": ["test_t1", "test_t2"]}
@ -52,6 +52,8 @@ class SuiteBuilder(object):
for test in suite:
print(test)
exit(0)
for suite in test_suites:
suite.cafe_uuid = uuid.uuid4()
return test_suites
def load_file(self):
@ -99,10 +101,6 @@ class SuiteBuilder(object):
obj = getattr(loaded_module, objname, None)
if (isclass(obj) and issubclass(obj, unittest.TestCase) and
"fixture" not in obj.__name__.lower()):
if getattr(obj, "__test__", None) is not None:
print("Feature __test__ deprecated: Not skipping:"
"{0}".format(obj.__name__))
print("Use unittest.skip(reason)")
classes.append(obj)
return classes
@ -122,7 +120,7 @@ class SuiteBuilder(object):
ret_val = ismethod(test) and self._check_tags(test)
regex_val = not self.regex_list
for regex in self.regex_list:
regex_val |= bool(re.search(regex, full_path))
regex_val |= bool(regex.search(full_path))
return ret_val & regex_val
def _check_tags(self, test):
@ -133,7 +131,7 @@ class SuiteBuilder(object):
foo and bar will be matched including a test that contains
(foo, bar, bazz)
"""
test_tags = getattr(test, TAGS_LIST_ATTR, [])
test_tags = getattr(test, PARALLEL_TAGS_LIST_ATTR, [])
if self.all_tags:
return all([tag in test_tags for tag in self.tags])
else:

View File

@ -1,40 +0,0 @@
# Copyright 2015 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
from traceback import print_exc
def print_exception(file_=None, method=None, value=None, exception=None):
"""
Prints exceptions in a standard format to stderr.
"""
print("{0}".format("=" * 70), file=sys.stderr)
if file_:
print("{0}:".format(file_), file=sys.stderr, end=" ")
if method:
print("{0}:".format(method), file=sys.stderr, end=" ")
if value:
print("{0}:".format(value), file=sys.stderr, end=" ")
if exception:
print("{0}:".format(exception), file=sys.stderr, end=" ")
print("\n{0}".format("-" * 70), file=sys.stderr)
if exception is not None:
print_exc(file=sys.stderr)
print(file=sys.stderr)
def get_error(exception=None):
"""Gets errno from exception or returns one"""
return getattr(exception, "errno", 1)

View File

@ -1,164 +0,0 @@
# Copyright 2015 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from itertools import product
from string import ascii_letters, digits
import json
ALLOWED_FIRST_CHAR = "_{0}".format(ascii_letters)
ALLOWED_OTHER_CHARS = "{0}{1}".format(ALLOWED_FIRST_CHAR, digits)
class _Dataset(object):
"""Defines a set of data to be used as input for a data driven test.
data_dict should be a dictionary with keys matching the keyword
arguments defined in test method that consumes the dataset.
name should be a string describing the dataset.
This class should not be accessed directly. Use or extend DatasetList.
"""
def __init__(self, name, data_dict, tags=None):
self.name = name
self.data = data_dict
self.metadata = {'tags': tags or []}
def apply_test_tags(self, tags):
"""Applys tags to dataset"""
self.metadata['tags'] = list(set(self.metadata.get('tags') + tags))
def __repr__(self):
return "<name:{0}, data:{1}>".format(self.name, self.data)
class DatasetList(list):
"""Specialized list-like object that holds Dataset objects"""
def append(self, dataset):
if not isinstance(dataset, _Dataset):
raise TypeError(
"append() argument must be type Dataset, not {0}".format(
type(dataset)))
super(DatasetList, self).append(dataset)
def append_new_dataset(self, name, data_dict, tags=None):
"""Creates and appends a new Dataset"""
self.append(_Dataset(name, data_dict, tags))
def extend(self, dataset_list):
if not isinstance(dataset_list, DatasetList):
raise TypeError(
"extend() argument must be type DatasetList, not {0}".format(
type(dataset_list)))
super(DatasetList, self).extend(dataset_list)
def extend_new_datasets(self, dataset_list):
"""Creates and extends a new DatasetList"""
self.extend(dataset_list)
def apply_test_tags(self, *tags):
"""Applys tags to all tests in dataset list"""
for dataset in self:
dataset.apply_test_tags(tags)
def dataset_names(self):
"""Gets a list of dataset names from dataset list"""
return [ds.name for ds in self]
def dataset_name_map(self):
"""Creates a dictionary with key=count and value=dataset name"""
return {count: ds.name for count, ds in enumerate(self)}
@staticmethod
def replace_invalid_characters(string, new_char="_"):
"""This functions corrects string so the following is true
Identifiers (also referred to as names) are described by the
following lexical definitions:
identifier ::= (letter|"_") (letter | digit | "_")*
letter ::= lowercase | uppercase
lowercase ::= "a"..."z"
uppercase ::= "A"..."Z"
digit ::= "0"..."9"
"""
if not string:
return string
for char in set(string) - set(ALLOWED_OTHER_CHARS):
string = string.replace(char, new_char)
if string[0] in digits:
string = "{0}{1}".format(new_char, string[1:])
return string
class DatasetListCombiner(DatasetList):
"""Class that can be used to combine multiple DatasetList objects together.
Produces the product of combining every dataset from each list together
with the names merged together. The data is overridden in a cascading
fashion, similar to CSS, where the last dataset takes priority.
"""
def __init__(self, *datasets):
super(DatasetListCombiner, self).__init__()
for dataset_list in product(*datasets):
tmp_dic = {}
names = []
for dataset in dataset_list:
tmp_dic.update(dataset.data)
names.append(dataset.name)
self.append_new_dataset("_".join(names), tmp_dic)
class DatasetGenerator(DatasetList):
"""Generates Datasets from a list of dictionaries, which are named
numerically according to the source dictionary's order in the source list.
If a base_dataset_name is provided, that is used as the base name postfix
for all tests before they are numbered.
"""
def __init__(self, list_of_dicts, base_dataset_name=None):
super(DatasetGenerator, self).__init__()
count = 0
for kwdict in list_of_dicts:
test_name = "{0}_{1}".format(base_dataset_name or "dataset", count)
self.append_new_dataset(test_name, kwdict)
count += 1
class TestMultiplier(DatasetList):
"""Creates num_range number of copies of the source test,
and names the new tests numerically. Does not generate Datasets.
"""
def __init__(self, num_range):
super(TestMultiplier, self).__init__()
for num in range(num_range):
name = "{0}".format(num)
self.append_new_dataset(name, dict())
class DatasetFileLoader(DatasetList):
"""Reads a file object's contents in as json and converts them to
lists of Dataset objects.
Files should be opened in 'rb' (read binary) mode.
File should be a list of dictionaries following this format:
[{'name':"dataset_name", 'data':{key:value, key:value, ...}},]
if name is omitted, it is replaced with that dataset's location in the
load order, so that not all datasets need to be named.
"""
def __init__(self, file_object):
super(DatasetFileLoader, self).__init__()
content = json.loads(str(file_object.read()))
count = 0
for dataset in content:
name = dataset.get('name', str(count))
data = dataset.get('data', dict())
self.append_new_dataset(name, data)
count += 1

View File

@ -1,206 +0,0 @@
# Copyright 2015 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import re
from importlib import import_module
from unittest import TestCase
from cafe.common.reporting import cclogging
from cafe.drivers.unittest.datasets import DatasetList
TAGS_LIST_ATTR = "__test_tags__"
DATA_DRIVEN_TEST_ATTR = "__data_driven_test_data__"
DATA_DRIVEN_TEST_PREFIX = "ddtest_"
class DataDrivenFixtureError(Exception):
"""Error if you apply DataDrivenClass to func that isn't a TestCase"""
pass
def _add_tags(func, tag_list):
"""Adds tages to a function, stored in __test_tags__ variable"""
func.__test_tags__ = list(set(
getattr(func, TAGS_LIST_ATTR, []) + tag_list))
return func
def tags(*tag_list, **attrs):
"""Adds tags and attributes to tests, which are interpreted by the
cafe-runner at run time
"""
def decorator(func):
"""Calls _add_tags to add tags to a function"""
func = _add_tags(func, list(tag_list))
func = _add_tags(func, [
"{0}={1}".format(k, v) for k, v in attrs.items()])
return func
return decorator
def data_driven_test(*dataset_sources, **kwargs):
"""Used to define the data source for a data driven test in a
DataDrivenFixture decorated Unittest TestCase class"""
def decorator(func):
"""Combines and stores DatasetLists in __data_driven_test_data__"""
combined_lists = DatasetList()
for key, value in kwargs.items():
if isinstance(value, DatasetList):
value.apply_test_tags(key)
else:
print "DeprecationWarning Warning: non DataSetList passed to",
print " data generator."
combined_lists += value
for dataset_list in dataset_sources:
combined_lists += dataset_list
setattr(func, DATA_DRIVEN_TEST_ATTR, combined_lists)
return func
return decorator
def DataDrivenClass(*dataset_lists):
"""Use data driven class decorator. designed to be used on a fixture"""
def decorator(cls):
"""Creates classes with variables named after datasets.
Names of classes are equal to (class_name without fixture) + ds_name
"""
module = import_module(cls.__module__)
cls = DataDrivenFixture(cls)
class_name = re.sub("fixture", "", cls.__name__, flags=re.IGNORECASE)
if not re.match(".*fixture", cls.__name__, flags=re.IGNORECASE):
cls.__name__ = "{0}Fixture".format(cls.__name__)
for dataset_list in dataset_lists:
for dataset in dataset_list:
class_name_new = "{0}_{1}".format(class_name, dataset.name)
class_name_new = DatasetList.replace_invalid_characters(
class_name_new)
new_class = type(class_name_new, (cls,), dataset.data)
new_class.__module__ = cls.__module__
setattr(module, class_name_new, new_class)
return cls
return decorator
def DataDrivenFixture(cls):
"""Generates new unittest test methods from methods defined in the
decorated class"""
def create_func(original_test, new_name, kwargs):
"""Creates a function to add to class for ddtests"""
def new_test(self):
"""Docstring gets replaced by test docstring"""
func = getattr(self, original_test.__name__)
func(**kwargs)
new_test.__name__ = new_name
new_test.__doc__ = original_test.__doc__
return new_test
if not issubclass(cls, TestCase):
raise DataDrivenFixtureError
for attr_name in dir(cls):
if attr_name.startswith(DATA_DRIVEN_TEST_PREFIX) is False:
# Not a data driven test, skip it
continue
original_test = getattr(cls, attr_name, None)
if not callable(original_test):
continue
test_data = getattr(original_test, DATA_DRIVEN_TEST_ATTR, [])
for dataset in test_data:
# Name the new test based on original and dataset names
base_test_name = attr_name[int(len(DATA_DRIVEN_TEST_PREFIX)):]
new_test_name = DatasetList.replace_invalid_characters(
"test_{0}_{1}".format(base_test_name, dataset.name))
new_test = create_func(original_test, new_test_name, dataset.data)
# Copy over any other attributes the original test had (mainly to
# support test tag decorator)
for key, value in vars(original_test).items():
if key != DATA_DRIVEN_TEST_ATTR:
setattr(new_test, key, value)
# Set dataset tags and attrs
new_test = _add_tags(new_test, dataset.metadata.get('tags', []))
# Add the new test to the decorated TestCase
setattr(cls, new_test_name, new_test)
return cls
class memoized(object):
"""
Decorator.
@see: https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
Adds and removes handlers to root log for the duration of the function
call, or logs return of cached result.
"""
def __init__(self, func):
self.func = func
self.cache = {}
self.__name__ = func.__name__
def __call__(self, *args):
log_name = "{0}.{1}".format(
cclogging.get_object_namespace(args[0]), self.__name__)
self._start_logging(log_name)
try:
hash(args)
except TypeError: # unhashable arguments in args
value = self.func(*args)
debug = "Uncacheable. Data returned"
else:
if args in self.cache:
value = self.cache[args]
debug = "Cached data returned."
else:
value = self.cache[args] = self.func(*args)
debug = "Data cached for future calls"
self.func._log.debug(debug)
self._stop_logging()
return value
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def _start_logging(self, log_file_name):
"""Starts logging"""
setattr(self.func, '_log_handler', cclogging.setup_new_cchandler(
log_file_name))
setattr(self.func, '_log', cclogging.getLogger(''))
self.func._log.addHandler(self.func._log_handler)
try:
curframe = inspect.currentframe()
self.func._log.debug("{0} called from {1}".format(
self.__name__, inspect.getouterframes(curframe, 2)[2][3]))
except:
self.func._log.debug(
"Unable to log where {0} was called from".format(
self.__name__))
def _stop_logging(self):
"""Stop logging"""
self.func._log.removeHandler(self.func._log_handler)

View File

@ -1,202 +0,0 @@
# Copyright 2015 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
@summary: Base Classes for Test Fixtures
@note: Corresponds DIRECTLY TO A unittest.TestCase
@see: http://docs.python.org/library/unittest.html#unittest.TestCase
"""
import os
import re
import six
import sys
import unittest
from cafe.drivers.base import FixtureReporter
class BaseTestFixture(unittest.TestCase):
"""
@summary: This should be used as the base class for any unittest tests,
meant to be used instead of unittest.TestCase.
@see: http://docs.python.org/library/unittest.html#unittest.TestCase
"""
def shortDescription(self):
"""
@summary: Returns a formatted description of the test
"""
short_desc = None
if os.environ.get("VERBOSE", None) == "true" and self._testMethodDoc:
temp = self._testMethodDoc.strip("\n")
short_desc = re.sub(r"[ ]{2,}", "", temp).strip("\n")
return short_desc
def logDescription(self):
"""
@summary: Returns a formatted description from the _testMethodDoc
"""
log_desc = None
if self._testMethodDoc:
log_desc = "\n{0}".format(
re.sub(r"[ ]{2,}", "", self._testMethodDoc).strip("\n"))
return log_desc
@classmethod
def assertClassSetupFailure(cls, message):
"""
@summary: Use this if you need to fail from a Test Fixture's
setUpClass() method
"""
cls.fixture_log.error("FATAL: %s:%s", cls.__name__, message)
raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))
@classmethod
def assertClassTeardownFailure(cls, message):
"""
@summary: Use this if you need to fail from a Test Fixture's
tearDownClass() method
"""
cls.fixture_log.error("FATAL: %s:%s", cls.__name__, message)
raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))
@classmethod
def setUpClass(cls):
"""@summary: Adds logging/reporting to Unittest setUpClass"""
super(BaseTestFixture, cls).setUpClass()
cls._reporter = FixtureReporter(cls)
cls.fixture_log = cls._reporter.logger.log
cls._reporter.start()
cls._class_cleanup_tasks = []
@classmethod
def tearDownClass(cls):
"""@summary: Adds stop reporting to Unittest setUpClass"""
cls._reporter.stop()
# Call super teardown after to avoid tearing down the class before we
# can run our own tear down stuff.
super(BaseTestFixture, cls).tearDownClass()
def setUp(self):
"""@summary: Logs test metrics"""
self.shortDescription()
self._reporter.start_test_metrics(
self.__class__.__name__, self._testMethodName,
self.logDescription())
super(BaseTestFixture, self).setUp()
def tearDown(self):
"""
@todo: This MUST be upgraded from resultForDoCleanups into a
better pattern or working with the result object directly.
This is related to the todo in L{TestRunMetrics}
"""
if sys.version_info < (3, 4):
if six.PY2:
report = self._resultForDoCleanups
else:
report = self._outcomeForDoCleanups
if any(r for r in report.failures
if self._test_name_matches_result(self._testMethodName, r)):
self._reporter.stop_test_metrics(self._testMethodName,
'Failed')
elif any(r for r in report.errors
if self._test_name_matches_result(self._testMethodName,
r)):
self._reporter.stop_test_metrics(self._testMethodName,
'ERRORED')
else:
self._reporter.stop_test_metrics(self._testMethodName,
'Passed')
else:
for method, _ in self._outcome.errors:
if self._test_name_matches_result(self._testMethodName,
method):
self._reporter.stop_test_metrics(self._testMethodName,
'Failed')
else:
self._reporter.stop_test_metrics(self._testMethodName,
'Passed')
# Let the base handle whatever hoodoo it needs
super(BaseTestFixture, self).tearDown()
@staticmethod
def _test_name_matches_result(name, test_result):
"""@summary: Checks if a test result matches a specific test name."""
if sys.version_info < (3, 4):
# Try to get the result portion of the tuple
try:
result = test_result[0]
except IndexError:
return False
else:
result = test_result
# Verify the object has the correct property
if hasattr(result, '_testMethodName'):
return result._testMethodName == name
else:
return False
@classmethod
def _do_class_cleanup_tasks(cls):
"""@summary: Runs class cleanup tasks added during testing"""
for func, args, kwargs in reversed(cls._class_cleanup_tasks):
cls.fixture_log.debug(
"Running class cleanup task: %s(%s, %s)",
func.__name__,
", ".join([str(arg) for arg in args]),
", ".join(["{0}={1}".format(
str(k), str(kwargs[k])) for k in kwargs]))
try:
func(*args, **kwargs)
except Exception as exception:
# Pretty prints method signature in the following format:
# "classTearDown failure: Unable to execute FnName(a, b, c=42)"
cls.fixture_log.exception(exception)
cls.fixture_log.error(
"classTearDown failure: Exception occured while trying to"
" execute class teardown task: %s(%s, %s)",
func.__name__,
", ".join([str(arg) for arg in args]),
", ".join(["{0}={1}".format(
str(k), str(kwargs[k])) for k in kwargs]))
@classmethod
def addClassCleanup(cls, function, *args, **kwargs):
"""@summary: Named to match unittest's addCleanup.
ClassCleanup tasks run if setUpClass fails, or after tearDownClass.
(They don't depend on tearDownClass running)
"""
cls._class_cleanup_tasks.append((function, args or [], kwargs or {}))
class BaseBurnInTestFixture(BaseTestFixture):
"""
@summary: Base test fixture that allows for Burn-In tests
"""
@classmethod
def setUpClass(cls):
"""@summary: inits burning testing variables"""
super(BaseBurnInTestFixture, cls).setUpClass()
cls.test_list = []
cls.iterations = 0
@classmethod
def addTest(cls, test_case):
"""@summary: Adds a test case"""
cls.test_list.append(test_case)

View File

@ -1,98 +0,0 @@
# Copyright 2015 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest.suite import _ErrorHolder
import json
class SummarizeResults(object):
"""Reads in vars dict from suite and builds a Summarized results obj"""
def __init__(self, result_dict, testsuite, execution_time):
self.testsuite = testsuite
self.execution_time = execution_time
self.all_tests = getattr(testsuite, "_tests", [])
self.failures = result_dict.get("failures", [])
self.skipped = result_dict.get("skipped", [])
self.errors = result_dict.get("errors", [])
self.tests_run = result_dict.get("testsRun", 0)
def get_passed_tests(self):
"""Gets a list of results objects for passed tests"""
errored_tests = [
t[0] for t in self.errors if not isinstance(t[0], _ErrorHolder)]
setup_errored_classes = [
str(t[0]).split(".")[-1].rstrip(')')
for t in self.errors if isinstance(t[0], _ErrorHolder)]
setup_errored_tests = [
t for t in self.all_tests
if t.__class__.__name__ in setup_errored_classes]
passed_tests = list(
set(self.all_tests) -
set([test[0] for test in self.failures]) -
set([test[0] for test in self.skipped]) -
set(errored_tests) - set(setup_errored_tests))
return [self._create_result(t) for t in passed_tests]
def summary_result(self):
"""Returns a dictionary containing counts of tests and statuses"""
return {
'tests': self.tests_run,
'errors': len(self.errors),
'failures': len(self.failures),
'skipped': len(self.skipped)}
def gather_results(self):
"""Gets a result obj for all tests ran and failed setup classes"""
return (
self.get_passed_tests() +
[self._create_result(t, "failures") for t in self.failures] +
[self._create_result(t, "errored") for t in self.errors] +
[self._create_result(t, "skipped") for t in self.skipped])
@staticmethod
def _create_result(test, type_="passed"):
"""Creates a Result object from a test and type of test"""
msg_type = {"failures": "failure_trace", "skipped": "skipped_msg",
"errored": "error_trace"}
if type_ == "passed":
dic = {"test_method_name": getattr(test, '_testMethodName', ""),
"test_class_name": test.__class__.__name__}
elif (type_ in ["failures", "skipped", "errored"] and
not isinstance(test[0], _ErrorHolder)):
dic = {"test_method_name": getattr(test[0], '_testMethodName', ""),
"test_class_name": test[0].__class__.__name__,
msg_type.get(type_, "error_trace"): test[1]}
else:
dic = {"test_method_name": str(test[0]).split(" ")[0],
"test_class_name": str(test[0]).split(".")[-1].rstrip(')'),
msg_type.get(type_, "error_trace"): test[1]}
return Result(**dic)
class Result(object):
"""Result object used to create the json and xml results"""
def __init__(
self, test_class_name, test_method_name, failure_trace=None,
skipped_msg=None, error_trace=None):
self.test_class_name = test_class_name
self.test_method_name = test_method_name
self.failure_trace = failure_trace
self.skipped_msg = skipped_msg
self.error_trace = error_trace
def __repr__(self):
return json.dumps(self.__dict__)

View File

@ -1,4 +1,3 @@
#!/usr/bin/env python
# Copyright 2015 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -14,7 +13,7 @@
from __future__ import print_function
from multiprocessing import Process, Queue, active_children
from multiprocessing import Process, Queue
from StringIO import StringIO
from unittest.runner import _WritelnDecorator
import importlib
@ -29,7 +28,7 @@ from cafe.common.reporting import cclogging
from cafe.common.reporting.reporter import Reporter
from cafe.configurator.managers import TestEnvManager
from cafe.drivers.unittest.arguments import ArgumentParser
from cafe.drivers.unittest.common import print_exception, get_error
from cafe.drivers.base import print_exception, get_error
from cafe.drivers.unittest.parsers import SummarizeResults
from cafe.drivers.unittest.suite_builder import SuiteBuilder
@ -68,14 +67,14 @@ class UnittestRunner(object):
self.cl_args.data_directory or self.test_env.test_data_directory)
self.test_env.finalize()
cclogging.init_root_log_handler()
self.cl_args.testrepos = import_repos(self.cl_args.testrepos)
self.print_configuration(self.test_env, self.cl_args.testrepos)
self.cl_args.testrepos = import_repos(self.cl_args.testrepos)
self.suites = SuiteBuilder(
testrepos=self.cl_args.testrepos,
tags=self.cl_args.tags,
all_tags=self.cl_args.all_tags,
dotpath_regex=self.cl_args.dotpath_regex,
regex_list=self.cl_args.regex_list,
file_=self.cl_args.file,
dry_run=self.cl_args.dry_run,
exit_on_error=self.cl_args.exit_on_error).get_suites()
@ -97,31 +96,23 @@ class UnittestRunner(object):
to_worker.put(None)
start = time.time()
# A second try catch is needed here because queues can cause locking
# when they go out of scope, especially when termination signals used
try:
for _ in range(workers):
proc = Consumer(to_worker, from_worker, verbose, failfast)
worker_list.append(proc)
proc.start()
while active_children():
if from_worker.qsize():
results.append(self.log_result(from_worker.get()))
while not from_worker.empty():
for _ in self.suites:
results.append(self.log_result(from_worker.get()))
tests_run, errors, failures = self.compile_results(
time.time() - start, results)
except KeyboardInterrupt:
for proc in worker_list:
try:
os.kill(proc.pid, 9)
except:
# Process already exited, control C signal hit process
# when not in a test
pass
print_exception("Runner", "run", "Keyboard Interrupt, exiting...")
exit(get_error())
os.killpg(0, 9)
return bool(sum([errors, failures, not tests_run]))
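This boolean doubles as the process exit status: nonzero when anything errored or failed, or when no tests ran at all. For example:

# errors=0, failures=0, tests_run=10 -> bool(0 + 0 + False) -> False (exit 0)
# errors=2, failures=0, tests_run=10 -> True (exit 1)
# errors=0, failures=0, tests_run=0  -> True (exit 1): an empty run fails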
@staticmethod
@ -145,9 +136,9 @@ class UnittestRunner(object):
print("Percolated Configuration")
print("-" * 150)
if repos:
print("BREWING FROM: ....: {0}".format(repos[0].__name__))
print("BREWING FROM: ....: {0}".format(repos[0]))
for repo in repos[1:]:
print("{0}{1}".format(" " * 20, repo.__name__))
print("{0}{1}".format(" " * 20, repo))
print("ENGINE CONFIG FILE: {0}".format(test_env.engine_config_path))
print("TEST CONFIG FILE..: {0}".format(test_env.test_config_file_path))
print("DATA DIRECTORY....: {0}".format(test_env.test_data_directory))
@ -181,8 +172,9 @@ class UnittestRunner(object):
result_dict = {"tests": 0, "errors": 0, "failures": 0}
for dic in results:
result = dic["result"]
suite = dic["suite"]
result_parser = SummarizeResults(vars(result), suite, run_time)
tests = [suite for suite in self.suites
if suite.cafe_uuid == dic["cafe_uuid"]][0]
result_parser = SummarizeResults(vars(result), tests, run_time)
all_results += result_parser.gather_results()
summary = result_parser.summary_result()
for key in result_dict:
@ -252,11 +244,19 @@ class Consumer(Process):
record.msg = "{0}\n{1}".format(
record.msg, traceback.format_exc(record.exc_info))
record.exc_info = None
dic = {"result": result, "logs": handler._records, "suite": suite}
dic = {
"result": result,
"logs": handler._records,
"cafe_uuid": suite.cafe_uuid}
self.from_worker.put(dic)
def entry_point():
"""Function setup.py links cafe-runner to"""
runner = UnittestRunner()
exit(runner.run())
try:
runner = UnittestRunner()
exit(runner.run())
except KeyboardInterrupt:
print_exception("Runner", "run", "Keyboard Interrupt, exiting...")
os.killpg(0, 9)

View File

@ -1,89 +0,0 @@
# Copyright 2015 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains a monkeypatched version of unittest's TestSuite class that supports
a version of addCleanup that can be used in classmethods. This allows a
more granular approach to teardown to be used in setUpClass and classmethod
helper methods
"""
from unittest.suite import TestSuite, _DebugResult, util
class OpenCafeUnittestTestSuite(TestSuite):
def _tearDownPreviousClass(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if getattr(previousClass, '_classSetupFailed', False):
return
if getattr(result, '_moduleSetUpFailed', False):
return
if getattr(previousClass, "__unittest_skip__", False):
return
tearDownClass = getattr(previousClass, 'tearDownClass', None)
if tearDownClass is not None:
try:
tearDownClass()
except Exception as e:
if isinstance(result, _DebugResult):
raise
className = util.strclass(previousClass)
errorName = 'tearDownClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
# Monkeypatch: run class cleanup tasks regardless of whether
# tearDownClass succeeds or not
finally:
if hasattr(previousClass, '_do_class_cleanup_tasks'):
previousClass._do_class_cleanup_tasks()
# Monkeypatch: run class cleanup tasks regardless of whether
# tearDownClass exists or not
else:
if getattr(previousClass, '_do_class_cleanup_tasks', False):
previousClass._do_class_cleanup_tasks()
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if result._moduleSetUpFailed:
return
if getattr(currentClass, "__unittest_skip__", False):
return
try:
currentClass._classSetupFailed = False
except TypeError:
# test may actually be a function
# so its class will be a builtin-type
pass
setUpClass = getattr(currentClass, 'setUpClass', None)
if setUpClass is not None:
try:
setUpClass()
except Exception as e:
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
errorName = 'setUpClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
# Monkeypatch: Run class cleanup if setUpClass fails
currentClass._do_class_cleanup_tasks()

View File

@ -52,9 +52,9 @@ class PositiveDataGenerator(DatasetList):
"arg_update": ["--result-directory", "/"],
"update": {"result_directory": "/"}})
self.append_new_dataset("dotpath_regex", {
self.append_new_dataset("regex_list", {
"arg_update": ["-d", ".*", "..."],
"update": {"dotpath_regex": [".*", "..."]}})
"update": {"regex_list": [".*", "..."]}})
self.append_new_dataset("dry_run", {
"arg_update": ["--dry-run"],
@ -113,7 +113,7 @@ class ArgumentsTests(unittest.TestCase):
"""ArgumentParser Tests"""
good_package = "tests.repo"
bad_package = "tests.fakerepo"
good_module = "tests.repo.test_module"
good_module = "tests.repo.cafe_tests"
bad_module = "tests.repo.blah"
bad_path = "tests."
good_config = CONFIG_NAME
@ -125,7 +125,7 @@ class ArgumentsTests(unittest.TestCase):
"tags": [],
"all_tags": False,
"data_directory": None,
"dotpath_regex": [],
"regex_list": [],
"dry_run": False,
"exit_on_error": False,
"failfast": False,
@ -142,8 +142,8 @@ class ArgumentsTests(unittest.TestCase):
def setUpClass(cls):
super(ArgumentsTests, cls).setUpClass()
file_ = open(cls.config, "w")
file_.write("test_fail (tests.repo.test_module.NoDataGenerator)\n")
file_.write("test_pass (tests.repo.test_module.NoDataGenerator)\n")
file_.write("test_fail (tests.repo.cafe_tests.NoDataGenerator)\n")
file_.write("test_pass (tests.repo.cafe_tests.NoDataGenerator)\n")
file_.close()
def get_updated_expected(self, **kwargs):