diff --git a/avocado/job.py b/avocado/job.py
index b75850002f371cb00ff656b646d56219eb9cb50c..a3da5cf54836704409a785a059e84e9704ccc340 100644
--- a/avocado/job.py
+++ b/avocado/job.py
@@ -10,6 +10,7 @@ from avocado.core import data_dir
 from avocado.core import output
 from avocado import test
 from avocado import sysinfo
+from avocado import result
 
 JOB_STATUSES = {"TEST_NA": False,
                 "ABORT": False,
@@ -108,24 +109,15 @@ class Job(object):
         if urls is None:
             urls = self.args.url.split()
 
-        total_tests = len(urls)
-        self.output_manager.start_file_logging(self.debuglog, self.loglevel)
-        self.output_manager.log_header("DEBUG LOG: %s" % self.debuglog)
-        self.output_manager.log_header("TOTAL TESTS: %s" % total_tests)
-        self.output_mapping = {'PASS': self.output_manager.log_pass,
-                               'FAIL': self.output_manager.log_fail,
-                               'TEST_NA': self.output_manager.log_skip,
-                               'WARN': self.output_manager.log_warn}
-
+        test_result = result.TestResult(stream=self.output_manager,
+                                        debuglog=self.debuglog,
+                                        loglevel=self.loglevel,
+                                        tests_total=len(urls))
+        test_result.start_tests()
         for url in urls:
             test_instance = self.run_test(url)
-            output_func = self.output_mapping[test_instance.status]
-            label = "(%s/%s) %s:" % (self.test_index, total_tests,
-                                     test_instance.tagged_name)
-            output_func(label, test_instance.time_elapsed)
-            self.test_index += 1
-
-        self.output_manager.stop_file_logging()
+            test_result.check_test(test_instance)
+        test_result.end_tests()
 
 
 class TestModuleRunner(object):
diff --git a/avocado/result.py b/avocado/result.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4b06e700285de33e416fb36cef829d621f4a47c
--- /dev/null
+++ b/avocado/result.py
@@ -0,0 +1,78 @@
+"""Test result module."""
+
+
+class TestResult(object):
+
+    """
+    Test result class, holder for test result information.
+    """
+
+    def __init__(self, stream, debuglog, loglevel, tests_total):
+        self.stream = stream
+        self.debuglog = debuglog
+        self.loglevel = loglevel
+        self.tests_run = 0
+        self.tests_total = tests_total
+        self.total_time = 0.0
+        self.passed = []
+        self.failed = []
+        self.skipped = []
+        self.warned = []
+
+    def start_tests(self):
+        'Called once before any tests are executed.'
+        self.stream.start_file_logging(self.debuglog, self.loglevel)
+        self.stream.log_header("DEBUG LOG: %s" % self.debuglog)
+        self.stream.log_header("TOTAL TESTS: %s" % self.tests_total)
+        self.tests_run += 1
+
+    def end_tests(self):
+        'Called once after all tests are executed.'
+        self.stream.log_header("TOTAL PASSED: %d" % len(self.passed))
+        self.stream.log_header("TOTAL FAILED: %d" % len(self.failed))
+        self.stream.log_header("TOTAL SKIPPED: %d" % len(self.skipped))
+        self.stream.log_header("TOTAL WARNED: %d" % len(self.warned))
+        self.stream.log_header("ELAPSED TIME: %.2f s" % self.total_time)
+        self.stream.stop_file_logging()
+
+    def start_test(self, test):
+        'Called when the given test is about to be run.'
+        self.test_label = '(%s/%s) %s: ' % (self.tests_run,
+                                            self.tests_total,
+                                            test.tagged_name)
+
+    def end_test(self, test):
+        'Called when the given test has been run.'
+        self.tests_run += 1
+        self.total_time += test.time_elapsed
+
+    def add_pass(self, test):
+        'Called when a test succeeds.'
+        self.stream.log_pass(self.test_label, test.time_elapsed)
+        self.passed.append(test)
+
+    def add_fail(self, test):
+        'Called when a test fails.'
+        self.stream.log_fail(self.test_label, test.time_elapsed)
+        self.failed.append(test)
+
+    def add_skip(self, test):
+        'Called when a test is skipped.'
+        self.stream.log_skip(self.test_label, test.time_elapsed)
+        self.skipped.append(test)
+
+    def add_warn(self, test):
+        'Called when a test warns.'
+        self.stream.log_warn(self.test_label, test.time_elapsed)
+        self.warned.append(test)
+
+    def check_test(self, test):
+        'Called once for a test to check status and report.'
+        self.start_test(test)
+        status_map = {'PASS': self.add_pass,
+                      'FAIL': self.add_fail,
+                      'TEST_NA': self.add_skip,
+                      'WARN': self.add_warn}
+        add = status_map[test.status]
+        add(test)
+        self.end_test(test)
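
For reference, a minimal sketch of how the new TestResult API is meant to be driven, assuming the patch above is applied. The FakeStream and FakeTest classes below are hypothetical stand-ins used only for illustration: in the job they correspond to the output_manager and to the objects returned by run_test(), and they stub out only the methods and attributes that TestResult actually touches.

    from avocado import result

    class FakeStream(object):
        # Hypothetical stand-in for the job's output manager; it implements
        # only the logging calls that TestResult makes.
        def start_file_logging(self, debuglog, loglevel):
            pass

        def stop_file_logging(self):
            pass

        def log_header(self, msg):
            print(msg)

        def log_pass(self, label, time_elapsed):
            print("%sPASS (%.2f s)" % (label, time_elapsed))

        def log_fail(self, label, time_elapsed):
            print("%sFAIL (%.2f s)" % (label, time_elapsed))

        def log_skip(self, label, time_elapsed):
            print("%sSKIP (%.2f s)" % (label, time_elapsed))

        def log_warn(self, label, time_elapsed):
            print("%sWARN (%.2f s)" % (label, time_elapsed))

    class FakeTest(object):
        # Hypothetical stand-in exposing the attributes TestResult reads.
        def __init__(self, tagged_name, status, time_elapsed):
            self.tagged_name = tagged_name
            self.status = status
            self.time_elapsed = time_elapsed

    tests = [FakeTest('sleeptest.1', 'PASS', 1.01),
             FakeTest('failtest.1', 'FAIL', 0.10)]
    test_result = result.TestResult(stream=FakeStream(),
                                    debuglog='/tmp/debug.log',
                                    loglevel='DEBUG',
                                    tests_total=len(tests))
    test_result.start_tests()
    for test in tests:
        test_result.check_test(test)
    test_result.end_tests()

check_test() dispatches on test.status exactly as the removed output_mapping dictionary in job.py did, so the console output keeps the "(index/total) name:" labels while the counting, timing and summary logic move out of Job.run and into the result module.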