diff --git a/avocado/core/output.py b/avocado/core/output.py
index 676335d25b941fa83efbe3a5c0ab26c3860319bd..9583b9c13b763ac35a94f4e58dd2522db56612ba 100644
--- a/avocado/core/output.py
+++ b/avocado/core/output.py
@@ -154,6 +154,7 @@ class TermSupport(object):
         self.PASS = self.COLOR_GREEN
         self.SKIP = self.COLOR_YELLOW
         self.FAIL = self.COLOR_RED
+        self.INTERRUPT = self.COLOR_RED
         self.ERROR = self.COLOR_RED
         self.WARN = self.COLOR_YELLOW
         self.PARTIAL = self.COLOR_YELLOW
@@ -176,6 +177,7 @@ class TermSupport(object):
         self.PASS = ''
         self.SKIP = ''
         self.FAIL = ''
+        self.INTERRUPT = ''
         self.ERROR = ''
         self.WARN = ''
         self.PARTIAL = ''
@@ -206,7 +208,7 @@ class TermSupport(object):
 
         If the output does not support colors, just return the original string.
         """
-        return self.SKIP + msg + self.ENDC
+        return self.WARN + msg + self.ENDC
 
     def healthy_str(self, msg):
         """
@@ -250,12 +252,20 @@ class TermSupport(object):
 
     def error_str(self):
         """
-        Print a not found string (yellow colored).
+        Print an error string (red colored).
 
         If the output does not support colors, just return the original string.
         """
         return self.MOVE_BACK + self.ERROR + 'ERROR' + self.ENDC
 
+    def interrupt_str(self):
+        """
+        Print an interrupt string (red colored).
+
+        If the output does not support colors, just return the original string.
+        """
+        return self.MOVE_BACK + self.INTERRUPT + 'INTERRUPT' + self.ENDC
+
     def warn_str(self):
         """
         Print an warning string (yellow colored).
@@ -414,7 +424,8 @@ class View(object):
                    'ERROR': self._log_ui_status_error,
                    'FAIL': self._log_ui_status_fail,
                    'SKIP': self._log_ui_status_skip,
-                   'WARN': self._log_ui_status_warn}
+                   'WARN': self._log_ui_status_warn,
+                   'INTERRUPTED': self._log_ui_status_interrupt}
         mapping[status](state['time_elapsed'])
 
     def set_tests_info(self, info):
@@ -530,6 +541,15 @@ class View(object):
         normal_error_msg = term_support.error_str() + " (%.2f s)" % t_elapsed
         self._log_ui_error_base(normal_error_msg)
 
+    def _log_ui_status_interrupt(self, t_elapsed):
+        """
+        Log an INTERRUPT status message for a given operation.
+
+        :param t_elapsed: Time it took for the operation to complete.
+        """
+        normal_error_msg = term_support.interrupt_str() + " (%.2f s)" % t_elapsed
+        self._log_ui_error_base(normal_error_msg)
+
     def _log_ui_status_fail(self, t_elapsed):
         """
         Log a FAIL status message for a given operation.
diff --git a/avocado/job.py b/avocado/job.py
index 1f6f6dc4d15f35b25d9d8225f7f5713dc401a20e..21282fa00fcc3a3974e4eebe4d0147e0d458ac97 100644
--- a/avocado/job.py
+++ b/avocado/job.py
@@ -223,18 +223,7 @@ class Job(object):
             human_plugin = result.HumanTestResult(self.view, self.args)
             self.result_proxy.add_output_plugin(human_plugin)
 
-    def _run(self, urls=None):
-        """
-        Unhandled job method. Runs a list of test URLs to its completion.
-
-        :param urls: String with tests to run, separated by whitespace.
-                     Optionally, a list of tests (each test a string).
-        :return: Integer with overall job status. See
-                 :mod:`avocado.core.exit_codes` for more information.
-        :raise: Any exception (avocado crashed), or
-                :class:`avocado.core.exceptions.JobBaseException` errors,
-                that configure a job failure.
-        """
+    def _handle_urls(self, urls):
         if urls is None:
             urls = getattr(self.args, 'url', None)
 
@@ -245,15 +234,26 @@ class Job(object):
             e_msg = "Empty test ID. A test path or alias must be provided"
             raise exceptions.OptionValidationError(e_msg)
 
+        return urls
+
+    def _make_test_suite(self, urls=None):
+        """
+        Prepares a test suite to be used for running tests
+
+        :param urls: String with tests to run, separated by whitespace.
+                     Optionally, a list of tests (each test a string).
+        :returns: a test suite (a list of test factories)
+        """
+        urls = self._handle_urls(urls)
+
         self._make_test_loader()
 
         params_list = self.test_loader.discover_urls(urls)
+        test_suite = self.test_loader.discover(params_list)
+        return test_suite
 
-        mux = multiplexer.Mux(self.args)
-        self._setup_job_results()
-
+    def _validate_test_suite(self, test_suite):
         try:
-            test_suite = self.test_loader.discover(params_list)
             # Do not attempt to validate the tests given on the command line if
             # the tests will not be copied from this system to a remote one
             # using the remote plugin features
@@ -269,6 +269,7 @@ class Job(object):
             e_msg = '\n'.join(error_msg_parts)
             raise exceptions.OptionValidationError(e_msg)
 
+    def _filter_test_suite(self, test_suite):
         # Filter tests methods with params.filter and methodName
         filtered_suite = []
         for test_template in test_suite:
@@ -280,14 +281,32 @@ class Job(object):
             else:
                 if method and fnmatch.fnmatch(method, filter_pattern):
                     filtered_suite.append(test_template)
-        test_suite = filtered_suite
+        return filtered_suite
+
+    def _run(self, urls=None):
+        """
+        Unhandled job method. Runs a list of test URLs to its completion.
+
+        :param urls: String with tests to run, separated by whitespace.
+                     Optionally, a list of tests (each test a string).
+        :return: Integer with overall job status. See
+                 :mod:`avocado.core.exit_codes` for more information.
+        :raise: Any exception (avocado crashed), or
+                :class:`avocado.core.exceptions.JobBaseException` errors,
+                that configure a job failure.
+        """
+        self._setup_job_results()
+        test_suite = self._make_test_suite(urls)
+        self._validate_test_suite(test_suite)
+        test_suite = self._filter_test_suite(test_suite)
 
         if not test_suite:
             e_msg = ("No tests found within the specified path(s) "
                      "(Possible reasons: File ownership, permissions, "
                      "filters, typos)")
             raise exceptions.OptionValidationError(e_msg)
 
+        mux = multiplexer.Mux(self.args)
         self.args.test_result_total = mux.get_number_of_tests(test_suite)
         self._make_test_result()
diff --git a/avocado/result.py b/avocado/result.py
index 24b1527f3553c1f045d52d3668e23f23326a494e..250ea3b4e5ff6009d1c206d31b333a138f77e311 100644
--- a/avocado/result.py
+++ b/avocado/result.py
@@ -126,12 +126,28 @@ class TestResult(object):
         self.failed = []
         self.skipped = []
         self.warned = []
+        self.interrupted = []
 
         # Where this results intends to write to. Convention is that a dash (-)
         # means stdout, and stdout is a special output that can be exclusively
         # claimed by a result class.
         self.output = None
 
+    def _reconcile(self):
+        """
+        Make sure job results are reconciled
+
+        In situations such as job interruptions, some test results will be
+        missing, but this is no excuse for giving wrong summaries of test
+        results.
+        """
+        valid_results_count = (len(self.passed) + len(self.errors) +
+                               len(self.failed) + len(self.warned) +
+                               len(self.skipped) + len(self.interrupted))
+        other_skipped_count = self.tests_total - valid_results_count
+        for i in xrange(other_skipped_count):
+            self.skipped.append({})
+
     def start_tests(self):
         """
         Called once before any tests are executed.
@@ -209,6 +225,15 @@
         """
         self.warned.append(state)
 
+    def add_interrupt(self, state):
+        """
+        Called when a test is interrupted by the user.
+
+        :param state: result of :class:`avocado.test.Test.get_state`.
+        :type state: dict
+        """
+        self.interrupted.append(state)
+
     def check_test(self, state):
         """
         Called once for a test to check status and report.
@@ -219,7 +244,8 @@
                       'ERROR': self.add_error,
                       'FAIL': self.add_fail,
                       'TEST_NA': self.add_skip,
-                      'WARN': self.add_warn}
+                      'WARN': self.add_warn,
+                      'INTERRUPTED': self.add_interrupt}
         add = status_map[state['status']]
         add(state)
         self.end_test(state)
@@ -250,11 +276,13 @@ class TestResult(object):
         """
         Called once after all tests are executed.
         """
+        self._reconcile()
         self.stream.notify(event="message", msg="PASS : %d" % len(self.passed))
         self.stream.notify(event="message", msg="ERROR : %d" % len(self.errors))
         self.stream.notify(event="message", msg="FAIL : %d" % len(self.failed))
         self.stream.notify(event="message", msg="SKIP : %d" % len(self.skipped))
         self.stream.notify(event="message", msg="WARN : %d" % len(self.warned))
+        self.stream.notify(event="message", msg="INTERRUPT : %d" % len(self.interrupted))
         self.stream.notify(event="message", msg="TIME : %.2f s" % self.total_time)
 
     def start_test(self, state):
diff --git a/avocado/runner.py b/avocado/runner.py
index aa4a61436bb6790acca44c3abcd9fc356680f96d..641574712f7c3f9ed6659546453dba93d6696350 100644
--- a/avocado/runner.py
+++ b/avocado/runner.py
@@ -207,11 +207,13 @@ class TestRunner(object):
         # don't process other tests from the list
         if ctrl_c_count > 0:
             self.job.view.notify(event='minor', msg='')
-            return False
 
         self.result.check_test(test_state)
         if not status.mapping[test_state['status']]:
             failures.append(test_state['name'])
+
+        if ctrl_c_count > 0:
+            return False
         return True
 
     def run_suite(self, test_suite, mux):
@@ -228,14 +230,10 @@ class TestRunner(object):
 
         self.result.start_tests()
         q = queues.SimpleQueue()
-        ctrl_c = False
         for test_template in test_suite:
             for test_factory in mux.itertests(test_template):
                 if not self._run_test(test_factory, q, failures):
-                    ctrl_c = True
                     break
-            if ctrl_c:
-                break
         runtime.CURRENT_TEST = None
         self.result.end_tests()
         if self.job.sysinfo is not None:
diff --git a/docs/source/GetStartedGuide.rst b/docs/source/GetStartedGuide.rst
index ebaa1bd3315fe9614d69f9abe1ce0e6d7354f4e2..de0ab6fff5e194d114203d37094bfbe4c4208850 100644
--- a/docs/source/GetStartedGuide.rst
+++ b/docs/source/GetStartedGuide.rst
@@ -136,15 +136,17 @@ Running Tests
 You can run them using the subcommand ``run``::
 
     $ avocado run sleeptest
-    JOB ID : 381b849a62784228d2fd208d929cc49f310412dc
-    JOB LOG: $HOME/avocado/job-results/job-2014-08-12T15.39-381b849a/job.log
-    TESTS : 1
+    JOB ID    : 381b849a62784228d2fd208d929cc49f310412dc
+    JOB LOG   : $HOME/avocado/job-results/job-2014-08-12T15.39-381b849a/job.log
+    JOB HTML  : $HOME/avocado/job-results/job-2014-08-12T15.39-381b849a/html/results.html
+    TESTS     : 1
     (1/1) sleeptest.1: PASS (1.01 s)
-    PASS : 1
-    ERROR: 0
-    FAIL : 0
-    SKIP : 0
-    WARN : 0
+    PASS      : 1
+    ERROR     : 0
+    FAIL      : 0
+    SKIP      : 0
+    WARN      : 0
+    INTERRUPT : 0
     TIME : 1.01 s
 
 Job ID
@@ -170,21 +172,23 @@ native tests and simple tests::
     $ echo 'true' >> /tmp/script_that_passes.sh
     $ chmod +x /tmp/script_that_passes.sh
     $ avocado run failtest sleeptest synctest failtest synctest /tmp/script_that_passes.sh
-    JOB ID : 86911e49b5f2c36caeea41307cee4fecdcdfa121
-    JOB LOG: $HOME/avocado/job-results/job-2014-08-12T15.42-86911e49/job.log
-    TESTS : 6
+    JOB ID    : 86911e49b5f2c36caeea41307cee4fecdcdfa121
+    JOB LOG   : $HOME/avocado/job-results/job-2014-08-12T15.42-86911e49/job.log
+    JOB HTML  : $HOME/avocado/job-results/job-2014-08-12T15.42-86911e49/html/results.html
+    TESTS     : 6
     (1/6) failtest.1: FAIL (0.00 s)
     (2/6) sleeptest.1: PASS (1.00 s)
     (3/6) synctest.1: ERROR (0.01 s)
     (4/6) failtest.2: FAIL (0.00 s)
     (5/6) synctest.2: ERROR (0.01 s)
     (6/6) /tmp/script_that_passes.sh.1: PASS (0.02 s)
-    PASS : 2
-    ERROR: 2
-    FAIL : 2
-    SKIP : 0
-    WARN : 0
-    TIME : 1.04 s
+    PASS      : 2
+    ERROR     : 2
+    FAIL      : 2
+    SKIP      : 0
+    WARN      : 0
+    INTERRUPT : 0
+    TIME      : 1.04 s
 
 Debugging tests
 ===============
diff --git a/docs/source/OutputPlugins.rst b/docs/source/OutputPlugins.rst
index bb62f73221fcd74e33cd8d41e0c87fef3553160c..7d365a99ed5e334f38eba6085cb7074608d120db 100644
--- a/docs/source/OutputPlugins.rst
+++ b/docs/source/OutputPlugins.rst
@@ -28,18 +28,20 @@ print while executing tests::
 Or the more verbose avocado output::
 
     $ avocado run sleeptest failtest synctest
-    JOB ID : 5ffe479262ea9025f2e4e84c4e92055b5c79bdc9
-    JOB LOG: $HOME/avocado/job-results/job-2014-08-12T15.57-5ffe4792/job.log
-    TESTS : 3
+    JOB ID    : 5ffe479262ea9025f2e4e84c4e92055b5c79bdc9
+    JOB LOG   : $HOME/avocado/job-results/job-2014-08-12T15.57-5ffe4792/job.log
+    JOB HTML  : $HOME/avocado/job-results/job-2014-08-12T15.57-5ffe4792/html/results.html
+    TESTS     : 3
     (1/3) sleeptest.1: PASS (1.01 s)
     (2/3) failtest.1: FAIL (0.00 s)
     (3/3) synctest.1: PASS (1.98 s)
-    PASS : 1
-    ERROR: 1
-    FAIL : 1
-    SKIP : 0
-    WARN : 0
-    TIME : 3.17 s
+    PASS      : 1
+    ERROR     : 1
+    FAIL      : 1
+    SKIP      : 0
+    WARN      : 0
+    INTERRUPT : 0
+    TIME      : 3.17 s
 
 The most important thing is to remember that programs should never need to
 parse human output to figure out what happened with your test run.
diff --git a/docs/source/WritingTests.rst b/docs/source/WritingTests.rst
index 6d0b92d84ab46eefc170d844909e131903ac9ac7..c87aee5d1921f511a3686333a528919c2535d442 100644
--- a/docs/source/WritingTests.rst
+++ b/docs/source/WritingTests.rst
@@ -146,17 +146,19 @@ You may use the avocado runner with a multiplex file to provide params and matrix
 generation for sleeptest just like::
 
     $ avocado run sleeptest --multiplex examples/tests/sleeptest.py.data/sleeptest.yaml
-    JOB ID : d565e8dec576d6040f894841f32a836c751f968f
-    JOB LOG: $HOME/avocado/job-results/job-2014-08-12T15.44-d565e8de/job.log
-    TESTS : 3
+    JOB ID    : d565e8dec576d6040f894841f32a836c751f968f
+    JOB LOG   : $HOME/avocado/job-results/job-2014-08-12T15.44-d565e8de/job.log
+    JOB HTML  : $HOME/avocado/job-results/job-2014-08-12T15.44-d565e8de/html/results.html
+    TESTS     : 3
     (1/3) sleeptest.short: PASS (0.50 s)
     (2/3) sleeptest.medium: PASS (1.01 s)
     (3/3) sleeptest.long: PASS (5.01 s)
-    PASS : 3
-    ERROR: 0
-    FAIL : 0
-    SKIP : 0
-    WARN : 0
+    PASS      : 3
+    ERROR     : 0
+    FAIL      : 0
+    SKIP      : 0
+    WARN      : 0
+    INTERRUPT : 0
     TIME : 6.52 s
 
 Note that, as your multiplex file specifies all parameters for sleeptest, you
@@ -168,18 +170,20 @@ can't leave the test ID empty::
 If you want to run some tests that don't require params set by the multiplex file, you can::
 
     $ avocado run sleeptest synctest --multiplex examples/tests/sleeptest.py.data/sleeptest.yaml
-    JOB ID : dd91ea5f8b42b2f084702315688284f7e8aa220a
-    JOB LOG: $HOME/avocado/job-results/job-2014-08-12T15.49-dd91ea5f/job.log
-    TESTS : 4
+    JOB ID    : dd91ea5f8b42b2f084702315688284f7e8aa220a
+    JOB LOG   : $HOME/avocado/job-results/job-2014-08-12T15.49-dd91ea5f/job.log
+    JOB HTML  : $HOME/avocado/job-results/job-2014-08-12T15.49-dd91ea5f/html/results.html
+    TESTS     : 4
     (1/4) sleeptest.short: PASS (0.50 s)
     (2/4) sleeptest.medium: PASS (1.01 s)
     (3/4) sleeptest.long: PASS (5.01 s)
     (4/4) synctest.1: ERROR (1.85 s)
-    PASS : 3
-    ERROR: 1
-    FAIL : 0
-    SKIP : 0
-    WARN : 0
+    PASS      : 3
+    ERROR     : 1
+    FAIL      : 0
+    SKIP      : 0
+    WARN      : 0
+    INTERRUPT : 0
     TIME : 8.69 s
 
 Avocado tests are also unittests
 ================================
@@ -211,16 +215,18 @@ you want to use it, don't forget to ``chmod +x`` your test.
 Executing an avocado test gives::
 
     $ examples/tests/sleeptest.py
-    JOB ID : de6c1e4c227c786dc4d926f6fca67cda34d96276
-    JOB LOG: $HOME/avocado/job-results/job-2014-08-12T15.48-de6c1e4c/job.log
-    TESTS : 1
+    JOB ID    : de6c1e4c227c786dc4d926f6fca67cda34d96276
+    JOB LOG   : $HOME/avocado/job-results/job-2014-08-12T15.48-de6c1e4c/job.log
+    JOB HTML  : $HOME/avocado/job-results/job-2014-08-12T15.48-de6c1e4c/html/results.html
+    TESTS     : 1
     (1/1) sleeptest.1: PASS (1.00 s)
-    PASS : 1
-    ERROR: 0
-    FAIL : 0
-    SKIP : 0
-    WARN : 0
-    TIME : 1.00 s
+    PASS      : 1
+    ERROR     : 0
+    FAIL      : 0
+    SKIP      : 0
+    WARN      : 0
+    INTERRUPT : 0
+    TIME      : 1.00 s
 
 Running tests with nosetests
 ============================
@@ -546,16 +552,18 @@ impact your test grid. You can account for that possibility and set up a
 ::
 
     $ avocado run sleeptest --multiplex /tmp/sleeptest-example.mplx
-    JOB ID : 6d5a2ff16bb92395100fbc3945b8d253308728c9
-    JOB LOG: $HOME/avocado/job-results/job-2014-08-12T15.52-6d5a2ff1/job.log
-    TESTS : 1
+    JOB ID    : 6d5a2ff16bb92395100fbc3945b8d253308728c9
+    JOB LOG   : $HOME/avocado/job-results/job-2014-08-12T15.52-6d5a2ff1/job.log
+    JOB HTML  : $HOME/avocado/job-results/job-2014-08-12T15.52-6d5a2ff1/html/results.html
+    TESTS     : 1
     (1/1) sleeptest.1: ERROR (2.97 s)
-    PASS : 0
-    ERROR: 1
-    FAIL : 0
-    SKIP : 0
-    WARN : 0
-    TIME : 2.97 s
+    PASS      : 0
+    ERROR     : 1
+    FAIL      : 0
+    SKIP      : 0
+    WARN      : 0
+    INTERRUPT : 0
+    TIME      : 2.97 s
 
 ::
@@ -635,16 +643,18 @@ This accomplishes a similar effect to the multiplex setup defined in there.
 ::
 
     $ avocado run timeouttest
-    JOB ID : d78498a54504b481192f2f9bca5ebb9bbb820b8a
-    JOB LOG: $HOME/avocado/job-results/job-2014-08-12T15.54-d78498a5/job.log
-    TESTS : 1
+    JOB ID    : d78498a54504b481192f2f9bca5ebb9bbb820b8a
+    JOB LOG   : $HOME/avocado/job-results/job-2014-08-12T15.54-d78498a5/job.log
+    JOB HTML  : $HOME/avocado/job-results/job-2014-08-12T15.54-d78498a5/html/results.html
+    TESTS     : 1
     (1/1) timeouttest.1: ERROR (2.97 s)
-    PASS : 0
-    ERROR: 1
-    FAIL : 0
-    SKIP : 0
-    WARN : 0
-    TIME : 2.97 s
+    PASS      : 0
+    ERROR     : 1
+    FAIL      : 0
+    SKIP      : 0
+    WARN      : 0
+    INTERRUPT : 0
+    TIME      : 2.97 s
 
 ::
diff --git a/man/avocado.rst b/man/avocado.rst
index 2106bc914f59db63bb90d9ed464566bfbe53ef71..daf300be6419f97ef0e130b10234530feb35d297 100644
--- a/man/avocado.rst
+++ b/man/avocado.rst
@@ -83,6 +83,7 @@ directories. The output should be similar to::
     FAIL : 0
     SKIP : 0
     WARN : 0
+    INTERRUPT : 0
     TIME : 1.00 s
 
 The test directories will vary depending on you system and
@@ -285,6 +286,7 @@ And the output should look like::
     FAIL : 0
     SKIP : 0
     WARN : 0
+    INTERRUPT : 0
     TIME : 16.53 s
 
 The `multiplex` plugin and the test runner supports two kinds of global
@@ -448,6 +450,7 @@ option --output-check-record all to the test runner::
     FAIL : 0
     SKIP : 0
     WARN : 0
+    INTERRUPT : 0
     TIME : 2.20 s
 
 After the reference files are added, the check process is transparent, in the
@@ -483,6 +486,7 @@ Let's record the output (both stdout and stderr) for this one::
     FAIL : 0
     SKIP : 0
     WARN : 0
+    INTERRUPT : 0
     TIME : 0.01 s
 
 After this is done, you'll notice that a the test data directory
@@ -531,6 +535,7 @@ The output should look like::
     FAIL : 0
     SKIP : 0
     WARN : 0
+    INTERRUPT : 0
     TIME : 1.01 s
 
 For more information, please consult the topic Remote Machine Plugin
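
The patch above is mostly an exercise in extending dispatch tables: the result
recorder (``TestResult.check_test``) and the console view both map a test's
``status`` string to a handler, so supporting ``INTERRUPTED`` means one new
table entry plus the handler it points to. The following is a minimal,
self-contained sketch of that pattern; the class, handler names and test
states are illustrative stand-ins, not Avocado's real API::

    # Simplified model of the status -> handler dispatch the patch extends.
    class MiniResult(object):

        def __init__(self):
            self.passed = []
            self.failed = []
            self.interrupted = []

        def add_pass(self, state):
            self.passed.append(state)

        def add_fail(self, state):
            self.failed.append(state)

        def add_interrupt(self, state):
            self.interrupted.append(state)

        def check_test(self, state):
            # Without an 'INTERRUPTED' entry this lookup would raise KeyError
            # for a test stopped by Ctrl+C instead of recording it.
            status_map = {'PASS': self.add_pass,
                          'FAIL': self.add_fail,
                          'INTERRUPTED': self.add_interrupt}
            status_map[state['status']](state)

    result = MiniResult()
    result.check_test({'status': 'PASS', 'name': 'sleeptest.1'})
    result.check_test({'status': 'INTERRUPTED', 'name': 'synctest.1'})
    assert len(result.interrupted) == 1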
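
The ``_reconcile`` helper added to ``TestResult`` keeps the summary honest
after an interruption: tests that never ran produce no result at all, so the
per-status counts would otherwise no longer add up to the announced total.
A rough equivalent of that bookkeeping, using an illustrative dict-of-lists
rather than the real ``TestResult`` attributes::

    def reconcile(results, tests_total):
        """Pad 'skipped' so the per-status counts add up to tests_total."""
        recorded = sum(len(states) for states in results.values())
        missing = tests_total - recorded
        # Every test that never produced a result is counted as skipped,
        # mirroring TestResult._reconcile() in the patch above.
        results['skipped'].extend({} for _ in range(max(missing, 0)))
        return results

    summary = {'passed': [{}, {}], 'failed': [],
               'skipped': [], 'interrupted': [{}]}
    reconcile(summary, tests_total=6)
    assert len(summary['skipped']) == 3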