From 28817ab3df58ff419043711aff34a06ae7115230 Mon Sep 17 00:00:00 2001
From: Amador Pahim
Date: Wed, 8 Mar 2017 16:57:36 +0100
Subject: [PATCH] Introduce the CANCEL status and the self.cancel() method

We have received many requests to support skipping tests from inside
the test method. Since we can do that without breaking our own
concepts, we decided to introduce the CANCEL status and the
corresponding self.cancel() method in the Test class.

Reference: https://trello.com/c/viBJIEwI
Signed-off-by: Amador Pahim
---
 avocado/core/exceptions.py                |  9 ++
 avocado/core/output.py                    |  1 +
 avocado/core/result.py                    |  6 +-
 avocado/core/status.py                    |  6 +-
 avocado/core/test.py                      | 25 ++++++
 avocado/plugins/human.py                  |  8 +-
 avocado/plugins/jsonresult.py             |  1 +
 avocado/plugins/tap.py                    |  3 +
 avocado/plugins/xunit.py                  |  4 +-
 docs/source/WritingTests.rst              | 73 ++++++++++++++++
 examples/tests/canceltest.py              | 18 ++++
 .../html/avocado_result_html/__init__.py  |  3 +-
 selftests/functional/test_canceltests.py  | 86 +++++++++++++++++++
 13 files changed, 235 insertions(+), 8 deletions(-)
 create mode 100644 examples/tests/canceltest.py
 create mode 100644 selftests/functional/test_canceltests.py

diff --git a/avocado/core/exceptions.py b/avocado/core/exceptions.py
index 02569d5f..9dba753e 100644
--- a/avocado/core/exceptions.py
+++ b/avocado/core/exceptions.py
@@ -177,3 +177,12 @@ class TestWarn(TestBaseException):
     failure.
     """
     status = "WARN"
+
+
+class TestCancel(TestBaseException):
+    """
+    Indicates that a test was cancelled.
+
+    Should be thrown when the cancel() test method is used.
+    """
+    status = "CANCEL"
diff --git a/avocado/core/output.py b/avocado/core/output.py
index 8108d639..237bed1b 100644
--- a/avocado/core/output.py
+++ b/avocado/core/output.py
@@ -78,6 +78,7 @@ class TermSupport(object):
         self.INTERRUPT = self.COLOR_RED
         self.ERROR = self.COLOR_RED
         self.WARN = self.COLOR_YELLOW
+        self.CANCEL = self.COLOR_YELLOW
         self.PARTIAL = self.COLOR_YELLOW
         self.ENDC = self.CONTROL_END
         self.LOWLIGHT = self.COLOR_DARKGREY
diff --git a/avocado/core/result.py b/avocado/core/result.py
index 4d73074c..13eaa8f5 100644
--- a/avocado/core/result.py
+++ b/avocado/core/result.py
@@ -41,6 +41,7 @@ class Result(object):
         self.skipped = 0
         self.warned = 0
         self.interrupted = 0
+        self.cancelled = 0
         self.tests = []

     def _reconcile(self):
@@ -53,7 +54,8 @@ class Result(object):
         """
         valid_results_count = (self.passed + self.errors +
                                self.failed + self.warned +
-                               self.skipped + self.interrupted)
+                               self.skipped + self.interrupted +
+                               self.cancelled)
         other_skipped_count = self.tests_total - valid_results_count
         if other_skipped_count > 0:
             self.skipped += other_skipped_count
@@ -103,6 +105,8 @@ class Result(object):
             self.warned += 1
         elif status == "INTERRUPTED":
             self.interrupted += 1
+        elif status == "CANCEL":
+            self.cancelled += 1
         else:
             self.errors += 1
         self.end_test(state)
diff --git a/avocado/core/status.py b/avocado/core/status.py
index 266b49c5..6b785cae 100644
--- a/avocado/core/status.py
+++ b/avocado/core/status.py
@@ -26,14 +26,16 @@ mapping = {"SKIP": True,
            "ALERT": False,
            "RUNNING": False,
            "NOSTATUS": False,
-           "INTERRUPTED": False}
+           "INTERRUPTED": False,
+           "CANCEL": True}

 user_facing_status = ["SKIP",
                       "ERROR",
                       "FAIL",
                       "WARN",
                       "PASS",
-                      "INTERRUPTED"]
+                      "INTERRUPTED",
+                      "CANCEL"]

 feedback = {
     # Test did not advertise current status, but process running the test is
diff --git a/avocado/core/test.py b/avocado/core/test.py
index 478e2359..dd8fe779 100644
--- a/avocado/core/test.py
+++ b/avocado/core/test.py
@@ -567,6 +567,13 @@ class Test(unittest.TestCase):
                     exceptions.TestSkipError) as details:
                 stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                 raise exceptions.TestSkipError(details)
+            except exceptions.TestCancel as details:
+                stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
+                skip_illegal_msg = ('Calling cancel() in setUp() '
+                                    'is not allowed in avocado, you '
+                                    'must fix your test. Original cancel exception: '
+                                    '%s' % details)
+                raise exceptions.TestError(skip_illegal_msg)
             except:  # Old-style exceptions are not inherited from Exception()
                 stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                 details = sys.exc_info()[1]
@@ -583,6 +590,9 @@
             except exceptions.TestDecoratorSkip as details:
                 stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                 raise exceptions.TestSkipError(details)
+            except exceptions.TestCancel as details:
+                stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
+                raise
             except:  # Old-style exceptions are not inherited from Exception()
                 stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                 details = sys.exc_info()[1]
@@ -774,6 +784,21 @@
         """
         raise exceptions.TestSetupSkip(message)

+    def cancel(self, message=None):
+        """
+        Cancels the test.
+
+        This method is expected to be called from the test method, not
+        anywhere else, since by definition, we can only cancel a test that
+        is currently under execution. If you call this method outside the
+        test method, avocado will mark your test status as ERROR, and
+        instruct you to fix your test in the error message.
+
+        :param message: an optional message that will be recorded in the logs
+        :type message: str
+        """
+        raise exceptions.TestCancel(message)
+
     def fetch_asset(self, name, asset_hash=None, algorithm='sha1',
                     locations=None, expire=None):
         """
diff --git a/avocado/plugins/human.py b/avocado/plugins/human.py
index 44ef59b1..018fc7d3 100644
--- a/avocado/plugins/human.py
+++ b/avocado/plugins/human.py
@@ -35,7 +35,8 @@ class Human(ResultEvents):
                 'FAIL': output.TERM_SUPPORT.FAIL,
                 'SKIP': output.TERM_SUPPORT.SKIP,
                 'WARN': output.TERM_SUPPORT.WARN,
-                'INTERRUPTED': output.TERM_SUPPORT.INTERRUPT}
+                'INTERRUPTED': output.TERM_SUPPORT.INTERRUPT,
+                'CANCEL': output.TERM_SUPPORT.CANCEL}

     def __init__(self, args):
         self.log = logging.getLogger("avocado.app")
@@ -93,7 +94,8 @@ class Human(ResultEvents):
         if not self.owns_stdout:
             return
         self.log.info("RESULTS    : PASS %d | ERROR %d | FAIL %d | SKIP %d | "
-                      "WARN %d | INTERRUPT %s", job.result.passed,
+                      "WARN %d | INTERRUPT %s | CANCEL %s", job.result.passed,
                       job.result.errors, job.result.failed, job.result.skipped,
-                      job.result.warned, job.result.interrupted)
+                      job.result.warned, job.result.interrupted,
+                      job.result.cancelled)
         self.log.info("TESTS TIME : %.2f s", job.result.tests_total_time)
diff --git a/avocado/plugins/jsonresult.py b/avocado/plugins/jsonresult.py
index d81dbe8a..a8445b53 100644
--- a/avocado/plugins/jsonresult.py
+++ b/avocado/plugins/jsonresult.py
@@ -62,6 +62,7 @@ class JSONResult(Result):
                    'errors': result.errors,
                    'failures': result.failed,
                    'skip': result.skipped,
+                   'cancel': result.cancelled,
                    'time': result.tests_total_time}

         return json.dumps(content, sort_keys=True,
diff --git a/avocado/plugins/tap.py b/avocado/plugins/tap.py
index 52f5bb70..6203c42e 100644
--- a/avocado/plugins/tap.py
+++ b/avocado/plugins/tap.py
@@ -114,6 +114,9 @@ class TAPResult(ResultEvents):
             self.__write("ok %s %s", result.tests_run, name)
         elif status == "SKIP":
             self.__write("ok %s %s # SKIP %s", result.tests_run, name,
state.get("fail_reason")) + elif status == "CANCEL": + self.__write("ok %s %s # CANCEL %s", + result.tests_run, name, state.get("fail_reason")) else: self.__write("not ok %s %s", result.tests_run, name) diff --git a/avocado/plugins/xunit.py b/avocado/plugins/xunit.py index e8a17457..c71850a7 100644 --- a/avocado/plugins/xunit.py +++ b/avocado/plugins/xunit.py @@ -79,7 +79,7 @@ class XUnitResult(Result): testsuite.setAttribute('tests', self._escape_attr(result.tests_total)) testsuite.setAttribute('errors', self._escape_attr(result.errors + result.interrupted)) testsuite.setAttribute('failures', self._escape_attr(result.failed)) - testsuite.setAttribute('skipped', self._escape_attr(result.skipped)) + testsuite.setAttribute('skipped', self._escape_attr(result.skipped + result.cancelled)) testsuite.setAttribute('time', self._escape_attr(result.tests_total_time)) testsuite.setAttribute('timestamp', self._escape_attr(datetime.datetime.now())) document.appendChild(testsuite) @@ -93,6 +93,8 @@ class XUnitResult(Result): elif status == 'FAIL': element = self._create_failure_or_error(document, test, 'failure') testcase.appendChild(element) + elif status == 'CANCEL': + testcase.appendChild(Element('skipped')) else: element = self._create_failure_or_error(document, test, 'error') testcase.appendChild(element) diff --git a/docs/source/WritingTests.rst b/docs/source/WritingTests.rst index 5ed9c73c..6a23d29c 100644 --- a/docs/source/WritingTests.rst +++ b/docs/source/WritingTests.rst @@ -1034,6 +1034,79 @@ Notice the ``test3`` was not skipped because the provided condition was not ``False``. +Cancelling Tests +================ + +The only supported way to cancel a test and not negatively impact the +job exit status (unlike using `self.fail` or `self.error`) is by using +the `self.cancel()` method. The `self.cancel()` can be called only +from your test methods. Example:: + + #!/usr/bin/env python + + from avocado import Test + from avocado import main + + from avocado.utils.process import run + from avocado.utils.software_manager import SoftwareManager + + + class CancelTest(Test): + + """ + Example tests that cancel the current test from inside the test. + """ + + def setUp(self): + sm = SoftwareManager() + self.pkgs = sm.list_all(software_components=False) + + def test_iperf(self): + if 'iperf-2.0.8-6.fc25.x86_64' not in self.pkgs: + self.cancel('iperf is not installed or wrong version') + self.assertIn('pthreads', + run('iperf -v', ignore_status=True).stderr) + + def test_gcc(self): + if 'gcc-6.3.1-1.fc25.x86_64' not in self.pkgs: + self.cancel('gcc is not installed or wrong version') + self.assertIn('enable-gnu-indirect-function', + run('gcc -v', ignore_status=True).stderr) + + if __name__ == "__main__": + main() + +In a system missing the `iperf` package but with `gcc` installed in +the correct version, the result will be:: + + JOB ID : 39c1f120830b9769b42f5f70b6b7bad0b1b1f09f + JOB LOG : $HOME/avocado/job-results/job-2017-03-10T16.22-39c1f12/job.log + (1/2) /home/apahim/avocado/tests/test_cancel.py:CancelTest.test_iperf: CANCEL (1.15 s) + (2/2) /home/apahim/avocado/tests/test_cancel.py:CancelTest.test_gcc: PASS (1.13 s) + RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1 + TESTS TIME : 2.28 s + JOB HTML : $HOME/avocado/job-results/job-2017-03-10T16.22-39c1f12/html/results.html + +Notice that, since the `setUp()` was already executed, calling the +`self.cancel()` will cancel the rest of the test from that point on, but +the `tearDown()` will still be executed. 
+
+Depending on the result format you're referring to, the `CANCEL` status
+is mapped to a corresponding valid status in that format, as the table
+below shows:
+
++--------+----------------------+
+| Format | Corresponding Status |
++========+======================+
+| json   | cancel               |
++--------+----------------------+
+| xunit  | skipped              |
++--------+----------------------+
+| tap    | ok                   |
++--------+----------------------+
+| html   | CANCEL (warning)     |
++--------+----------------------+
+

 Docstring Directives
 ====================
diff --git a/examples/tests/canceltest.py b/examples/tests/canceltest.py
new file mode 100644
index 00000000..4962dc15
--- /dev/null
+++ b/examples/tests/canceltest.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+from avocado import Test
+from avocado import main
+
+
+class CancelTest(Test):
+
+    """
+    Example test that cancels the current test from inside the test.
+    """
+
+    def test(self):
+        self.cancel("This should end with CANCEL.")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/optional_plugins/html/avocado_result_html/__init__.py b/optional_plugins/html/avocado_result_html/__init__.py
index 3bd44063..fb163543 100644
--- a/optional_plugins/html/avocado_result_html/__init__.py
+++ b/optional_plugins/html/avocado_result_html/__init__.py
@@ -113,7 +113,8 @@ class ReportModel(object):
                         "ALERT": "danger",
                         "RUNNING": "info",
                         "NOSTATUS": "info",
-                        "INTERRUPTED": "danger"}
+                        "INTERRUPTED": "danger",
+                        "CANCEL": "warning"}
         test_info = []
         results_dir = self.results_dir(False)
         for tst in self.result.tests:
diff --git a/selftests/functional/test_canceltests.py b/selftests/functional/test_canceltests.py
new file mode 100644
index 00000000..ea5c3c87
--- /dev/null
+++ b/selftests/functional/test_canceltests.py
@@ -0,0 +1,86 @@
+import json
+import os
+import shutil
+import tempfile
+import unittest
+
+from avocado.core import exit_codes
+from avocado.utils import process
+from avocado.utils import script
+
+basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
+basedir = os.path.abspath(basedir)
+
+AVOCADO_TEST_CANCEL = """
+import avocado
+
+class AvocadoCancelTest(avocado.Test):
+
+    def test(self):
+        self.cancel()
+"""
+
+AVOCADO_TEST_CANCEL_ON_SETUP = """
+import avocado
+
+class AvocadoCancelTest(avocado.Test):
+
+    def setUp(self):
+        self.cancel()
+
+    def test(self):
+        pass
+"""
+
+
+class TestCancel(unittest.TestCase):
+
+    def setUp(self):
+        os.chdir(basedir)
+        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
+
+        test_path = os.path.join(self.tmpdir, 'test_cancel.py')
+        self.test_cancel = script.Script(test_path,
+                                         AVOCADO_TEST_CANCEL)
+        self.test_cancel.save()
+
+        test_path = os.path.join(self.tmpdir, 'test_cancel_on_setup.py')
+        self.test_cancel_on_setup = script.Script(test_path,
+                                                  AVOCADO_TEST_CANCEL_ON_SETUP)
+        self.test_cancel_on_setup.save()
+
+    def test_cancel(self):
+        os.chdir(basedir)
+        cmd_line = ['./scripts/avocado',
+                    'run',
+                    '--sysinfo=off',
+                    '--job-results-dir',
+                    '%s' % self.tmpdir,
+                    '%s' % self.test_cancel,
+                    '--json -']
+        result = process.run(' '.join(cmd_line), ignore_status=True)
+        json_results = json.loads(result.stdout)
+        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
+        self.assertEqual(json_results['cancel'], 1)
+
+    def test_cancel_on_setup(self):
+        os.chdir(basedir)
+        cmd_line = ['./scripts/avocado',
+                    'run',
+                    '--sysinfo=off',
+                    '--job-results-dir',
+                    '%s' % self.tmpdir,
+                    '%s' % self.test_cancel_on_setup,
+                    '--json -']
+        result = process.run(' '.join(cmd_line), ignore_status=True)
+        json_results = json.loads(result.stdout)
+        self.assertEqual(result.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
+        self.assertEqual(json_results['cancel'], 0)
+        self.assertEqual(json_results['errors'], 1)
+
+    def tearDown(self):
+        shutil.rmtree(self.tmpdir)
+
+
+if __name__ == '__main__':
+    unittest.main()
--
GitLab
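
As a quick end-to-end check of the new status, here is a minimal sketch
(not part of the patch itself) that runs the `canceltest.py` example
added above and reads the new `cancel` counter from the JSON results.
It assumes an in-tree checkout where `./scripts/avocado` exists, as in
the selftests::

    import json
    import subprocess

    # '--json -' writes the JSON results to stdout; the selftests above
    # rely on the same behaviour. A job whose only non-PASS status is
    # CANCEL exits with AVOCADO_ALL_OK (0), so check_output won't raise.
    stdout = subprocess.check_output(['./scripts/avocado', 'run',
                                      '--sysinfo=off',
                                      'examples/tests/canceltest.py',
                                      '--json', '-'])

    results = json.loads(stdout)
    # This patch adds the 'cancel' counter alongside 'pass', 'errors',
    # 'failures' and 'skip'.
    print('cancelled tests:', results['cancel'])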