diff --git a/avocado/core/exceptions.py b/avocado/core/exceptions.py
index 02569d5f166255ea9fb5e137b6fd2036c00c9ae7..9dba753ebca5bde23634f64e990c2aef6206f4f0 100644
--- a/avocado/core/exceptions.py
+++ b/avocado/core/exceptions.py
@@ -177,3 +177,12 @@ class TestWarn(TestBaseException):
     failure.
     """
     status = "WARN"
+
+
+class TestCancel(TestBaseException):
+    """
+    Indicates that a test was cancelled.
+
+    Should be raised when the cancel() test method is used.
+    """
+    status = "CANCEL"
diff --git a/avocado/core/output.py b/avocado/core/output.py
index 8108d6397daf67ada7cfef5318e00dc55be87fd7..237bed1b0470348302ddd2b859cf6c8618a76df0 100644
--- a/avocado/core/output.py
+++ b/avocado/core/output.py
@@ -78,6 +78,7 @@ class TermSupport(object):
         self.INTERRUPT = self.COLOR_RED
         self.ERROR = self.COLOR_RED
         self.WARN = self.COLOR_YELLOW
+        self.CANCEL = self.COLOR_YELLOW
         self.PARTIAL = self.COLOR_YELLOW
         self.ENDC = self.CONTROL_END
         self.LOWLIGHT = self.COLOR_DARKGREY
diff --git a/avocado/core/result.py b/avocado/core/result.py
index 4d73074c0469374e7c418c677485e96f43d671db..13eaa8f547975269eae585a2573b006f389b0670 100644
--- a/avocado/core/result.py
+++ b/avocado/core/result.py
@@ -41,6 +41,7 @@ class Result(object):
         self.skipped = 0
         self.warned = 0
         self.interrupted = 0
+        self.cancelled = 0
         self.tests = []
 
     def _reconcile(self):
@@ -53,7 +54,8 @@ class Result(object):
         """
         valid_results_count = (self.passed + self.errors +
                                self.failed + self.warned +
-                               self.skipped + self.interrupted)
+                               self.skipped + self.interrupted +
+                               self.cancelled)
         other_skipped_count = self.tests_total - valid_results_count
         if other_skipped_count > 0:
             self.skipped += other_skipped_count
@@ -103,6 +105,8 @@ class Result(object):
             self.warned += 1
         elif status == "INTERRUPTED":
             self.interrupted += 1
+        elif status == "CANCEL":
+            self.cancelled += 1
         else:
             self.errors += 1
         self.end_test(state)
diff --git a/avocado/core/status.py b/avocado/core/status.py
index 266b49c5ca45e73e7ec88ed3d8015f6396618b4e..6b785cae742469adc6e4fc37c2d8c0f40f96fec1 100644
--- a/avocado/core/status.py
+++ b/avocado/core/status.py
@@ -26,14 +26,16 @@ mapping = {"SKIP": True,
            "ALERT": False,
            "RUNNING": False,
            "NOSTATUS": False,
-           "INTERRUPTED": False}
+           "INTERRUPTED": False,
+           "CANCEL": True}
 
 user_facing_status = ["SKIP",
                       "ERROR",
                       "FAIL",
                       "WARN",
                       "PASS",
-                      "INTERRUPTED"]
+                      "INTERRUPTED",
+                      "CANCEL"]
 
 feedback = {
     # Test did not advertise current status, but process running the test is
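The `mapping` dict in status.py is what decides whether a final test
status still counts as a success, so `"CANCEL": True` is the line that
keeps cancelled tests from failing the job. A minimal sketch of how
such a mapping is consumed (the `job_succeeded()` helper is
hypothetical, not avocado API)::

    # Illustration only, not part of the patch. True means the status
    # does not fail the job; CANCEL now behaves like SKIP.
    mapping = {"PASS": True, "SKIP": True, "WARN": True, "CANCEL": True,
               "FAIL": False, "ERROR": False, "INTERRUPTED": False}

    def job_succeeded(statuses):
        # True only if every test ended in a non-failing status.
        return all(mapping.get(status, False) for status in statuses)

    assert job_succeeded(["PASS", "CANCEL", "SKIP"])
    assert not job_succeeded(["PASS", "FAIL"])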
diff --git a/avocado/core/test.py b/avocado/core/test.py
index 478e2359bb3e13e44526c0739de8383aa5582216..dd8fe779b3fe3c836bfc7e3600ff58d2c48ce920 100644
--- a/avocado/core/test.py
+++ b/avocado/core/test.py
@@ -567,6 +567,13 @@ class Test(unittest.TestCase):
                     exceptions.TestSkipError) as details:
                 stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                 raise exceptions.TestSkipError(details)
+            except exceptions.TestCancel as details:
+                stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
+                skip_illegal_msg = ('Calling cancel() in setUp() '
+                                    'is not allowed in avocado, you '
+                                    'must fix your test. Original cancel exception: '
+                                    '%s' % details)
+                raise exceptions.TestError(skip_illegal_msg)
             except:  # Old-style exceptions are not inherited from Exception()
                 stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                 details = sys.exc_info()[1]
@@ -583,6 +590,9 @@
             except exceptions.TestDecoratorSkip as details:
                 stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                 raise exceptions.TestSkipError(details)
+            except exceptions.TestCancel as details:
+                stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
+                raise
             except:  # Old-style exceptions are not inherited from Exception()
                 stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                 details = sys.exc_info()[1]
@@ -774,6 +784,21 @@ class Test(unittest.TestCase):
         """
         raise exceptions.TestSetupSkip(message)
 
+    def cancel(self, message=None):
+        """
+        Cancels the test.
+
+        This method is expected to be called from the test method, not
+        anywhere else, since by definition, we can only cancel a test that
+        is currently under execution. If you call this method outside the
+        test method, avocado will mark your test status as ERROR, and
+        instruct you to fix your test in the error message.
+
+        :param message: an optional message that will be recorded in the logs
+        :type message: str
+        """
+        raise exceptions.TestCancel(message)
+
     def fetch_asset(self, name, asset_hash=None, algorithm='sha1',
                     locations=None, expire=None):
         """
diff --git a/avocado/plugins/human.py b/avocado/plugins/human.py
index 44ef59b16094d83b2a2dd615e8eb23435fca895b..018fc7d3db4d6257013d21a976c6a016884aaf6e 100644
--- a/avocado/plugins/human.py
+++ b/avocado/plugins/human.py
@@ -35,7 +35,8 @@ class Human(ResultEvents):
                       'FAIL': output.TERM_SUPPORT.FAIL,
                       'SKIP': output.TERM_SUPPORT.SKIP,
                       'WARN': output.TERM_SUPPORT.WARN,
-                      'INTERRUPTED': output.TERM_SUPPORT.INTERRUPT}
+                      'INTERRUPTED': output.TERM_SUPPORT.INTERRUPT,
+                      'CANCEL': output.TERM_SUPPORT.CANCEL}
 
     def __init__(self, args):
         self.log = logging.getLogger("avocado.app")
@@ -93,7 +94,8 @@
         if not self.owns_stdout:
             return
         self.log.info("RESULTS    : PASS %d | ERROR %d | FAIL %d | SKIP %d | "
-                      "WARN %d | INTERRUPT %s", job.result.passed,
+                      "WARN %d | INTERRUPT %s | CANCEL %s", job.result.passed,
                       job.result.errors, job.result.failed, job.result.skipped,
-                      job.result.warned, job.result.interrupted)
+                      job.result.warned, job.result.interrupted,
+                      job.result.cancelled)
         self.log.info("TESTS TIME : %.2f s", job.result.tests_total_time)
diff --git a/avocado/plugins/jsonresult.py b/avocado/plugins/jsonresult.py
index d81dbe8a9a2a2779e7377ba20752891293396482..a8445b53b24748e74bc4c3d88b68940b3215f74a 100644
--- a/avocado/plugins/jsonresult.py
+++ b/avocado/plugins/jsonresult.py
@@ -62,6 +62,7 @@
                    'errors': result.errors,
                    'failures': result.failed,
                    'skip': result.skipped,
+                   'cancel': result.cancelled,
                    'time': result.tests_total_time}
         return json.dumps(content,
                           sort_keys=True,
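The new `cancel` key sits next to the existing counters in the JSON
payload and is what the functional selftests below assert on. A small
sketch of a consumer, assuming a `results.json` file produced by the
JSON result plugin (the file name is illustrative)::

    import json

    # Read the counters written by the JSON result plugin; 'cancel' is
    # the key introduced by this patch.
    with open('results.json') as results_file:
        results = json.load(results_file)

    for key in ('errors', 'failures', 'skip', 'cancel'):
        print('%s: %d' % (key, results[key]))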
diff --git a/avocado/plugins/tap.py b/avocado/plugins/tap.py
index 52f5bb7037de63013d9ba9111efcc0eea65164b1..6203c42e02ae609874bac183d1f9bd78f8916170 100644
--- a/avocado/plugins/tap.py
+++ b/avocado/plugins/tap.py
@@ -114,6 +114,9 @@
             self.__write("ok %s %s", result.tests_run, name)
         elif status == "SKIP":
             self.__write("ok %s %s # SKIP %s", result.tests_run, name, state.get("fail_reason"))
+        elif status == "CANCEL":
+            self.__write("ok %s %s # CANCEL %s",
+                         result.tests_run, name, state.get("fail_reason"))
         else:
             self.__write("not ok %s %s",
                          result.tests_run, name)
diff --git a/avocado/plugins/xunit.py b/avocado/plugins/xunit.py
index e8a174571e0f65bf879eee7194edb5356c1c9093..c71850a77dc3d67f68af545715cf5c67e1b78fba 100644
--- a/avocado/plugins/xunit.py
+++ b/avocado/plugins/xunit.py
@@ -79,7 +79,7 @@ class XUnitResult(Result):
         testsuite.setAttribute('tests', self._escape_attr(result.tests_total))
         testsuite.setAttribute('errors', self._escape_attr(result.errors + result.interrupted))
         testsuite.setAttribute('failures', self._escape_attr(result.failed))
-        testsuite.setAttribute('skipped', self._escape_attr(result.skipped))
+        testsuite.setAttribute('skipped', self._escape_attr(result.skipped + result.cancelled))
         testsuite.setAttribute('time', self._escape_attr(result.tests_total_time))
         testsuite.setAttribute('timestamp', self._escape_attr(datetime.datetime.now()))
         document.appendChild(testsuite)
@@ -93,6 +93,8 @@ class XUnitResult(Result):
         elif status == 'FAIL':
             element = self._create_failure_or_error(document, test, 'failure')
             testcase.appendChild(element)
+        elif status == 'CANCEL':
+            testcase.appendChild(Element('skipped'))
         else:
             element = self._create_failure_or_error(document, test, 'error')
             testcase.appendChild(element)
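In the xUnit report a cancelled test becomes a `<testcase>` carrying an
empty `<skipped/>` child, which is also why the testsuite-level
`skipped` attribute now adds `result.cancelled` to `result.skipped`. A
standalone sketch of the element the new branch produces (the test name
is made up)::

    from xml.dom.minidom import Document

    document = Document()
    testcase = document.createElement('testcase')
    testcase.setAttribute('name', 'test_iperf')
    # An empty child element; xUnit consumers count this as skipped.
    testcase.appendChild(document.createElement('skipped'))
    # Prints: <testcase name="test_iperf"><skipped/></testcase>
    print(testcase.toxml())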
diff --git a/docs/source/WritingTests.rst b/docs/source/WritingTests.rst
index 5ed9c73cd705ba331f15bca7e64bdbbdc8898c90..6a23d29cf4865293c3f525df15de43def419e67d 100644
--- a/docs/source/WritingTests.rst
+++ b/docs/source/WritingTests.rst
@@ -1034,6 +1034,79 @@ Notice the ``test3`` was not skipped because the provided condition was
 not ``False``.
 
 
+Cancelling Tests
+================
+
+The only supported way to cancel a test without negatively impacting
+the job exit status (unlike `self.fail` or `self.error`) is the
+`self.cancel()` method, which can be called only from your test
+methods. Example::
+
+    #!/usr/bin/env python
+
+    from avocado import Test
+    from avocado import main
+
+    from avocado.utils.process import run
+    from avocado.utils.software_manager import SoftwareManager
+
+
+    class CancelTest(Test):
+
+        """
+        Example tests that cancel themselves from inside the test method.
+        """
+
+        def setUp(self):
+            sm = SoftwareManager()
+            self.pkgs = sm.list_all(software_components=False)
+
+        def test_iperf(self):
+            if 'iperf-2.0.8-6.fc25.x86_64' not in self.pkgs:
+                self.cancel('iperf is not installed or wrong version')
+            self.assertIn('pthreads',
+                          run('iperf -v', ignore_status=True).stderr)
+
+        def test_gcc(self):
+            if 'gcc-6.3.1-1.fc25.x86_64' not in self.pkgs:
+                self.cancel('gcc is not installed or wrong version')
+            self.assertIn('enable-gnu-indirect-function',
+                          run('gcc -v', ignore_status=True).stderr)
+
+    if __name__ == "__main__":
+        main()
+
+On a system missing the `iperf` package but with `gcc` installed in
+the correct version, the result will be::
+
+    JOB ID     : 39c1f120830b9769b42f5f70b6b7bad0b1b1f09f
+    JOB LOG    : $HOME/avocado/job-results/job-2017-03-10T16.22-39c1f12/job.log
+     (1/2) /home/apahim/avocado/tests/test_cancel.py:CancelTest.test_iperf: CANCEL (1.15 s)
+     (2/2) /home/apahim/avocado/tests/test_cancel.py:CancelTest.test_gcc: PASS (1.13 s)
+    RESULTS    : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1
+    TESTS TIME : 2.28 s
+    JOB HTML   : $HOME/avocado/job-results/job-2017-03-10T16.22-39c1f12/html/results.html
+
+Notice that, since `setUp()` was already executed, calling
+`self.cancel()` cancels the rest of the test from that point on, but
+`tearDown()` will still be executed.
+
+Depending on the result format you're referring to, the `CANCEL` status
+is mapped to a corresponding valid status in that format. See the table
+below:
+
++--------+----------------------+
+| Format | Corresponding Status |
++========+======================+
+| json   | cancel               |
++--------+----------------------+
+| xunit  | skipped              |
++--------+----------------------+
+| tap    | ok                   |
++--------+----------------------+
+| html   | CANCEL (warning)     |
++--------+----------------------+
+
 Docstring Directives
 ====================
 
diff --git a/examples/tests/canceltest.py b/examples/tests/canceltest.py
new file mode 100644
index 0000000000000000000000000000000000000000..4962dc1593146ae2fdba5989309f7589fb4ff672
--- /dev/null
+++ b/examples/tests/canceltest.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+from avocado import Test
+from avocado import main
+
+
+class CancelTest(Test):
+
+    """
+    Example test that cancels the current test from inside the test.
+    """
+
+    def test(self):
+        self.cancel("This should end with CANCEL.")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/optional_plugins/html/avocado_result_html/__init__.py b/optional_plugins/html/avocado_result_html/__init__.py
index 3bd44063a028fcf048c89787778230da9aef99eb..fb163543b5ad6a6137d441940df0e8eae48ef4b9 100644
--- a/optional_plugins/html/avocado_result_html/__init__.py
+++ b/optional_plugins/html/avocado_result_html/__init__.py
@@ -113,7 +113,8 @@ class ReportModel(object):
                    "ALERT": "danger",
                    "RUNNING": "info",
                    "NOSTATUS": "info",
-                   "INTERRUPTED": "danger"}
+                   "INTERRUPTED": "danger",
+                   "CANCEL": "warning"}
         test_info = []
         results_dir = self.results_dir(False)
         for tst in self.result.tests:
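The documentation above states that `self.cancel()` aborts only the
rest of the test method and that `tearDown()` still runs. A minimal
sketch in the style of `examples/tests/canceltest.py` (the class name
and log messages are hypothetical)::

    from avocado import Test
    from avocado import main


    class CancelStillCleansUp(Test):

        def test(self):
            self.cancel('stopping here')
            self.log.info('never reached')

        def tearDown(self):
            # Cleanup runs even though test() was cancelled above.
            self.log.info('cleanup happens for cancelled tests too')


    if __name__ == "__main__":
        main()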
diff --git a/selftests/functional/test_canceltests.py b/selftests/functional/test_canceltests.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea5c3c87bdc48ab8b339bb49e490412f4c6da2e4
--- /dev/null
+++ b/selftests/functional/test_canceltests.py
@@ -0,0 +1,86 @@
+import json
+import os
+import shutil
+import tempfile
+import unittest
+
+from avocado.core import exit_codes
+from avocado.utils import process
+from avocado.utils import script
+
+basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
+basedir = os.path.abspath(basedir)
+
+AVOCADO_TEST_CANCEL = """
+import avocado
+
+class AvocadoCancelTest(avocado.Test):
+
+    def test(self):
+        self.cancel()
+"""
+
+AVOCADO_TEST_CANCEL_ON_SETUP = """
+import avocado
+
+class AvocadoCancelTest(avocado.Test):
+
+    def setUp(self):
+        self.cancel()
+
+    def test(self):
+        pass
+"""
+
+
+class TestCancel(unittest.TestCase):
+
+    def setUp(self):
+        os.chdir(basedir)
+        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
+
+        test_path = os.path.join(self.tmpdir, 'test_cancel.py')
+        self.test_cancel = script.Script(test_path,
+                                         AVOCADO_TEST_CANCEL)
+        self.test_cancel.save()
+
+        test_path = os.path.join(self.tmpdir, 'test_cancel_on_setup.py')
+        self.test_cancel_on_setup = script.Script(test_path,
+                                                  AVOCADO_TEST_CANCEL_ON_SETUP)
+        self.test_cancel_on_setup.save()
+
+    def test_cancel(self):
+        os.chdir(basedir)
+        cmd_line = ['./scripts/avocado',
+                    'run',
+                    '--sysinfo=off',
+                    '--job-results-dir',
+                    '%s' % self.tmpdir,
+                    '%s' % self.test_cancel,
+                    '--json -']
+        result = process.run(' '.join(cmd_line), ignore_status=True)
+        json_results = json.loads(result.stdout)
+        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
+        self.assertEqual(json_results['cancel'], 1)
+
+    def test_cancel_on_setup(self):
+        os.chdir(basedir)
+        cmd_line = ['./scripts/avocado',
+                    'run',
+                    '--sysinfo=off',
+                    '--job-results-dir',
+                    '%s' % self.tmpdir,
+                    '%s' % self.test_cancel_on_setup,
+                    '--json -']
+        result = process.run(' '.join(cmd_line), ignore_status=True)
+        json_results = json.loads(result.stdout)
+        self.assertEqual(result.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
+        self.assertEqual(json_results['cancel'], 0)
+        self.assertEqual(json_results['errors'], 1)
+
+    def tearDown(self):
+        shutil.rmtree(self.tmpdir)
+
+
+if __name__ == '__main__':
+    unittest.main()
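The two selftests pin down the contract: a test that cancels itself
leaves the job exit status at `AVOCADO_ALL_OK`, while calling
`cancel()` from `setUp()` is turned into an ERROR and fails the job. A
quick manual check in the same style, assuming it is run from the root
of an avocado checkout::

    from avocado.core import exit_codes
    from avocado.utils import process

    # A job whose only test ends in CANCEL still exits successfully.
    result = process.run('./scripts/avocado run --sysinfo=off '
                         'examples/tests/canceltest.py',
                         ignore_status=True)
    assert result.exit_status == exit_codes.AVOCADO_ALL_OK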