Commit 28817ab3 authored by Amador Pahim

Introduce the CANCEL status and the self.cancel()

We have received many requests to support skipping tests from inside the
test method. Since we can do that without breaking our own concepts, we
decided to introduce the CANCEL status and the corresponding
self.cancel() method in the Test class.

Reference: https://trello.com/c/viBJIEwI
Signed-off-by: Amador Pahim <apahim@redhat.com>
Parent 74fa60cc
@@ -177,3 +177,12 @@ class TestWarn(TestBaseException):
        failure.
        """
        status = "WARN"


class TestCancel(TestBaseException):

    """
    Indicates that a test was cancelled.

    Should be thrown when the cancel() test method is used.
    """
    status = "CANCEL"
@@ -78,6 +78,7 @@ class TermSupport(object):
        self.INTERRUPT = self.COLOR_RED
        self.ERROR = self.COLOR_RED
        self.WARN = self.COLOR_YELLOW
        self.CANCEL = self.COLOR_YELLOW
        self.PARTIAL = self.COLOR_YELLOW
        self.ENDC = self.CONTROL_END
        self.LOWLIGHT = self.COLOR_DARKGREY
@@ -41,6 +41,7 @@ class Result(object):
        self.skipped = 0
        self.warned = 0
        self.interrupted = 0
        self.cancelled = 0
        self.tests = []

    def _reconcile(self):
@@ -53,7 +54,8 @@ class Result(object):
        """
        valid_results_count = (self.passed + self.errors +
                               self.failed + self.warned +
                               self.skipped + self.interrupted +
                               self.cancelled)
        other_skipped_count = self.tests_total - valid_results_count
        if other_skipped_count > 0:
            self.skipped += other_skipped_count
@@ -103,6 +105,8 @@ class Result(object):
            self.warned += 1
        elif status == "INTERRUPTED":
            self.interrupted += 1
        elif status == "CANCEL":
            self.cancelled += 1
        else:
            self.errors += 1
        self.end_test(state)
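To see what _reconcile() does with the new counter, here is the same
arithmetic with illustrative numbers: five tests total, of which one
reported PASS, one FAIL and one CANCEL::

    passed, errors, failed, warned = 1, 0, 1, 0
    skipped, interrupted, cancelled = 0, 0, 1
    tests_total = 5

    valid_results_count = (passed + errors + failed + warned +
                           skipped + interrupted + cancelled)    # 3
    other_skipped_count = tests_total - valid_results_count      # 2
    if other_skipped_count > 0:
        skipped += other_skipped_count  # tests with no status count as skipped
    print(skipped)  # -> 2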
@@ -26,14 +26,16 @@ mapping = {"SKIP": True,
           "ALERT": False,
           "RUNNING": False,
           "NOSTATUS": False,
           "INTERRUPTED": False,
           "CANCEL": True}

user_facing_status = ["SKIP",
                      "ERROR",
                      "FAIL",
                      "WARN",
                      "PASS",
                      "INTERRUPTED",
                      "CANCEL"]

feedback = {
    # Test did not advertise current status, but process running the test is
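The True mapped to CANCEL is what keeps a cancelled test from failing
the job, the same treatment SKIP gets. A hedged sketch of how such a
mapping can be consumed (the helper below is hypothetical, not
avocado's actual consumer of this table)::

    mapping = {"SKIP": True, "PASS": True, "WARN": True, "CANCEL": True,
               "FAIL": False, "ERROR": False, "INTERRUPTED": False}

    def job_succeeded(statuses):
        # Hypothetical: the job passes when every status maps to True.
        return all(mapping.get(status, False) for status in statuses)

    print(job_succeeded(["PASS", "CANCEL"]))  # -> True
    print(job_succeeded(["PASS", "FAIL"]))    # -> False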
@@ -567,6 +567,13 @@ class Test(unittest.TestCase):
                exceptions.TestSkipError) as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSkipError(details)
        except exceptions.TestCancel as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            skip_illegal_msg = ('Calling cancel() in setUp() '
                                'is not allowed in avocado, you '
                                'must fix your test. Original cancel exception: '
                                '%s' % details)
            raise exceptions.TestError(skip_illegal_msg)
        except:  # Old-style exceptions are not inherited from Exception()
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            details = sys.exc_info()[1]
@@ -583,6 +590,9 @@ class Test(unittest.TestCase):
        except exceptions.TestDecoratorSkip as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSkipError(details)
        except exceptions.TestCancel as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise
        except:  # Old-style exceptions are not inherited from Exception()
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            details = sys.exc_info()[1]
@@ -774,6 +784,21 @@ class Test(unittest.TestCase):
        """
        raise exceptions.TestSetupSkip(message)

    def cancel(self, message=None):
        """
        Cancels the test.

        This method is expected to be called from the test method, not
        anywhere else, since by definition, we can only cancel a test that
        is currently under execution. If you call this method outside the
        test method, avocado will mark your test status as ERROR, and
        instruct you to fix your test in the error message.

        :param message: an optional message that will be recorded in the logs
        :type message: str
        """
        raise exceptions.TestCancel(message)

    def fetch_asset(self, name, asset_hash=None, algorithm='sha1',
                    locations=None, expire=None):
        """
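Taken together, the hunks above make cancel() legal inside the test
method but an error inside setUp(). A minimal sketch (class name and
messages are illustrative)::

    from avocado import Test

    class SketchTest(Test):

        def setUp(self):
            # Calling self.cancel() here would be converted into an ERROR
            # by the setUp() handler above.
            pass

        def test(self):
            self.cancel('cancelling from the test method is allowed')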
@@ -35,7 +35,8 @@ class Human(ResultEvents):
                      'FAIL': output.TERM_SUPPORT.FAIL,
                      'SKIP': output.TERM_SUPPORT.SKIP,
                      'WARN': output.TERM_SUPPORT.WARN,
                      'INTERRUPTED': output.TERM_SUPPORT.INTERRUPT,
                      'CANCEL': output.TERM_SUPPORT.CANCEL}

    def __init__(self, args):
        self.log = logging.getLogger("avocado.app")
@@ -93,7 +94,8 @@ class Human(ResultEvents):
        if not self.owns_stdout:
            return
        self.log.info("RESULTS    : PASS %d | ERROR %d | FAIL %d | SKIP %d | "
                      "WARN %d | INTERRUPT %s | CANCEL %s", job.result.passed,
                      job.result.errors, job.result.failed, job.result.skipped,
                      job.result.warned, job.result.interrupted,
                      job.result.cancelled)
        self.log.info("TESTS TIME : %.2f s", job.result.tests_total_time)
@@ -62,6 +62,7 @@ class JSONResult(Result):
                   'errors': result.errors,
                   'failures': result.failed,
                   'skip': result.skipped,
                   'cancel': result.cancelled,
                   'time': result.tests_total_time}
        return json.dumps(content,
                          sort_keys=True,
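With the new key in place, the summary portion of the JSON results
would look roughly like this (values illustrative, per-test entries and
other keys elided)::

    {
        "cancel": 1,
        "errors": 0,
        "failures": 0,
        "skip": 0,
        "time": 2.28
    }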
@@ -114,6 +114,9 @@ class TAPResult(ResultEvents):
            self.__write("ok %s %s", result.tests_run, name)
        elif status == "SKIP":
            self.__write("ok %s %s # SKIP %s", result.tests_run, name, state.get("fail_reason"))
        elif status == "CANCEL":
            self.__write("ok %s %s # CANCEL %s",
                         result.tests_run, name, state.get("fail_reason"))
        else:
            self.__write("not ok %s %s", result.tests_run, name)
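A cancelled test therefore still produces an ok line, with the reason
appended after the # CANCEL marker. Illustrative output for the handler
above (test name and reason made up)::

    ok 1 test_cancel.py:CancelTest.test_iperf # CANCEL iperf is not installed or wrong version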
@@ -79,7 +79,7 @@ class XUnitResult(Result):
        testsuite.setAttribute('tests', self._escape_attr(result.tests_total))
        testsuite.setAttribute('errors', self._escape_attr(result.errors + result.interrupted))
        testsuite.setAttribute('failures', self._escape_attr(result.failed))
        testsuite.setAttribute('skipped', self._escape_attr(result.skipped + result.cancelled))
        testsuite.setAttribute('time', self._escape_attr(result.tests_total_time))
        testsuite.setAttribute('timestamp', self._escape_attr(datetime.datetime.now()))
        document.appendChild(testsuite)
@@ -93,6 +93,8 @@ class XUnitResult(Result):
        elif status == 'FAIL':
            element = self._create_failure_or_error(document, test, 'failure')
            testcase.appendChild(element)
        elif status == 'CANCEL':
            testcase.appendChild(Element('skipped'))
        else:
            element = self._create_failure_or_error(document, test, 'error')
            testcase.appendChild(element)
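In the xunit output a cancelled test is thus counted in the testsuite's
skipped attribute and rendered as an empty <skipped/> child of its
testcase, roughly (attribute values illustrative)::

    <testsuite tests="2" errors="0" failures="0" skipped="1" time="2.28">
        <testcase classname="CancelTest" name="test_iperf" time="1.15">
            <skipped/>
        </testcase>
    </testsuite>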
@@ -1034,6 +1034,79 @@ Notice the ``test3`` was not skipped because the provided condition was
not ``False``.

Cancelling Tests
================

The only supported way to cancel a test without negatively impacting
the job exit status (unlike ``self.fail`` or ``self.error``) is the
``self.cancel()`` method. ``self.cancel()`` can be called only from
your test methods. Example::
    #!/usr/bin/env python

    from avocado import Test
    from avocado import main
    from avocado.utils.process import run
    from avocado.utils.software_manager import SoftwareManager


    class CancelTest(Test):

        """
        Example tests that cancel the current test from inside the test.
        """

        def setUp(self):
            sm = SoftwareManager()
            self.pkgs = sm.list_all(software_components=False)

        def test_iperf(self):
            if 'iperf-2.0.8-6.fc25.x86_64' not in self.pkgs:
                self.cancel('iperf is not installed or wrong version')
            self.assertIn('pthreads',
                          run('iperf -v', ignore_status=True).stderr)

        def test_gcc(self):
            if 'gcc-6.3.1-1.fc25.x86_64' not in self.pkgs:
                self.cancel('gcc is not installed or wrong version')
            self.assertIn('enable-gnu-indirect-function',
                          run('gcc -v', ignore_status=True).stderr)


    if __name__ == "__main__":
        main()
In a system missing the ``iperf`` package but with ``gcc`` installed at
the correct version, the result will be::

    JOB ID     : 39c1f120830b9769b42f5f70b6b7bad0b1b1f09f
    JOB LOG    : $HOME/avocado/job-results/job-2017-03-10T16.22-39c1f12/job.log
     (1/2) /home/apahim/avocado/tests/test_cancel.py:CancelTest.test_iperf: CANCEL (1.15 s)
     (2/2) /home/apahim/avocado/tests/test_cancel.py:CancelTest.test_gcc: PASS (1.13 s)
    RESULTS    : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1
    TESTS TIME : 2.28 s
    JOB HTML   : $HOME/avocado/job-results/job-2017-03-10T16.22-39c1f12/html/results.html
Notice that, since ``setUp()`` had already been executed, calling
``self.cancel()`` cancels the rest of the test from that point on, but
``tearDown()`` will still be executed.
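For instance, in this minimal sketch the ``tearDown()`` message still
shows up in the test logs even though the test method cancelled
itself::

    #!/usr/bin/env python

    from avocado import Test
    from avocado import main


    class CancelTearDownTest(Test):

        def test(self):
            self.cancel('Cancelling from the test method.')

        def tearDown(self):
            self.log.info('tearDown() still runs after cancel().')


    if __name__ == "__main__":
        main()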
Depending on the result format you're referring to, the ``CANCEL``
status is mapped to a corresponding valid status in that format. See
the table below:

+--------+----------------------+
| Format | Corresponding Status |
+========+======================+
| json   | cancel               |
+--------+----------------------+
| xunit  | skipped              |
+--------+----------------------+
| tap    | ok                   |
+--------+----------------------+
| html   | CANCEL (warning)     |
+--------+----------------------+
Docstring Directives
====================
#!/usr/bin/env python

from avocado import Test
from avocado import main


class CancelTest(Test):

    """
    Example test that cancels the current test from inside the test.
    """

    def test(self):
        self.cancel("This should end with CANCEL.")


if __name__ == "__main__":
    main()
@@ -113,7 +113,8 @@ class ReportModel(object):
                        "ALERT": "danger",
                        "RUNNING": "info",
                        "NOSTATUS": "info",
                        "INTERRUPTED": "danger",
                        "CANCEL": "warning"}
        test_info = []
        results_dir = self.results_dir(False)
        for tst in self.result.tests:
import json
import os
import shutil
import tempfile
import unittest

from avocado.core import exit_codes
from avocado.utils import process
from avocado.utils import script

basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)

AVOCADO_TEST_CANCEL = """
import avocado

class AvocadoCancelTest(avocado.Test):

    def test(self):
        self.cancel()
"""

AVOCADO_TEST_CANCEL_ON_SETUP = """
import avocado

class AvocadoCancelTest(avocado.Test):

    def setUp(self):
        self.cancel()

    def test(self):
        pass
"""


class TestCancel(unittest.TestCase):

    def setUp(self):
        os.chdir(basedir)
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        test_path = os.path.join(self.tmpdir, 'test_cancel.py')
        self.test_cancel = script.Script(test_path,
                                         AVOCADO_TEST_CANCEL)
        self.test_cancel.save()
        test_path = os.path.join(self.tmpdir, 'test_cancel_on_setup.py')
        self.test_cancel_on_setup = script.Script(test_path,
                                                  AVOCADO_TEST_CANCEL_ON_SETUP)
        self.test_cancel_on_setup.save()

    def test_cancel(self):
        os.chdir(basedir)
        cmd_line = ['./scripts/avocado',
                    'run',
                    '--sysinfo=off',
                    '--job-results-dir',
                    '%s' % self.tmpdir,
                    '%s' % self.test_cancel,
                    '--json -']
        result = process.run(' '.join(cmd_line), ignore_status=True)
        json_results = json.loads(result.stdout)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertEqual(json_results['cancel'], 1)

    def test_cancel_on_setup(self):
        os.chdir(basedir)
        cmd_line = ['./scripts/avocado',
                    'run',
                    '--sysinfo=off',
                    '--job-results-dir',
                    '%s' % self.tmpdir,
                    '%s' % self.test_cancel_on_setup,
                    '--json -']
        result = process.run(' '.join(cmd_line), ignore_status=True)
        json_results = json.loads(result.stdout)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
        self.assertEqual(json_results['cancel'], 0)
        self.assertEqual(json_results['errors'], 1)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


if __name__ == '__main__':
    unittest.main()