提交 1ded91b9 编写于 作者: L Lucas Meneghel Rodrigues

Merge pull request #692 from ldoktor/fail2error3

core.test: Map generic exceptions to TestError [v2]
......@@ -35,6 +35,8 @@ from . import data_dir
from . import sysinfo
from . import exceptions
from . import multiplexer
from . import status
from .settings import settings
from .version import VERSION
from ..utils import genio
from ..utils import path as utils_path
......@@ -444,7 +446,16 @@ class Test(unittest.TestCase):
self.fail_reason = detail
self.traceback = stacktrace.prepare_exc_info(sys.exc_info())
except Exception, detail:
self.status = 'FAIL'
stat = settings.get_value("runner.behavior",
"uncaught_exception_result",
default="ERROR")
if stat not in status.mapping:
stacktrace.log_message("Incorrect runner.behavior.generic_"
"exception_result value '%s', using "
"'ERROR' instead." % stat,
"avocado.test")
stat = "ERROR"
self.status = stat
tb_info = stacktrace.tb_info(sys.exc_info())
self.traceback = stacktrace.prepare_exc_info(sys.exc_info())
try:
......
......@@ -35,14 +35,18 @@ and match those in a single job.
Instrumented
------------
These are tests written in Python that use the Avocado test API.
These are tests written in Python, or in Bash using the Avocado helpers, that use the Avocado test API.
To be more precise, the Python file must contain a class derived from :class:`avocado.test.Test`.
This means that an executable written in Python is not always an instrumented test, but may work
as a simple test.
By the way, the term instrumented is used because the Avocado Python test classes allow you to
get more features for your test, such as logging facilities and more sophisticated test APIs.
The instrumented tests allow the writer finer control over the process,
including logging, test result status, and other more sophisticated test APIs.
Test statuses ``PASS``, ``WARN``, ``START`` and ``TEST_NA`` are considered
successful. The ``ABORT``, ``ERROR``, ``FAIL``, ``ALERT``, ``RUNNING``,
``NOSTATUS`` and ``INTERRUPTED`` statuses are considered failures.
Simple
------
......
......@@ -33,6 +33,8 @@ utf8 =
[runner.behavior]
# Keep job temporary files after jobs (useful for avocado debugging)
keep_tmp_files = False
# Overrides the test result in case of uncaught exception (FAIL, ERROR, ...)
uncaught_exception_result = ERROR
[job.output]
# Base log level for --show-job-log.
......
#!/usr/bin/python
from avocado import Test
from avocado import main
class ErrorTest(Test):

    """Example test whose only action is to raise a plain Exception."""

    def test(self):
        """Raise an uncaught generic exception.

        With the default configuration this test ends with ERROR.
        """
        message = "This is a generic exception"
        raise Exception(message)
# Allow running this example test standalone via the Avocado main entry point.
if __name__ == "__main__":
    main()
......@@ -115,6 +115,17 @@ class RunnerOperationTest(unittest.TestCase):
output,
"Test did not fail with action exception:\n%s" % output)
def test_uncaught_exception(self):
    """Run the uncaught_exception example and verify it ends as ERROR.

    Checks both the runner exit code and the JSON status reported
    on stdout.
    """
    os.chdir(basedir)
    cmd = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
           "--json - uncaught_exception") % self.tmpdir
    result = process.run(cmd, ignore_status=True)
    # A test ending in ERROR makes the whole job fail, hence rc 1.
    wanted_rc = 1
    self.assertEqual(result.exit_status, wanted_rc,
                     "Avocado did not return rc %d:\n%s" % (wanted_rc,
                                                            result))
    # The JSON result stream must report the test status as ERROR.
    self.assertIn('"status": "ERROR"', result.stdout)
def test_runner_timeout(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - timeouttest' % self.tmpdir
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册