Commit cd79862e authored by Cleber Rosa

Merge remote-tracking branch 'lmr/refine-test-results-v2'

......@@ -13,9 +13,10 @@
 # Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
 
-__all__ = ['main', 'Test', 'VERSION']
+__all__ = ['main', 'Test', 'VERSION', 'fail_on_error']
 
 from avocado.core.job import main
 from avocado.core.test import Test
 from avocado.core.version import VERSION
+from avocado.core.exceptions import fail_on_error
......@@ -17,6 +17,29 @@ Exception classes, useful for tests, and other parts of the framework code.
"""
def fail_on_error(fn):
"""
Apply to any test you want to FAIL upon any exception raised.
Normally only TestFail called explicitly will mark an avocado test with the
FAIL state, but this decorator is provided as a convenience for people
that need a more relaxed behavior.
:param fn: Function that will be decorated
"""
def new_fn(*args, **kwargs):
try:
return fn(*args, **kwargs)
except TestBaseException:
raise
except Exception, e:
raise TestFail(str(e))
new_fn.__name__ = fn.__name__
new_fn.__doc__ = fn.__doc__
new_fn.__dict__.update(fn.__dict__)
return new_fn
class JobBaseException(Exception):
"""
......
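The decorator's behavior can also be seen outside of a full avocado run. A minimal sketch, assuming only that avocado is importable (the function names here are made up; the commit's own runnable example, examples/tests/fail_on_error.py, appears further down)::

    from avocado.core import exceptions


    @exceptions.fail_on_error
    def fragile_step():
        # a generic exception is translated into TestFail by the decorator
        raise ValueError('boom')


    @exceptions.fail_on_error
    def skipping_step():
        # TestBaseException subclasses (e.g. TestNAError) pass through untouched
        raise exceptions.TestNAError('skipped')


    try:
        fragile_step()
    except exceptions.TestFail as details:
        print('translated to TestFail: %s' % details)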
......@@ -35,8 +35,6 @@ from . import data_dir
 from . import sysinfo
 from . import exceptions
 from . import multiplexer
-from . import status
-from .settings import settings
 from .version import VERSION
 from ..utils import genio
 from ..utils import path as utils_path
......@@ -361,12 +359,26 @@ class Test(unittest.TestCase):
             raise exceptions.TestSetupFail(details)
         try:
             testMethod()
+        except exceptions.TestNAError, details:
+            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
+            skip_illegal_msg = ('Calling skip() in places other than '
+                                'setUp() is not allowed in avocado, you '
+                                'must fix your test. Original skip exception: '
+                                '%s' % details)
+            raise exceptions.TestError(skip_illegal_msg)
         except Exception, details:
             stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
             test_exception = details
         finally:
             try:
                 self.tearDown()
+            except exceptions.TestNAError, details:
+                stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
+                skip_illegal_msg = ('Calling skip() in places other than '
+                                    'setUp() is not allowed in avocado, '
+                                    'you must fix your test. Original skip '
+                                    'exception: %s' % details)
+                raise exceptions.TestError(skip_illegal_msg)
             except Exception, details:
                 stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                 cleanup_exception = details
......@@ -451,16 +463,7 @@ class Test(unittest.TestCase):
             self.fail_reason = detail
             self.traceback = stacktrace.prepare_exc_info(sys.exc_info())
         except Exception, detail:
-            stat = settings.get_value("runner.behavior",
-                                      "uncaught_exception_result",
-                                      default="ERROR")
-            if stat not in status.mapping:
-                stacktrace.log_message("Incorrect runner.behavior.generic_"
-                                       "exception_result value '%s', using "
-                                       "'ERROR' instead." % stat,
-                                       "avocado.test")
-                stat = "ERROR"
-            self.status = stat
+            self.status = 'ERROR'
             tb_info = stacktrace.tb_info(sys.exc_info())
             self.traceback = stacktrace.prepare_exc_info(sys.exc_info())
             try:
......@@ -522,7 +525,13 @@ class Test(unittest.TestCase):
     def skip(self, message=None):
         """
-        Skips the currently running test
+        Skips the currently running test.
+
+        This method should only be called from a test's setUp() method, not
+        anywhere else, since by definition, if a test gets to be executed, it
+        can't be skipped anymore. If you call this method outside setUp(),
+        avocado will mark your test status as ERROR, and instruct you to
+        fix your test in the error message.
 
         :param message: an optional message that will be recorded in the logs
         :type message: str
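A minimal sketch of the legal usage described in the docstring above, with the environment check done in setUp() (the class name and the checked path are illustrative)::

    import os

    import avocado


    class RequiresZram(avocado.Test):

        def setUp(self):
            # skip() is legal here: the test body has not started executing
            if not os.path.exists('/dev/zram0'):
                self.skip('zram device not available on this machine')

        def test(self):
            pass  # never reached when setUp() decides to skip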
......@@ -636,6 +645,9 @@ class TimeOutSkipTest(Test):
     It will never have a chance to execute.
     """
 
-    def test(self):
+    def setUp(self):
         e_msg = 'Test skipped due a job timeout!'
         raise exceptions.TestNAError(e_msg)
 
+    def test(self):
+        pass
......@@ -113,7 +113,6 @@ one is that, you may use ``avocado config --datadir``). The output looks like::
     INSTRUMENTED /usr/share/avocado/tests/linuxbuild.py
     INSTRUMENTED /usr/share/avocado/tests/multiplextest.py
     INSTRUMENTED /usr/share/avocado/tests/passtest.py
-    INSTRUMENTED /usr/share/avocado/tests/skiptest.py
     INSTRUMENTED /usr/share/avocado/tests/sleeptenmin.py
     INSTRUMENTED /usr/share/avocado/tests/sleeptest.py
     INSTRUMENTED /usr/share/avocado/tests/synctest.py
......
......@@ -54,6 +54,21 @@ Simple
 Any executable in your box. The criteria for PASS/FAIL is the return code of the executable.
 If it returns 0, the test PASSes, if it returns anything else, it FAILs.
 
+Test Statuses
+=============
+
+Avocado sticks to the following definitions of test statuses:
+
+ * ``PASS``: The test passed, which means all conditions being tested have passed.
+ * ``FAIL``: The test failed, which means at least one condition being tested has
+   failed. Ideally, it should mean a problem in the software being tested has been found.
+ * ``ERROR``: An error happened during the test execution. This can happen, for example,
+   if there's a bug in the test runner, in its libraries or if a resource breaks unexpectedly.
+   Uncaught exceptions in the test code will also result in this status.
+ * ``SKIP``: The test runner decided a requested test should not be run. This
+   can happen, for example, due to missing requirements in the test environment
+   or when there's a job timeout.
+
 .. _libraries-apis:
 
 Libraries and APIs
......
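To make the four statuses concrete, here is a sketch with one minimal test per status (class names are illustrative; TestFail and skip()-in-setUp() are the mechanisms this commit documents for FAIL and SKIP)::

    from avocado import Test
    from avocado.core import exceptions


    class PassExample(Test):
        def test(self):
            pass  # no exception raised: PASS


    class FailExample(Test):
        def test(self):
            # explicit test failure: FAIL
            raise exceptions.TestFail('condition being tested did not hold')


    class ErrorExample(Test):
        def test(self):
            # uncaught generic exception: ERROR
            raise ValueError('unexpected breakage')


    class SkipExample(Test):
        def setUp(self):
            # decided before the test executes: SKIP
            self.skip('requirement missing in the test environment')

        def test(self):
            pass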
......@@ -337,14 +337,21 @@ an example that does that::
     if __name__ == "__main__":
         main()
 
-Here we have an example of the ``setup`` method in action: Here we get the
+Here we have an example of the ``setUp`` method in action: Here we get the
 location of the test suite code (tarball) through
 :func:`avocado.Test.get_data_path`, then uncompress the tarball through
 :func:`avocado.utils.archive.extract`, an API that will
 decompress the suite tarball, followed by ``build.make``, that will build the
 suite.
 
-In this example, the ``action`` method just gets into the base directory of
+The ``setUp`` method is the only place in avocado where you are allowed to
+call the ``skip`` method, given that, if a test started to be executed, by
+definition it can't be skipped anymore. Avocado will do its best to enforce
+this boundary, so that if you use ``skip`` outside ``setUp``, the test upon
+execution will be marked with the ``ERROR`` status, and the error message
+will instruct you to fix your test's code.
+
+In this example, the ``test`` method just gets into the base directory of
 the compiled suite and executes the ``./synctest`` command, with appropriate
 parameters, using :func:`avocado.utils.process.system`.
......
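A condensed sketch of the setUp()/test() split just described (the tarball name and synctest parameters are illustrative stand-ins for the full examples/tests/synctest.py)::

    import os

    from avocado import Test
    from avocado import main
    from avocado.utils import archive
    from avocado.utils import build
    from avocado.utils import process


    class SyncTestSketch(Test):

        def setUp(self):
            # unpack and build the bundled test suite; a missing
            # requirement could also trigger self.skip() here
            tarball = self.get_data_path('synctest.tar.bz2')
            archive.extract(tarball, self.srcdir)
            self.srcdir = os.path.join(self.srcdir, 'synctest')
            build.make(self.srcdir)

        def test(self):
            # get into the base directory of the compiled suite and run it
            os.chdir(self.srcdir)
            process.system('./synctest 100 10')

    if __name__ == "__main__":
        main()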
......@@ -33,8 +33,6 @@ utf8 =
 [runner.behavior]
 # Keep job temporary files after jobs (useful for avocado debugging)
 keep_tmp_files = False
-# Overrides the test result in case of uncaught exception (FAIL, ERROR, ...)
-uncaught_exception_result = ERROR
 
 [job.output]
 # Base log level for --show-job-log.
......
#!/usr/bin/python

import avocado


class FailOnError(avocado.Test):

    """
    Test illustrating the behavior of the fail_on_error decorator.
    """

    @avocado.fail_on_error
    def test(self):
        """
        This should end with FAIL.

        Avocado tests should end with ERROR when a generic exception such as
        ValueError is raised. The avocado.fail_on_error decorator allows you
        to override this behavior, and turn your generic exceptions into
        failures.
        """
        raise ValueError('This raises a ValueError and should end as a FAIL')

if __name__ == "__main__":
    avocado.main()
#!/usr/bin/python

import avocado


class SkipOutsideSetup(avocado.Test):

    """
    Test illustrating the behavior of calling skip() outside setUp().
    """

    def test(self):
        """
        This should end with ERROR.

        The method skip() can only be called from inside setUp(). If called
        outside of that method, the test status will be marked as ERROR, with
        a reason message that asks you to fix your test.
        """
        self.skip('Calling skip() outside setUp() will result in ERROR')

if __name__ == "__main__":
    avocado.main()
#!/usr/bin/python

from avocado import Test
from avocado import main


class SkipTest(Test):

    """
    Example test that skips the current test, that is, it ends with SKIP.
    """

    def test(self):
        """
        This should end with SKIP.
        """
        self.skip('This should end with SKIP.')

if __name__ == "__main__":
    main()
......@@ -12,7 +12,7 @@ class ErrorTest(Test):
     def test(self):
         """
-        This should end with ERROR (on default config)
+        This should end with ERROR.
         """
         raise Exception("This is a generic exception")
......
......@@ -126,6 +126,17 @@ class RunnerOperationTest(unittest.TestCase):
                                                                 result))
         self.assertIn('"status": "ERROR"', result.stdout)
 
+    def test_fail_on_error(self):
+        os.chdir(basedir)
+        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
+                    "--json - fail_on_error" % self.tmpdir)
+        result = process.run(cmd_line, ignore_status=True)
+        expected_rc = 1
+        self.assertEqual(result.exit_status, expected_rc,
+                         "Avocado did not return rc %d:\n%s" % (expected_rc,
+                                                                result))
+        self.assertIn('"status": "FAIL"', result.stdout)
+
     def test_runner_timeout(self):
         os.chdir(basedir)
         cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - timeouttest' % self.tmpdir
......@@ -193,7 +204,7 @@ class RunnerOperationTest(unittest.TestCase):
         self.assertNotIn('File not found', result.stdout)
 
     def test_invalid_unique_id(self):
-        cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar skiptest'
+        cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar passtest'
         result = process.run(cmd_line, ignore_status=True)
         self.assertNotEqual(0, result.exit_status)
         self.assertIn('needs to be a 40 digit hex', result.stderr)
......@@ -201,20 +212,31 @@ class RunnerOperationTest(unittest.TestCase):
     def test_valid_unique_id(self):
         cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
-                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 skiptest' % self.tmpdir)
+                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 passtest' % self.tmpdir)
         result = process.run(cmd_line, ignore_status=True)
         self.assertEqual(0, result.exit_status)
         self.assertNotIn('needs to be a 40 digit hex', result.stderr)
-        self.assertIn('SKIP', result.stdout)
+        self.assertIn('PASS', result.stdout)
 
     def test_automatic_unique_id(self):
-        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off skiptest --json -' % self.tmpdir
+        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off passtest --json -' % self.tmpdir
         result = process.run(cmd_line, ignore_status=True)
         self.assertEqual(0, result.exit_status)
         r = json.loads(result.stdout)
         int(r['job_id'], 16)  # it's an hex number
         self.assertEqual(len(r['job_id']), 40)
 
+    def test_skip_outside_setup(self):
+        os.chdir(basedir)
+        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
+                    "--json - skip_outside_setup" % self.tmpdir)
+        result = process.run(cmd_line, ignore_status=True)
+        expected_rc = 1
+        self.assertEqual(result.exit_status, expected_rc,
+                         "Avocado did not return rc %d:\n%s" % (expected_rc,
+                                                                result))
+        self.assertIn('"status": "ERROR"', result.stdout)
+
     def tearDown(self):
         shutil.rmtree(self.tmpdir)
......@@ -499,9 +521,6 @@ class PluginsXunitTest(PluginsTest):
     def test_xunit_plugin_failtest(self):
         self.run_and_check('failtest', 1, 1, 0, 0, 1, 0)
 
-    def test_xunit_plugin_skiptest(self):
-        self.run_and_check('skiptest', 0, 1, 0, 0, 0, 1)
-
     def test_xunit_plugin_skiponsetuptest(self):
         self.run_and_check('skiponsetup', 0, 1, 0, 0, 0, 1)
......@@ -560,9 +579,6 @@ class PluginsJSONTest(PluginsTest):
     def test_json_plugin_failtest(self):
         self.run_and_check('failtest', 1, 1, 0, 1, 0)
 
-    def test_json_plugin_skiptest(self):
-        self.run_and_check('skiptest', 0, 1, 0, 0, 1)
-
     def test_json_plugin_skiponsetuptest(self):
         self.run_and_check('skiponsetup', 0, 1, 0, 0, 1)
......
......@@ -37,11 +37,6 @@ class StandaloneTests(unittest.TestCase):
         expected_rc = 0
         self.run_and_check(cmd_line, expected_rc, 'passtest')
 
-    def test_skiptest(self):
-        cmd_line = './examples/tests/skiptest.py -r'
-        expected_rc = 0
-        self.run_and_check(cmd_line, expected_rc, 'skiptest')
-
     def test_warntest(self):
         cmd_line = './examples/tests/warntest.py -r'
         expected_rc = 0
......