Unverified commit 3cd6036f authored by Lukáš Doktor

Merging pull request 1883

Signed-off-by: Lukáš Doktor <ldoktor@redhat.com>

* https://github.com/avocado-framework/avocado:
  Deprecate message in self.skip()
  Accept self.cancel() in setUp() and tearDown()
  Don't execute setUp()/tearDown() on SKIP
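
Editorial sketch (not part of this commit; the class name and the /dev/kvm check are made up for illustration). Assuming the standard avocado.Test API used elsewhere in this diff, it shows the three behaviors listed above: self.cancel() accepted in setUp()/tearDown(), and skip decorators preventing setUp()/tearDown() from running at all:

    #!/usr/bin/env python
    import os

    import avocado
    from avocado import Test
    from avocado import main


    class CancelAndSkipSketch(Test):  # hypothetical example

        def setUp(self):
            # Accepted after this change: the test ends as CANCEL and tearDown()
            # still runs (previously cancel() here was turned into an ERROR).
            if not os.path.exists('/dev/kvm'):
                self.cancel('KVM not available on this machine')

        @avocado.skip('not implemented yet')
        def test_skipped(self):
            # Reported as SKIP; neither setUp() nor tearDown() runs for this test.
            pass

        def test_regular(self):
            pass

        def tearDown(self):
            # cancel() is now also accepted here and results in CANCEL, not ERROR.
            pass


    if __name__ == "__main__":
        main()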
@@ -65,6 +65,7 @@ def skip(message=None):
def wrapper(*args, **kwargs):
raise core_exceptions.TestDecoratorSkip(message)
function = wrapper
function.__skip_test_decorator__ = True
return function
return decorator
......
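
For orientation, a simplified, self-contained sketch of the mechanism the hunks above and below rely on; everything except the `__skip_test_decorator__` attribute name is invented here. The decorator tags the wrapped test method, and the runner consults that tag before deciding whether to run setUp()/tearDown():

    import functools


    def skip(message=None):
        """Simplified stand-in for avocado.skip(): mark the method as skipped."""
        def decorator(function):
            @functools.wraps(function)
            def wrapper(*args, **kwargs):
                # stand-in for raising TestDecoratorSkip
                raise RuntimeError('skipped: %s' % message)
            wrapper.__skip_test_decorator__ = True
            return wrapper
        return decorator


    def run_one(test_method):
        """Stand-in for the runner logic: consult the marker before setUp()."""
        if getattr(test_method, '__skip_test_decorator__', False):
            return 'SKIP'   # setUp(), the test body and tearDown() never run
        test_method()
        return 'PASS'


    @skip('not relevant on this platform')
    def test_example():
        pass


    print(run_one(test_example))   # -> SKIP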
@@ -559,26 +559,29 @@ class Test(unittest.TestCase):
cleanup_exception = None
stdout_check_exception = None
stderr_check_exception = None
skip_test = getattr(testMethod, '__skip_test_decorator__', False)
cancel_test = False
try:
self.setUp()
if skip_test is False:
self.setUp()
except (exceptions.TestSetupSkip,
exceptions.TestDecoratorSkip,
exceptions.TestTimeoutSkip,
exceptions.TestSkipError) as details:
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
raise exceptions.TestSkipError(details)
except exceptions.TestCancel as details:
except exceptions.TestDecoratorSkip as details:
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
skip_illegal_msg = ('Calling cancel() in setUp() '
'is not allowed in avocado, you '
'must fix your test. Original cancel exception: '
'%s' % details)
raise exceptions.TestError(skip_illegal_msg)
raise exceptions.TestSkipError(details)
except exceptions.TestCancel as details:
cancel_test = details
except: # Old-style exceptions are not inherited from Exception()
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
details = sys.exc_info()[1]
raise exceptions.TestSetupFail(details)
try:
if cancel_test:
raise exceptions.TestCancel(cancel_test)
testMethod()
except exceptions.TestSetupSkip as details:
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
@@ -605,7 +608,8 @@ class Test(unittest.TestCase):
self.log.debug(' -> %s %s: %s', key, type(value), value)
finally:
try:
self.tearDown()
if skip_test is False:
self.tearDown()
except exceptions.TestSetupSkip as details:
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
skip_illegal_msg = ('Calling skip() in places other than '
@@ -615,11 +619,15 @@ class Test(unittest.TestCase):
raise exceptions.TestError(skip_illegal_msg)
except exceptions.TestDecoratorSkip as details:
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
skip_illegal_msg = ('Using skip decorators after the test '
'will have no effect, you must fix your '
skip_illegal_msg = ('Using skip decorators in tearDown() '
'is not allowed in '
'avocado, you must fix your '
'test. Original skip exception: %s' %
details)
raise exceptions.TestError(skip_illegal_msg)
except exceptions.TestCancel as details:
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
raise
except: # avoid old-style exception failures
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
details = sys.exc_info()[1]
@@ -782,7 +790,9 @@ class Test(unittest.TestCase):
:param message: an optional message that will be recorded in the logs
:type message: str
"""
raise exceptions.TestSetupSkip(message)
dep_msg = "WARNING: self.skip() will be deprecated. " \
"Use 'self.cancel()' or the skip decorators"
raise exceptions.TestSetupSkip("[%s] %s" % (dep_msg, message))
def cancel(self, message=None):
"""
......
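
As a hedged illustration of the deprecation above (the class name is invented), a test that still calls self.skip() in setUp() keeps ending as SKIP, but its reported reason is now prefixed with the warning built in the hunk above:

    #!/usr/bin/env python
    from avocado import Test
    from avocado import main


    class StillUsesSkip(Test):  # hypothetical example

        def setUp(self):
            # Still results in SKIP, but the reported reason now reads roughly:
            # "[WARNING: self.skip() will be deprecated. Use 'self.cancel()' or
            #  the skip decorators] no suitable device found"
            self.skip('no suitable device found')

        def test(self):
            pass


    if __name__ == "__main__":
        main()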
@@ -76,7 +76,10 @@ Avocado supports the most common exit statuses:
be nice to review. (some result plugins do not support this and report
``PASS`` instead)
* ``SKIP`` - the test's pre-requisites were not satisfied and the test's
body was not executed (nor its ``tearDown``)
body was not executed (nor its ``setUp()`` and ``tearDown``).
* ``CANCEL`` - the test was canceled somewhere during the `setUp()`, the
test method or the `tearDown()`. The ``setUp()`` and ``tearDown``
methods are executed.
* ``FAIL`` - test did not result in the expected outcome. A failure points
at a (possible) bug in the tested subject, and not in the test itself.
When the test (and its) execution breaks, an ``ERROR`` and not a ``FAIL``
@@ -957,6 +960,10 @@ Avocado offers some options for the test writers to skip a test:
Test ``skip()`` Method
----------------------
.. warning:: `self.skip()` will be deprecated at the end of 2017.
Please adjust your tests to use the `self.cancel()` or the skip
decorators instead.
Using the ``skip()`` method available in the Test API is only allowed
inside the ``setUp()`` method. Calling ``skip()`` from inside the test is not
allowed as, conceptually, you cannot skip a test after it has already started.
@@ -1036,17 +1043,17 @@ Will produce the following result::
Notice the ``test3`` was not skipped because the provided condition was
not ``False``.
Using the skip decorators, since the `setUp()` was already executed, the
`tearDown()` will be also executed.
Using the skip decorators, nothing is actually executed. We will skip
the `setUp()` method, the test method and the `tearDown()` method.
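
A hedged sketch (the class name is invented; the logging style mirrors the functional test added later in this diff) of what the paragraph above means in practice: for decorator-skipped tests, none of the log lines below would ever appear in the job log:

    import avocado
    from avocado import Test


    class SkipDecoratorSketch(Test):  # hypothetical example, run via `avocado run`

        def setUp(self):
            self.log.info('setup executed')      # never logged for skipped tests

        @avocado.skip('unconditionally skipped')
        def test_skip(self):
            self.log.info('test executed')       # never logged

        @avocado.skipIf(True, 'skipped because the condition is True')
        def test_skip_if(self):
            self.log.info('test executed')       # never logged

        def tearDown(self):
            self.log.info('teardown executed')   # never logged for skipped tests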
Cancelling Tests
================
The only supported way to cancel a test and not negatively impact the
job exit status (unlike using `self.fail` or `self.error`) is by using
the `self.cancel()` method. The `self.cancel()` can be called only
from your test methods. Example::
You can cancel a test by calling `self.cancel()` at any phase of the test
(`setUp()`, the test method or `tearDown()`). The test will finish with `CANCEL`
status and will not make the job exit with a non-0 status. Example::
#!/usr/bin/env python
@@ -1093,11 +1100,10 @@ the correct version, the result will be::
TESTS TIME : 2.28 s
JOB HTML : $HOME/avocado/job-results/job-2017-03-10T16.22-39c1f12/html/results.html
Notice that, since the `setUp()` was already executed, calling the
`self.cancel()` will cancel the rest of the test from that point on, but
the `tearDown()` will still be executed.
Notice that using the `self.cancel()` will cancel the rest of the test
from that point on, but the `tearDown()` will still be executed.
Depending on the result format you're refering to, the `CANCEL` status
Depending on the result format you're referring to, the `CANCEL` status
is mapped to a corresponding valid status in that format. See the table
below:
......
@@ -4,21 +4,22 @@ from avocado import Test
from avocado import main
class SkipOnSetupTest(Test):
class CancelOnSetupTest(Test):
"""
Example test that skips the current test, on the setUp phase.
Example test that cancels the current test, on the setUp phase.
"""
def setUp(self):
"""
This should end with SKIP.
self.skip() is under deprecation process. This should
end with CANCEL instead.
"""
self.skip('This should end with SKIP.')
self.cancel('This should end with CANCEL.')
def test_wont_be_executed(self):
"""
This won't get to be executed, given that setUp calls .skip().
This won't get to be executed, given that setUp calls .cancel().
"""
pass
......
#!/usr/bin/env python
import avocado
class SkipOutsideSetup(avocado.Test):
"""
Test illustrating the behavior of calling skip() outside setUp().
"""
def test(self):
"""
This should end with ERROR.
The method skip() can only be called from inside setUp(). If called
outside of that method, the test status will be marked as ERROR, with
a reason message that asks you to fix your test.
"""
self.skip('Calling skip() outside setUp() will result in ERROR')
if __name__ == "__main__":
avocado.main()
@@ -442,17 +442,6 @@ class RunnerOperationTest(unittest.TestCase):
int(r['job_id'], 16) # it's an hex number
self.assertEqual(len(r['job_id']), 40)
def test_skip_outside_setup(self):
os.chdir(basedir)
cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
"--json - skip_outside_setup.py" % self.tmpdir)
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" % (expected_rc,
result))
self.assertIn('"status": "ERROR"', result.stdout)
def test_early_latest_result(self):
"""
Tests that the `latest` link to the latest job results is created early
@@ -563,17 +552,17 @@ class RunnerHumanOutputTest(unittest.TestCase):
(expected_rc, result))
self.assertIn('errortest.py:ErrorTest.test: ERROR', result.stdout)
def test_output_skip(self):
def test_output_cancel(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'skiponsetup.py' % self.tmpdir)
'cancelonsetup.py' % self.tmpdir)
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
' SKIP', result.stdout)
self.assertIn('PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1',
result.stdout)
@unittest.skipIf(not GNU_ECHO_BINARY,
'GNU style echo binary not available')
@@ -604,7 +593,7 @@ class RunnerHumanOutputTest(unittest.TestCase):
def test_replay_skip_skipped(self):
cmd = ("./scripts/avocado run --job-results-dir %s --json - "
"skiponsetup.py" % self.tmpdir)
"cancelonsetup.py" % self.tmpdir)
result = process.run(cmd)
result = json.loads(result.stdout)
jobid = str(result["job_id"])
@@ -1079,7 +1068,7 @@ class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):
1, 0, 0, 1, 0)
def test_xunit_plugin_skiponsetuptest(self):
self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
1, 0, 0, 0, 1)
def test_xunit_plugin_errortest(self):
@@ -1144,8 +1133,8 @@ class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):
1, 0, 1, 0)
def test_json_plugin_skiponsetuptest(self):
self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
1, 0, 0, 1)
self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
1, 0, 0, 0)
def test_json_plugin_errortest(self):
self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
......
@@ -16,8 +16,16 @@ import avocado
class AvocadoCancelTest(avocado.Test):
def setUp(self):
self.log.info('setup code cancel')
def test(self):
self.log.info('test code before cancel')
self.cancel()
self.log.info('test code after cancel')
def tearDown(self):
self.log.info('teardown code')
"""
AVOCADO_TEST_CANCEL_ON_SETUP = """
@@ -26,10 +34,15 @@ import avocado
class AvocadoCancelTest(avocado.Test):
def setUp(self):
self.log.info('setup code before cancel')
self.cancel()
self.log.info('setup code after cancel')
def test(self):
pass
self.log.info('test code')
def tearDown(self):
self.log.info('teardown code')
"""
@@ -60,8 +73,13 @@ class TestCancel(unittest.TestCase):
'--json -']
result = process.run(' '.join(cmd_line), ignore_status=True)
json_results = json.loads(result.stdout)
debuglog = json_results['debuglog']
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertEqual(json_results['cancel'], 1)
self.assertIn('setup code', open(debuglog, 'r').read())
self.assertIn('test code before cancel', open(debuglog, 'r').read())
self.assertNotIn('test code after cancel', open(debuglog, 'r').read())
self.assertIn('teardown code', open(debuglog, 'r').read())
def test_cancel_on_setup(self):
os.chdir(basedir)
@@ -74,9 +92,13 @@ class TestCancel(unittest.TestCase):
'--json -']
result = process.run(' '.join(cmd_line), ignore_status=True)
json_results = json.loads(result.stdout)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
self.assertEqual(json_results['cancel'], 0)
self.assertEqual(json_results['errors'], 1)
debuglog = json_results['debuglog']
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertEqual(json_results['cancel'], 1)
self.assertIn('setup code before cancel', open(debuglog, 'r').read())
self.assertNotIn('setup code after cancel', open(debuglog, 'r').read())
self.assertNotIn('test code', open(debuglog, 'r').read())
self.assertIn('teardown code', open(debuglog, 'r').read())
def tearDown(self):
shutil.rmtree(self.tmpdir)
......
@@ -18,31 +18,25 @@ from lib_skip_decorators import check_condition
class AvocadoSkipTests(avocado.Test):
def setUp(self):
self.log.info('setup executed')
@avocado.skip('Test skipped')
def test1(self):
pass
self.log.info('test executed')
@avocado.skipIf(check_condition(True),
'Skipped due to the True condition')
def test2(self):
pass
self.log.info('test executed')
@avocado.skipUnless(check_condition(False),
'Skipped due to the False condition')
def test3(self):
pass
@avocado.skipIf(False)
def test4(self):
pass
@avocado.skipUnless(True)
def test5(self):
pass
self.log.info('test executed')
@avocado.skip()
def test6(self):
pass
def tearDown(self):
self.log.info('teardown executed')
"""
@@ -54,6 +48,34 @@ def check_condition(condition):
"""
AVOCADO_SKIP_DECORATOR_SETUP = """
import avocado
class AvocadoSkipTests(avocado.Test):
@avocado.skip('Test skipped')
def setUp(self):
pass
def test1(self):
pass
"""
AVOCADO_SKIP_DECORATOR_TEARDOWN = """
import avocado
class AvocadoSkipTests(avocado.Test):
def test1(self):
pass
@avocado.skip('Test skipped')
def tearDown(self):
pass
"""
class TestSkipDecorators(unittest.TestCase):
def setUp(self):
@@ -69,6 +91,18 @@ class TestSkipDecorators(unittest.TestCase):
self.test_lib = script.Script(lib_path, AVOCADO_TEST_SKIP_LIB)
self.test_lib.save()
skip_setup_path = os.path.join(self.tmpdir,
'test_skip_decorator_setup.py')
self.skip_setup = script.Script(skip_setup_path,
AVOCADO_SKIP_DECORATOR_SETUP)
self.skip_setup.save()
bad_teardown_path = os.path.join(self.tmpdir,
'test_skip_decorator_teardown.py')
self.bad_teardown = script.Script(bad_teardown_path,
AVOCADO_SKIP_DECORATOR_TEARDOWN)
self.bad_teardown.save()
def test_skip_decorators(self):
os.chdir(basedir)
cmd_line = ['./scripts/avocado',
@@ -80,9 +114,41 @@ class TestSkipDecorators(unittest.TestCase):
'--json -']
result = process.run(' '.join(cmd_line), ignore_status=True)
json_results = json.loads(result.stdout)
debuglog = json_results['debuglog']
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertEquals(json_results['skip'], 3)
self.assertFalse('setup executed' in open(debuglog, 'r').read())
self.assertFalse('test executed' in open(debuglog, 'r').read())
self.assertFalse('teardown executed' in open(debuglog, 'r').read())
def test_skip_setup(self):
os.chdir(basedir)
cmd_line = ['./scripts/avocado',
'run',
'--sysinfo=off',
'--job-results-dir',
'%s' % self.tmpdir,
'%s' % self.skip_setup,
'--json -']
result = process.run(' '.join(cmd_line), ignore_status=True)
json_results = json.loads(result.stdout)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertEquals(json_results['pass'], 2)
self.assertEquals(json_results['skip'], 4)
self.assertEquals(json_results['skip'], 1)
def test_skip_teardown(self):
os.chdir(basedir)
cmd_line = ['./scripts/avocado',
'run',
'--sysinfo=off',
'--job-results-dir',
'%s' % self.tmpdir,
'%s' % self.bad_teardown,
'--json -']
result = process.run(' '.join(cmd_line), ignore_status=True)
json_results = json.loads(result.stdout)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
self.assertEquals(json_results['errors'], 1)
def tearDown(self):
shutil.rmtree(self.tmpdir)
......