Unverified commit 7970518f, authored by Cleber Rosa

Merge remote-tracking branch 'apahim/cancel_status_v2'

Signed-off-by: Cleber Rosa <crosa@redhat.com>
......@@ -177,3 +177,12 @@ class TestWarn(TestBaseException):
failure.
"""
status = "WARN"
class TestCancel(TestBaseException):
"""
Indicates that a test was cancelled.
Should be thrown when the cancel() test method is used.
"""
status = "CANCEL"
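(A minimal sketch, not part of this commit: how the class-level ``status`` attribute of the new exception can be consumed. ``TestBaseException`` is the existing base class shown in the hunk above.)::

    from avocado.core import exceptions

    try:
        raise exceptions.TestCancel('missing dependency')
    except exceptions.TestBaseException as detail:
        # The class attribute carries the status string recorded for the test
        print(detail.status)  # -> "CANCEL"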
......@@ -78,6 +78,7 @@ class TermSupport(object):
self.INTERRUPT = self.COLOR_RED
self.ERROR = self.COLOR_RED
self.WARN = self.COLOR_YELLOW
self.CANCEL = self.COLOR_YELLOW
self.PARTIAL = self.COLOR_YELLOW
self.ENDC = self.CONTROL_END
self.LOWLIGHT = self.COLOR_DARKGREY
......
......@@ -41,6 +41,7 @@ class Result(object):
self.skipped = 0
self.warned = 0
self.interrupted = 0
self.cancelled = 0
self.tests = []
def _reconcile(self):
......@@ -53,7 +54,8 @@ class Result(object):
"""
valid_results_count = (self.passed + self.errors +
self.failed + self.warned +
self.skipped + self.interrupted)
self.skipped + self.interrupted +
self.cancelled)
other_skipped_count = self.tests_total - valid_results_count
if other_skipped_count > 0:
self.skipped += other_skipped_count
......@@ -103,6 +105,8 @@ class Result(object):
self.warned += 1
elif status == "INTERRUPTED":
self.interrupted += 1
elif status == "CANCEL":
self.cancelled += 1
else:
self.errors += 1
self.end_test(state)
......@@ -26,14 +26,16 @@ mapping = {"SKIP": True,
"ALERT": False,
"RUNNING": False,
"NOSTATUS": False,
"INTERRUPTED": False}
"INTERRUPTED": False,
"CANCEL": True}
user_facing_status = ["SKIP",
"ERROR",
"FAIL",
"WARN",
"PASS",
"INTERRUPTED"]
"INTERRUPTED",
"CANCEL"]
feedback = {
# Test did not advertise current status, but process running the test is
......
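(A simplified illustration, not the runner's actual logic: the ``mapping`` dictionary above marks ``CANCEL`` as a non-failing status, so a cancelled test by itself does not flip the job result.)::

    # Hypothetical, reduced version of the mapping shown above
    mapping = {"PASS": True, "SKIP": True, "CANCEL": True,
               "FAIL": False, "ERROR": False, "INTERRUPTED": False}

    statuses = ["PASS", "CANCEL", "PASS"]
    job_ok = all(mapping[status] for status in statuses)
    print(job_ok)  # True: CANCEL does not fail the job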
......@@ -567,6 +567,13 @@ class Test(unittest.TestCase):
exceptions.TestSkipError) as details:
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
raise exceptions.TestSkipError(details)
except exceptions.TestCancel as details:
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
skip_illegal_msg = ('Calling cancel() in setUp() '
'is not allowed in avocado, you '
'must fix your test. Original cancel exception: '
'%s' % details)
raise exceptions.TestError(skip_illegal_msg)
except: # Old-style exceptions are not inherited from Exception()
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
details = sys.exc_info()[1]
......@@ -583,6 +590,9 @@ class Test(unittest.TestCase):
except exceptions.TestDecoratorSkip as details:
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
raise exceptions.TestSkipError(details)
except exceptions.TestCancel as details:
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
raise
except: # Old-style exceptions are not inherited from Exception()
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
details = sys.exc_info()[1]
......@@ -774,6 +784,21 @@ class Test(unittest.TestCase):
"""
raise exceptions.TestSetupSkip(message)
def cancel(self, message=None):
"""
Cancels the test.
This method is expected to be called from the test method, not
anywhere else, since by definition, we can only cancel a test that
is currently under execution. If you call this method outside the
test method, avocado will mark your test status as ERROR, and
instruct you to fix your test in the error message.
:param message: an optional message that will be recorded in the logs
:type message: str
"""
raise exceptions.TestCancel(message)
def fetch_asset(self, name, asset_hash=None, algorithm='sha1',
locations=None, expire=None):
"""
......
......@@ -35,7 +35,8 @@ class Human(ResultEvents):
'FAIL': output.TERM_SUPPORT.FAIL,
'SKIP': output.TERM_SUPPORT.SKIP,
'WARN': output.TERM_SUPPORT.WARN,
'INTERRUPTED': output.TERM_SUPPORT.INTERRUPT}
'INTERRUPTED': output.TERM_SUPPORT.INTERRUPT,
'CANCEL': output.TERM_SUPPORT.CANCEL}
def __init__(self, args):
self.log = logging.getLogger("avocado.app")
......@@ -93,7 +94,8 @@ class Human(ResultEvents):
if not self.owns_stdout:
return
self.log.info("RESULTS : PASS %d | ERROR %d | FAIL %d | SKIP %d | "
"WARN %d | INTERRUPT %s", job.result.passed,
"WARN %d | INTERRUPT %s | CANCEL %s", job.result.passed,
job.result.errors, job.result.failed, job.result.skipped,
job.result.warned, job.result.interrupted)
job.result.warned, job.result.interrupted,
job.result.cancelled)
self.log.info("TESTS TIME : %.2f s", job.result.tests_total_time)
......@@ -62,6 +62,7 @@ class JSONResult(Result):
'errors': result.errors,
'failures': result.failed,
'skip': result.skipped,
'cancel': result.cancelled,
'time': result.tests_total_time}
return json.dumps(content,
sort_keys=True,
......
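(A minimal sketch, file name assumed: reading the new ``cancel`` counter from a job's JSON results file.)::

    import json

    # 'results.json' is assumed to live in the job results directory
    with open('results.json') as results_file:
        results = json.load(results_file)
    print(results['cancel'])  # number of cancelled tests in the job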
......@@ -114,6 +114,9 @@ class TAPResult(ResultEvents):
self.__write("ok %s %s", result.tests_run, name)
elif status == "SKIP":
self.__write("ok %s %s # SKIP %s", result.tests_run, name, state.get("fail_reason"))
elif status == "CANCEL":
self.__write("ok %s %s # CANCEL %s",
result.tests_run, name, state.get("fail_reason"))
else:
self.__write("not ok %s %s", result.tests_run, name)
......
......@@ -79,7 +79,7 @@ class XUnitResult(Result):
testsuite.setAttribute('tests', self._escape_attr(result.tests_total))
testsuite.setAttribute('errors', self._escape_attr(result.errors + result.interrupted))
testsuite.setAttribute('failures', self._escape_attr(result.failed))
testsuite.setAttribute('skipped', self._escape_attr(result.skipped))
testsuite.setAttribute('skipped', self._escape_attr(result.skipped + result.cancelled))
testsuite.setAttribute('time', self._escape_attr(result.tests_total_time))
testsuite.setAttribute('timestamp', self._escape_attr(datetime.datetime.now()))
document.appendChild(testsuite)
......@@ -93,6 +93,8 @@ class XUnitResult(Result):
elif status == 'FAIL':
element = self._create_failure_or_error(document, test, 'failure')
testcase.appendChild(element)
elif status == 'CANCEL':
testcase.appendChild(Element('skipped'))
else:
element = self._create_failure_or_error(document, test, 'error')
testcase.appendChild(element)
......
......@@ -1034,6 +1034,79 @@ Notice the ``test3`` was not skipped because the provided condition was
not ``False``.
Cancelling Tests
================
The only supported way to cancel a test without negatively impacting the
job exit status (unlike `self.fail` or `self.error`) is the
`self.cancel()` method. `self.cancel()` can be called only from within
your test methods. Example::
#!/usr/bin/env python
from avocado import Test
from avocado import main
from avocado.utils.process import run
from avocado.utils.software_manager import SoftwareManager
class CancelTest(Test):
"""
Example tests that cancel the current test from inside the test.
"""
def setUp(self):
sm = SoftwareManager()
self.pkgs = sm.list_all(software_components=False)
def test_iperf(self):
if 'iperf-2.0.8-6.fc25.x86_64' not in self.pkgs:
self.cancel('iperf is not installed or wrong version')
self.assertIn('pthreads',
run('iperf -v', ignore_status=True).stderr)
def test_gcc(self):
if 'gcc-6.3.1-1.fc25.x86_64' not in self.pkgs:
self.cancel('gcc is not installed or wrong version')
self.assertIn('enable-gnu-indirect-function',
run('gcc -v', ignore_status=True).stderr)
if __name__ == "__main__":
main()
On a system missing the `iperf` package but with `gcc` installed at the
correct version, the result will be::
JOB ID : 39c1f120830b9769b42f5f70b6b7bad0b1b1f09f
JOB LOG : $HOME/avocado/job-results/job-2017-03-10T16.22-39c1f12/job.log
(1/2) /home/apahim/avocado/tests/test_cancel.py:CancelTest.test_iperf: CANCEL (1.15 s)
(2/2) /home/apahim/avocado/tests/test_cancel.py:CancelTest.test_gcc: PASS (1.13 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1
TESTS TIME : 2.28 s
JOB HTML : $HOME/avocado/job-results/job-2017-03-10T16.22-39c1f12/html/results.html
Notice that, since `setUp()` was already executed, calling `self.cancel()`
cancels the rest of the test from that point on, but `tearDown()` will
still be executed.
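A minimal sketch (hypothetical test, not part of this commit) showing that
behavior::

    #!/usr/bin/env python
    from avocado import Test
    from avocado import main

    class CancelTearDownTest(Test):

        def test(self):
            self.cancel('stopping here; tearDown() still runs')

        def tearDown(self):
            # This message is recorded in the test logs even though the
            # test method above was cancelled
            self.log.info('tearDown() executed after cancel()')

    if __name__ == "__main__":
        main()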
Depending on the result format you're referring to, the `CANCEL` status
is mapped to a corresponding valid status in that format, as shown in the
table below:
+--------+----------------------+
| Format | Corresponding Status |
+========+======================+
| json | cancel |
+--------+----------------------+
| xunit | skipped |
+--------+----------------------+
| tap | ok |
+--------+----------------------+
| html | CANCEL (warning) |
+--------+----------------------+
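For instance, a quick check of the xunit mapping (cancelled tests counted
as ``skipped``) is to parse the generated ``results.xml``. This is only a
sketch; the file name and its location in the job results directory are
assumed::

    import xml.etree.ElementTree as ElementTree

    # Path is an assumption; use the job results directory printed by avocado
    testsuite = ElementTree.parse('results.xml').getroot()
    print(testsuite.get('skipped'))  # counts both skipped and cancelled tests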
Docstring Directives
====================
......
#!/usr/bin/env python
from avocado import Test
from avocado import main
class CancelTest(Test):
"""
Example test that cancels the current test from inside the test.
"""
def test(self):
self.cancel("This should end with CANCEL.")
if __name__ == "__main__":
main()
......@@ -113,7 +113,8 @@ class ReportModel(object):
"ALERT": "danger",
"RUNNING": "info",
"NOSTATUS": "info",
"INTERRUPTED": "danger"}
"INTERRUPTED": "danger",
"CANCEL": "warning"}
test_info = []
results_dir = self.results_dir(False)
for tst in self.result.tests:
......
import json
import os
import shutil
import tempfile
import unittest
from avocado.core import exit_codes
from avocado.utils import process
from avocado.utils import script
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO_TEST_CANCEL = """
import avocado
class AvocadoCancelTest(avocado.Test):
def test(self):
self.cancel()
"""
AVOCADO_TEST_CANCEL_ON_SETUP = """
import avocado
class AvocadoCancelTest(avocado.Test):
def setUp(self):
self.cancel()
def test(self):
pass
"""
class TestCancel(unittest.TestCase):
def setUp(self):
os.chdir(basedir)
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
test_path = os.path.join(self.tmpdir, 'test_cancel.py')
self.test_cancel = script.Script(test_path,
AVOCADO_TEST_CANCEL)
self.test_cancel.save()
test_path = os.path.join(self.tmpdir, 'test_cancel_on_setup.py')
self.test_cancel_on_setup = script.Script(test_path,
AVOCADO_TEST_CANCEL_ON_SETUP)
self.test_cancel_on_setup.save()
def test_cancel(self):
os.chdir(basedir)
cmd_line = ['./scripts/avocado',
'run',
'--sysinfo=off',
'--job-results-dir',
'%s' % self.tmpdir,
'%s' % self.test_cancel,
'--json -']
result = process.run(' '.join(cmd_line), ignore_status=True)
json_results = json.loads(result.stdout)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertEqual(json_results['cancel'], 1)
def test_cancel_on_setup(self):
os.chdir(basedir)
cmd_line = ['./scripts/avocado',
'run',
'--sysinfo=off',
'--job-results-dir',
'%s' % self.tmpdir,
'%s' % self.test_cancel_on_setup,
'--json -']
result = process.run(' '.join(cmd_line), ignore_status=True)
json_results = json.loads(result.stdout)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
self.assertEqual(json_results['cancel'], 0)
self.assertEqual(json_results['errors'], 1)
def tearDown(self):
shutil.rmtree(self.tmpdir)
if __name__ == '__main__':
unittest.main()