Commit fcdf0c78 authored by Lucas Meneghel Rodrigues

Merge pull request #695 from avocado-framework/ldoktor-runTest2

  avocado.core.test: Remove the obsoleted "runTest" [v4]
......@@ -50,11 +50,11 @@ class Test(unittest.TestCase):
Base implementation for the test class.
You'll inherit from this to write your own tests. Typically you'll want
to implement setUp(), runTest() and tearDown() methods on your own tests.
to implement setUp(), test*() and tearDown() methods on your own tests.
"""
default_params = {}
def __init__(self, methodName='runTest', name=None, params=None,
def __init__(self, methodName='test', name=None, params=None,
base_logdir=None, tag=None, job=None, runner_queue=None):
"""
Initializes the test.
......@@ -293,7 +293,7 @@ class Test(unittest.TestCase):
def setUp(self):
"""
Setup stage that the test needs before passing to the actual runTest.
Setup stage that the test needs before passing to the actual test*.
Must be implemented by tests if they want such a stage. Commonly we'll
download/compile test suites, create files needed for a test, among
......@@ -303,9 +303,9 @@ class Test(unittest.TestCase):
def tearDown(self):
"""
Cleanup stage after the runTest is done.
Cleanup stage after the test* is done.
Examples of cleanup runTests are deleting temporary files, restoring
Examples of cleanup are deleting temporary files, restoring
firewall configurations or other system settings that were changed
in setup.
"""
......@@ -342,7 +342,7 @@ class Test(unittest.TestCase):
testMethod = getattr(self, self._testMethodName)
self._start_logging()
self.sysinfo_logger.start_test_hook()
runTest_exception = None
test_exception = None
cleanup_exception = None
stdout_check_exception = None
stderr_check_exception = None
......@@ -358,7 +358,7 @@ class Test(unittest.TestCase):
testMethod()
except Exception, details:
stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
runTest_exception = details
test_exception = details
finally:
try:
self.tearDown()
......@@ -398,8 +398,8 @@ class Test(unittest.TestCase):
self.record_reference_stderr()
# pylint: disable=E0702
if runTest_exception is not None:
raise runTest_exception
if test_exception is not None:
raise test_exception
elif cleanup_exception is not None:
raise exceptions.TestSetupFail(cleanup_exception)
elif stdout_check_exception is not None:
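The run logic above captures the exception from each stage separately and
re-raises only after logging and cleanup are done, with the test's own
failure taking precedence over cleanup failures. A condensed sketch of the
pattern, keeping the codebase's Python 2 syntax::

    test_exception = None
    cleanup_exception = None
    try:
        testMethod()
    except Exception, details:
        test_exception = details
    finally:
        try:
            self.tearDown()
        except Exception, details:
            cleanup_exception = details

    # pylint: disable=E0702
    if test_exception is not None:
        raise test_exception
    elif cleanup_exception is not None:
        raise exceptions.TestSetupFail(cleanup_exception)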
......@@ -556,7 +556,7 @@ class SimpleTest(Test):
self.log.info("Exit status: %s", result.exit_status)
self.log.info("Duration: %s", result.duration)
def runTest(self):
def test(self):
"""
Run the executable, and log its detailed execution.
"""
......@@ -586,7 +586,7 @@ class MissingTest(Test):
Handle when there is no such test module in the test directory.
"""
def runTest(self):
def test(self):
e_msg = ('Test %s could not be found in the test dir %s '
'(or test path does not exist)' %
(self.name, data_dir.get_test_dir()))
......@@ -602,7 +602,7 @@ class BuggyTest(Test):
buggy python module.
"""
def runTest(self):
def test(self):
# pylint: disable=E0702
raise self.params.get('exception')
......@@ -616,7 +616,7 @@ class NotATest(Test):
or a regular, non executable file.
"""
def runTest(self):
def test(self):
e_msg = ('File %s is not executable and does not contain an avocado '
'test class in it ' % self.name)
raise exceptions.NotATestError(e_msg)
......@@ -631,6 +631,6 @@ class TimeOutSkipTest(Test):
It will never have a chance to execute.
"""
def runTest(self):
def test(self):
e_msg = 'Test skipped due to a job timeout!'
raise exceptions.TestNAError(e_msg)
......@@ -92,7 +92,7 @@ test::
class HelloOutputTest(Test):
def runTest(self):
def test(self):
result = process.run("/path/to/hello", ignore_status=True)
self.assertIn("hello\n", result.stdout)
......@@ -154,7 +154,7 @@ Example
Take a look at ``examples/tests/modify_variable.py`` test::
def runTest(self):
def test(self):
"""
Execute 'print_variable'.
"""
......
......@@ -14,7 +14,7 @@ class AbortTest(Test):
default_params = {'timeout': 2.0}
def runTest(self):
def test(self):
os.abort()
......
......@@ -27,7 +27,7 @@ class CAbort(Test):
env={'CFLAGS': '-g -O0'},
extra_args='abort')
def runTest(self):
def test(self):
"""
Execute 'abort'.
"""
......
......@@ -28,7 +28,7 @@ class DataDirTest(Test):
env={'CFLAGS': '-g -O0'},
extra_args='datadir')
def runTest(self):
def test(self):
"""
Execute 'datadir'.
"""
......
......@@ -10,7 +10,7 @@ class DoubleFail(Test):
Functional test for avocado. Straight up fail the test.
"""
def runTest(self):
def test(self):
"""
Should fail.
"""
......
......@@ -29,7 +29,7 @@ class DoubleFreeTest(Test):
env={'CFLAGS': '-g -O0'},
extra_args='doublefree')
def runTest(self):
def test(self):
"""
Execute 'doublefree'.
"""
......
......@@ -29,7 +29,7 @@ class DoubleFreeTest(Test):
env={'CFLAGS': '-g -O0'},
extra_args=self.__binary)
def runTest(self):
def test(self):
"""
Execute 'doublefree'.
"""
......
......@@ -10,7 +10,7 @@ class ErrorTest(Test):
Example test that ends with ERROR.
"""
def runTest(self):
def test(self):
"""
This should end with ERROR.
"""
......
......@@ -10,7 +10,7 @@ class FailTest(Test):
Example test for avocado. Straight up fail the test.
"""
def runTest(self):
def test(self):
"""
Should fail.
"""
......
......@@ -21,7 +21,7 @@ class FailTest(Test):
Very nasty exception test
"""
def runTest(self):
def test(self):
"""
Should fail not-that-badly
"""
......
......@@ -21,7 +21,7 @@ class FailTest(Test):
Very nasty exception test
"""
def runTest(self):
def test(self):
"""
Should fail.
"""
......
......@@ -30,7 +30,7 @@ class FioTest(Test):
self.srcdir = os.path.join(self.srcdir, fio_version)
build.make(self.srcdir)
def runTest(self):
def test(self):
"""
Execute 'fio' with appropriate parameters.
"""
......
......@@ -12,7 +12,7 @@ class GenDataTest(Test):
Simple test that generates data to be persisted after the test is run
"""
def generate_bsod(self):
def test_bsod(self):
try:
from PIL import Image
from PIL import ImageDraw
......@@ -34,16 +34,13 @@ class GenDataTest(Test):
y += 12
bsod.save(os.path.join(self.outputdir, "bsod.png"))
def generate_json(self):
def test_json(self):
import json
output_path = os.path.join(self.outputdir, "test.json")
output = {"basedir": self.basedir,
"outputdir": self.outputdir}
json.dump(output, open(output_path, "w"))
def runTest(self):
self.generate_bsod()
self.generate_json()
if __name__ == "__main__":
main()
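Splitting the old ``runTest`` (which called ``generate_bsod`` and
``generate_json`` in sequence) into ``test_bsod`` and ``test_json`` makes
each data generator an independently reported test with its own results
directory. A sketch of the multi-method pattern, with illustrative file
contents::

    import json
    import os

    from avocado import Test
    from avocado import main


    class GenDataTest(Test):

        def test_bsod(self):
            # Runs as its own test, writing into its own self.outputdir.
            open(os.path.join(self.outputdir, "bsod.png"), "wb").close()

        def test_json(self):
            # A second, separately reported test in the same module.
            output_path = os.path.join(self.outputdir, "test.json")
            json.dump({"outputdir": self.outputdir}, open(output_path, "w"))

    if __name__ == "__main__":
        main()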
......@@ -22,7 +22,7 @@ class LinuxBuildTest(Test):
self.linux_build.uncompress()
self.linux_build.configure()
def runTest(self):
def test(self):
self.linux_build.build()
......
......@@ -32,7 +32,7 @@ class PrintVariableTest(Test):
env={'CFLAGS': '-g -O0'},
extra_args=self.__binary)
def runTest(self):
def test(self):
"""
Execute 'print_variable'.
"""
......
......@@ -55,7 +55,7 @@ class MultiplexTest(Test):
if enable_msx_vectors == 'yes':
self.log.info('Enabling msx vectors')
def runTest(self):
def test(self):
self.log.info('Executing synctest...')
self.log.info('synctest --timeout %s --tries %s',
self.params.get('sync_timeout', default=12),
......
......@@ -10,7 +10,7 @@ class PassTest(Test):
Example test that passes.
"""
def runTest(self):
def test(self):
"""
A test simply doesn't have to fail in order to pass
"""
......
......@@ -28,7 +28,7 @@ class Raise(Test):
env={'CFLAGS': '-g -O0'},
extra_args='raise')
def runTest(self):
def test(self):
"""
Execute 'raise'.
"""
......
......@@ -10,7 +10,7 @@ class SkipTest(Test):
Example test that skips the current test, that is, it ends with SKIP.
"""
def runTest(self):
def test(self):
"""
This should end with SKIP.
"""
......
......@@ -13,7 +13,7 @@ class SleepTenMin(Test):
Sleeps for 10 minutes
"""
def runTest(self):
def test(self):
"""
Sleep for length seconds.
"""
......
......@@ -12,7 +12,7 @@ class SleepTest(Test):
Example test for avocado.
"""
def runTest(self):
def test(self):
"""
Sleep for length seconds.
"""
......
......@@ -31,7 +31,7 @@ class SyncTest(Test):
else:
build.make(self.srcdir)
def runTest(self):
def test(self):
"""
Execute synctest with the appropriate params.
"""
......
......@@ -14,7 +14,7 @@ class TimeoutTest(Test):
default_params = {'timeout': 3}
def runTest(self):
def test(self):
"""
This should throw a TestTimeoutError.
"""
......
......@@ -37,7 +37,7 @@ class TrinityTest(Test):
build.make(self.srcdir)
self.victims_path = data_factory.make_dir_and_populate(self.workdir)
def runTest(self):
def test(self):
"""
Execute the trinity syscall fuzzer with the appropriate params.
"""
......
......@@ -10,7 +10,7 @@ class WarnTest(Test):
Functional test for avocado. Throw a TestWarn.
"""
def runTest(self):
def test(self):
"""
This should throw a TestWarn.
"""
......
......@@ -12,7 +12,7 @@ class WhiteBoard(Test):
Simple test that saves test custom data to the test whiteboard
"""
def runTest(self):
def test(self):
data_file = self.params.get('whiteboard_data_file', default='')
data_size = self.params.get('whiteboard_data_size', default='10')
if data_file:
......
......@@ -8,3 +8,4 @@ inspektor==0.1.15
pep8==1.6.2
requests==1.2.3
PyYAML==3.11
Pillow==2.2.1
......@@ -22,7 +22,7 @@ from avocado import Test
from avocado import main
class PassTest(Test):
def runTest(self):
def test(self):
pass
if __name__ == "__main__":
......@@ -35,7 +35,7 @@ from avocado import main
import adsh
class PassTest(Test):
def runTest(self):
def test(self):
pass
if __name__ == "__main__":
......
......@@ -244,10 +244,9 @@ class OutputPluginTest(unittest.TestCase):
(expected_rc, result))
with open(tmpfile, 'r') as fp:
json_results = json.load(fp)
debug_log = json_results['debuglog']
debug_dir = os.path.dirname(debug_log)
test_result_dir = os.path.join(debug_dir, 'test-results', 'whiteboard.py')
whiteboard_path = os.path.join(test_result_dir, 'whiteboard')
logfile = json_results['tests'][0]['logfile']
debug_dir = os.path.dirname(logfile)
whiteboard_path = os.path.join(debug_dir, 'whiteboard')
self.assertTrue(os.path.exists(whiteboard_path),
'Missing whiteboard file %s' % whiteboard_path)
finally:
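With the per-test ``logfile`` entry in the JSON results, artifacts such as
the whiteboard file are resolved relative to each test's own result
directory instead of being derived from the job-wide debug log. A sketch
with an illustrative path::

    # json_results['tests'][0]['logfile'] (illustrative value):
    logfile = '/tmp/avocado-job/test-results/whiteboard.py/debug.log'
    debug_dir = os.path.dirname(logfile)
    whiteboard_path = os.path.join(debug_dir, 'whiteboard')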
......@@ -256,6 +255,45 @@ class OutputPluginTest(unittest.TestCase):
except OSError:
pass
def test_gendata(self):
tmpfile = tempfile.mktemp()
try:
os.chdir(basedir)
cmd_line = ("./scripts/avocado run --job-results-dir %s "
"--sysinfo=off gendata --json %s" %
(self.tmpdir, tmpfile))
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
with open(tmpfile, 'r') as fp:
json_results = json.load(fp)
bsod_dir = None
json_dir = None
for test in json_results['tests']:
if "test_bsod" in test['url']:
bsod_dir = test['logfile']
elif "test_json" in test['url']:
json_dir = test['logfile']
self.assertTrue(bsod_dir, "Failed to get test_bsod output "
"directory")
self.assertTrue(json_dir, "Failed to get test_json output "
"directory")
bsod_dir = os.path.join(os.path.dirname(bsod_dir), "data",
"bsod.png")
json_dir = os.path.join(os.path.dirname(json_dir), "data",
"test.json")
self.assertTrue(os.path.exists(bsod_dir), "File %s produced by "
"test does not exist" % bsod_dir)
self.assertTrue(os.path.exists(json_dir), "File %s produced by "
"test does not exist" % json_dir)
finally:
try:
os.remove(tmpfile)
except OSError:
pass
def test_redirect_output(self):
redirected_output_path = tempfile.mktemp()
try:
......
......@@ -19,7 +19,7 @@ from avocado.utils import process
UNITTEST_GOOD = """from avocado import Test
from unittest import main
class AvocadoPassTest(Test):
def runTest(self):
def test(self):
self.assertTrue(True)
if __name__ == '__main__':
main()
......@@ -28,7 +28,7 @@ if __name__ == '__main__':
UNITTEST_FAIL = """from avocado import Test
from unittest import main
class AvocadoFailTest(Test):
def runTest(self):
def test(self):
self.fail('This test is supposed to fail')
if __name__ == '__main__':
main()
......@@ -37,7 +37,7 @@ if __name__ == '__main__':
UNITTEST_ERROR = """from avocado import Test
from unittest import main
class AvocadoErrorTest(Test):
def runTest(self):
def test(self):
self.error('This test is supposed to error')
if __name__ == '__main__':
main()
......
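Each template above is a complete test module whose ``main()`` entry point
comes from the standard ``unittest`` module, so the renamed ``test`` method
is still discovered (unittest's default loader picks up any method whose
name starts with ``test``). A sketch of how such a template might be
exercised; the helper name here is hypothetical::

    import os
    import tempfile

    from avocado.utils import process


    def run_template(template):
        # Write the inline module to disk and run it with plain python;
        # 'from unittest import main' then executes the test* methods.
        handle, path = tempfile.mkstemp(suffix='.py')
        os.write(handle, template)
        os.close(handle)
        return process.run('python %s' % path, ignore_status=True)

    result = run_template(UNITTEST_GOOD)  # exit status 0 expected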
......@@ -44,7 +44,7 @@ class JSONResultTest(unittest.TestCase):
class SimpleTest(Test):
def runTest(self):
def test(self):
pass
self.tmpfile = tempfile.mkstemp()
......
......@@ -23,7 +23,7 @@ from avocado import Test
from avocado import main
class PassTest(Test):
def runTest(self):
def test(self):
pass
if __name__ == "__main__":
......@@ -36,7 +36,7 @@ from avocado import main
import adsh
class PassTest(Test):
def runTest(self):
def test(self):
pass
if __name__ == "__main__":
......@@ -103,7 +103,7 @@ class LoaderTest(unittest.TestCase):
self.loader.discover_tests(params={'id': simple_test.path})[0])
self.assertTrue(test_class == test.SimpleTest, test_class)
tc = test_class(**test_parameters)
tc.runTest()
tc.test()
simple_test.remove()
def test_load_simple_not_exec(self):
......@@ -115,7 +115,7 @@ class LoaderTest(unittest.TestCase):
self.loader.discover_tests(params={'id': simple_test.path})[0])
self.assertTrue(test_class == test.NotATest, test_class)
tc = test_class(**test_parameters)
self.assertRaises(exceptions.NotATestError, tc.runTest)
self.assertRaises(exceptions.NotATestError, tc.test)
simple_test.remove()
def test_load_pass(self):
......@@ -129,7 +129,7 @@ class LoaderTest(unittest.TestCase):
str(test_class))
self.assertTrue(issubclass(test_class, test.Test))
tc = test_class(**test_parameters)
tc.runTest()
tc.test()
avocado_pass_test.remove()
def test_load_inherited(self):
......@@ -162,7 +162,7 @@ class LoaderTest(unittest.TestCase):
self.loader.discover_tests(params={'id': avocado_buggy_test.path})[0])
self.assertTrue(test_class == test.SimpleTest, test_class)
tc = test_class(**test_parameters)
self.assertRaises(exceptions.TestFail, tc.runTest)
self.assertRaises(exceptions.TestFail, tc.test)
avocado_buggy_test.remove()
def test_load_buggy_not_exec(self):
......@@ -175,7 +175,7 @@ class LoaderTest(unittest.TestCase):
self.loader.discover_tests(params={'id': avocado_buggy_test.path})[0])
self.assertTrue(test_class == test.BuggyTest, test_class)
tc = test_class(**test_parameters)
self.assertRaises(ImportError, tc.runTest)
self.assertRaises(ImportError, tc.test)
avocado_buggy_test.remove()
def test_load_not_a_test(self):
......@@ -188,7 +188,7 @@ class LoaderTest(unittest.TestCase):
self.loader.discover_tests(params={'id': avocado_not_a_test.path})[0])
self.assertTrue(test_class == test.NotATest, test_class)
tc = test_class(**test_parameters)
self.assertRaises(exceptions.NotATestError, tc.runTest)
self.assertRaises(exceptions.NotATestError, tc.test)
avocado_not_a_test.remove()
def test_load_not_a_test_exec(self):
......@@ -201,7 +201,7 @@ class LoaderTest(unittest.TestCase):
tc = test_class(**test_parameters)
# The test can't be executed (no shebang), raising an OSError
# (OSError: [Errno 8] Exec format error)
self.assertRaises(OSError, tc.runTest)
self.assertRaises(OSError, tc.test)
avocado_not_a_test.remove()
def test_py_simple_test(self):
......@@ -213,7 +213,7 @@ class LoaderTest(unittest.TestCase):
self.loader.discover_tests(params={'id': avocado_simple_test.path})[0])
self.assertTrue(test_class == test.SimpleTest)
tc = test_class(**test_parameters)
tc.runTest()
tc.test()
avocado_simple_test.remove()
def test_py_simple_test_notexec(self):
......@@ -226,7 +226,7 @@ class LoaderTest(unittest.TestCase):
self.loader.discover_tests(params={'id': avocado_simple_test.path})[0])
self.assertTrue(test_class == test.NotATest)
tc = test_class(**test_parameters)
self.assertRaises(exceptions.NotATestError, tc.runTest)
self.assertRaises(exceptions.NotATestError, tc.test)
avocado_simple_test.remove()
def test_multiple_methods(self):
......
......@@ -31,7 +31,7 @@ class TestClassTest(unittest.TestCase):
def setUp(self):
class AvocadoPass(test.Test):
def runTest(self):
def test(self):
variable = True
self.assertTrue(variable)
self.whiteboard = 'foo'
......
......@@ -48,7 +48,7 @@ class xUnitSucceedTest(unittest.TestCase):
class SimpleTest(Test):
def runTest(self):
def test(self):
pass
self.tmpfile = tempfile.mkstemp()
......