未验证 提交 e2d8d8b9 编写于 作者: C Cleber Rosa

Merge remote-tracking branch 'ldoktor/unittest-support5'

Signed-off-by: NCleber Rosa <crosa@redhat.com>
...@@ -18,7 +18,6 @@ Test loader module. ...@@ -18,7 +18,6 @@ Test loader module.
""" """
import ast import ast
import collections
import imp import imp
import inspect import inspect
import os import os
...@@ -43,6 +42,10 @@ AVAILABLE = None ...@@ -43,6 +42,10 @@ AVAILABLE = None
ALL = True ALL = True
# Regexp to find python unittests
_RE_UNIT_TEST = re.compile(r'test.*')
class MissingTest(object): class MissingTest(object):
""" """
Class representing reference which failed to be discovered Class representing reference which failed to be discovered
...@@ -511,7 +514,8 @@ class FileLoader(TestLoader): ...@@ -511,7 +514,8 @@ class FileLoader(TestLoader):
MissingTest: 'MISSING', MissingTest: 'MISSING',
BrokenSymlink: 'BROKEN_SYMLINK', BrokenSymlink: 'BROKEN_SYMLINK',
AccessDeniedPath: 'ACCESS_DENIED', AccessDeniedPath: 'ACCESS_DENIED',
test.Test: 'INSTRUMENTED'} test.Test: 'INSTRUMENTED',
test.PythonUnittest: 'PyUNITTEST'}
@staticmethod @staticmethod
def get_decorator_mapping(): def get_decorator_mapping():
...@@ -520,7 +524,8 @@ class FileLoader(TestLoader): ...@@ -520,7 +524,8 @@ class FileLoader(TestLoader):
MissingTest: output.TERM_SUPPORT.fail_header_str, MissingTest: output.TERM_SUPPORT.fail_header_str,
BrokenSymlink: output.TERM_SUPPORT.fail_header_str, BrokenSymlink: output.TERM_SUPPORT.fail_header_str,
AccessDeniedPath: output.TERM_SUPPORT.fail_header_str, AccessDeniedPath: output.TERM_SUPPORT.fail_header_str,
test.Test: output.TERM_SUPPORT.healthy_str} test.Test: output.TERM_SUPPORT.healthy_str,
test.PythonUnittest: output.TERM_SUPPORT.healthy_str}
def discover(self, reference, which_tests=DEFAULT): def discover(self, reference, which_tests=DEFAULT):
""" """
...@@ -620,9 +625,11 @@ class FileLoader(TestLoader): ...@@ -620,9 +625,11 @@ class FileLoader(TestLoader):
:type path: str :type path: str
:param class_name: the specific class to be found :param class_name: the specific class to be found
:type path: str :type path: str
:returns: dict with class name and additional info such as method names :returns: tuple where first item is dict with class name and additional
and tags info such as method names and tags; the second item is
:rtype: dict set of class names which look like avocado tests but are
force-disabled.
:rtype: tuple
""" """
# If only the Test class was imported from the avocado namespace # If only the Test class was imported from the avocado namespace
test_import = False test_import = False
...@@ -634,6 +641,7 @@ class FileLoader(TestLoader): ...@@ -634,6 +641,7 @@ class FileLoader(TestLoader):
mod_import_name = None mod_import_name = None
# The resulting test classes # The resulting test classes
result = {} result = {}
disabled = set()
if os.path.isdir(path): if os.path.isdir(path):
path = os.path.join(path, "__init__.py") path = os.path.join(path, "__init__.py")
...@@ -679,6 +687,7 @@ class FileLoader(TestLoader): ...@@ -679,6 +687,7 @@ class FileLoader(TestLoader):
has_disable = safeloader.check_docstring_directive(docstring, has_disable = safeloader.check_docstring_directive(docstring,
'disable') 'disable')
if (has_disable and class_name is None): if (has_disable and class_name is None):
disabled.add(statement.name)
continue continue
cl_tags = safeloader.get_docstring_directives_tags(docstring) cl_tags = safeloader.get_docstring_directives_tags(docstring)
...@@ -709,12 +718,12 @@ class FileLoader(TestLoader): ...@@ -709,12 +718,12 @@ class FileLoader(TestLoader):
# Looking for a 'class FooTest(Parent)' # Looking for a 'class FooTest(Parent)'
else: else:
parent_class = parent.id parent_class = parent.id
res, dis = self._find_avocado_tests(path, parent_class)
res = self._find_avocado_tests(path, parent_class)
if res: if res:
parents.remove(parent) parents.remove(parent)
for cls in res: for cls in res:
info.extend(res[cls]) info.extend(res[cls])
disabled.update(dis)
# If there are parents left to be discovered, they # If there are parents left to be discovered, they
# might be in a different module. # might be in a different module.
...@@ -756,11 +765,12 @@ class FileLoader(TestLoader): ...@@ -756,11 +765,12 @@ class FileLoader(TestLoader):
parent_module = node.module parent_module = node.module
_, ppath, _ = imp.find_module(parent_module, _, ppath, _ = imp.find_module(parent_module,
modules_paths) modules_paths)
res = self._find_avocado_tests(ppath, res, dis = self._find_avocado_tests(ppath,
parent_class) parent_class)
if res: if res:
for cls in res: for cls in res:
info.extend(res[cls]) info.extend(res[cls])
disabled.update(dis)
continue continue
...@@ -785,7 +795,7 @@ class FileLoader(TestLoader): ...@@ -785,7 +795,7 @@ class FileLoader(TestLoader):
result[statement.name] = info result[statement.name] = info
continue continue
return result return result, disabled
@staticmethod @staticmethod
def _get_methods_info(statement_body, class_tags): def _get_methods_info(statement_body, class_tags):
...@@ -803,15 +813,35 @@ class FileLoader(TestLoader): ...@@ -803,15 +813,35 @@ class FileLoader(TestLoader):
return methods_info return methods_info
def _make_avocado_tests(self, test_path, make_broken, subtests_filter, def _find_python_unittests(self, test_path, disabled, subtests_filter):
test_name=None): result = []
class_methods = safeloader.find_class_and_methods(test_path,
_RE_UNIT_TEST)
for klass, methods in class_methods.iteritems():
if klass in disabled:
continue
if test_path.endswith(".py"):
test_path = test_path[:-3]
test_module_name = os.path.relpath(test_path)
test_module_name = test_module_name.replace(os.path.sep, ".")
candidates = ["%s.%s.%s" % (test_module_name, klass, method)
for method in methods]
if subtests_filter:
result += [_ for _ in candidates if subtests_filter.search(_)]
else:
result += candidates
return result
def _make_existing_file_tests(self, test_path, make_broken,
subtests_filter, test_name=None):
if test_name is None: if test_name is None:
test_name = test_path test_name = test_path
try: try:
tests = self._find_avocado_tests(test_path) # Avocado tests
if tests: avocado_tests, disabled = self._find_avocado_tests(test_path)
if avocado_tests:
test_factories = [] test_factories = []
for test_class, info in tests.items(): for test_class, info in avocado_tests.items():
if isinstance(test_class, str): if isinstance(test_class, str):
for test_method, tags in info: for test_method, tags in info:
name = test_name + \ name = test_name + \
...@@ -825,6 +855,21 @@ class FileLoader(TestLoader): ...@@ -825,6 +855,21 @@ class FileLoader(TestLoader):
'tags': tags}) 'tags': tags})
test_factories.append(tst) test_factories.append(tst)
return test_factories return test_factories
# Python unittests
old_dir = os.getcwd()
try:
py_test_dir = os.path.abspath(os.path.dirname(test_path))
py_test_name = os.path.basename(test_path)
os.chdir(py_test_dir)
python_unittests = self._find_python_unittests(py_test_name,
disabled,
subtests_filter)
finally:
os.chdir(old_dir)
if python_unittests:
return [(test.PythonUnittest, {"name": name,
"test_dir": py_test_dir})
for name in python_unittests]
else: else:
if os.access(test_path, os.X_OK): if os.access(test_path, os.X_OK):
# Module does not have an avocado test class inside but # Module does not have an avocado test class inside but
...@@ -883,7 +928,7 @@ class FileLoader(TestLoader): ...@@ -883,7 +928,7 @@ class FileLoader(TestLoader):
"readable") "readable")
path_analyzer = path.PathInspector(test_path) path_analyzer = path.PathInspector(test_path)
if path_analyzer.is_python(): if path_analyzer.is_python():
return self._make_avocado_tests(test_path, make_broken, return self._make_existing_file_tests(test_path, make_broken,
subtests_filter) subtests_filter)
else: else:
if os.access(test_path, os.X_OK): if os.access(test_path, os.X_OK):
...@@ -905,8 +950,9 @@ class FileLoader(TestLoader): ...@@ -905,8 +950,9 @@ class FileLoader(TestLoader):
# Try to resolve test ID (keep compatibility) # Try to resolve test ID (keep compatibility)
test_path = os.path.join(data_dir.get_test_dir(), test_name) test_path = os.path.join(data_dir.get_test_dir(), test_name)
if os.path.exists(test_path): if os.path.exists(test_path):
return self._make_avocado_tests(test_path, make_broken, return self._make_existing_file_tests(test_path, make_broken,
subtests_filter, test_name) subtests_filter,
test_name)
else: else:
if not subtests_filter and ':' in test_name: if not subtests_filter and ':' in test_name:
test_name, subtests_filter = test_name.split(':', 1) test_name, subtests_filter = test_name.split(':', 1)
...@@ -914,7 +960,8 @@ class FileLoader(TestLoader): ...@@ -914,7 +960,8 @@ class FileLoader(TestLoader):
test_name) test_name)
if os.path.exists(test_path): if os.path.exists(test_path):
subtests_filter = re.compile(subtests_filter) subtests_filter = re.compile(subtests_filter)
return self._make_avocado_tests(test_path, make_broken, return self._make_existing_file_tests(test_path,
make_broken,
subtests_filter, subtests_filter,
test_name) test_name)
return make_broken(NotATest, test_name, "File not found " return make_broken(NotATest, test_name, "File not found "
...@@ -961,10 +1008,7 @@ class ExternalLoader(TestLoader): ...@@ -961,10 +1008,7 @@ class ExternalLoader(TestLoader):
'"--external-runner-chdir=test".') '"--external-runner-chdir=test".')
raise LoaderError(msg) raise LoaderError(msg)
cls_external_runner = collections.namedtuple('ExternalLoader', return test.ExternalRunnerSpec(runner, chdir, test_dir)
['runner', 'chdir',
'test_dir'])
return cls_external_runner(runner, chdir, test_dir)
elif chdir: elif chdir:
msg = ('Option "--external-runner-chdir" requires ' msg = ('Option "--external-runner-chdir" requires '
'"--external-runner" to be set.') '"--external-runner" to be set.')
......
...@@ -921,6 +921,16 @@ class SimpleTest(Test): ...@@ -921,6 +921,16 @@ class SimpleTest(Test):
self._execute_cmd() self._execute_cmd()
class ExternalRunnerSpec(object):

    """
    Plain value object carrying the options consumed by ExternalRunnerTest.

    :param runner: the external runner command line
    :param chdir: optional directive describing where to chdir before
                  executing the test
    :param test_dir: optional directory containing the test file
    """

    def __init__(self, runner, chdir=None, test_dir=None):
        # No behavior here beyond storing the supplied values
        self.runner, self.chdir, self.test_dir = runner, chdir, test_dir
class ExternalRunnerTest(SimpleTest): class ExternalRunnerTest(SimpleTest):
def __init__(self, name, params=None, base_logdir=None, job=None, def __init__(self, name, params=None, base_logdir=None, job=None,
...@@ -963,6 +973,45 @@ class ExternalRunnerTest(SimpleTest): ...@@ -963,6 +973,45 @@ class ExternalRunnerTest(SimpleTest):
os.chdir(pre_cwd) os.chdir(pre_cwd)
class PythonUnittest(ExternalRunnerTest):

    """
    Python unittest test

    Runs a classic python unittest via an external
    ``python -m unittest`` process and maps the unittest outcome
    (OK/FAILED/skipped) onto Avocado test statuses.
    """

    def __init__(self, name, params=None, base_logdir=None, job=None,
                 test_dir=None):
        # -q: quiet; -c: catch Ctrl-C and finish the running test gracefully
        runner = "%s -m unittest -q -c" % sys.executable
        # chdir into the test dir ("test") so the dotted test name resolves
        external_runner = ExternalRunnerSpec(runner, "test", test_dir)
        super(PythonUnittest, self).__init__(name, params, base_logdir, job,
                                             external_runner=external_runner)

    def _find_result(self, status="OK"):
        """
        Scan the test log for the unittest summary line matching *status*.

        :param status: summary keyword to look for (e.g. "OK", "FAILED")
        :returns: the matching log line; calls ``self.error`` (which raises)
                  when no summary can be located
        """
        status_line = "[stderr] %s" % status
        with open(self.logfile) as logfile:
            lines = iter(logfile)
            # First skip ahead to the "Ran N test in ..." marker; the
            # status summary follows it in the unittest stderr output.
            for line in lines:
                if "[stderr] Ran 1 test in" in line:
                    break
            for line in lines:
                if status_line in line:
                    return line
        self.error("Fail to parse status from test result.")

    def test(self):
        try:
            super(PythonUnittest, self).test()
        except exceptions.TestFail:
            # The external runner failed; inspect the unittest summary to
            # distinguish errors from failures. Both self.error and
            # self.fail raise, ending the test here.
            status = self._find_result("FAILED")
            if "errors" in status:
                self.error("Unittest reported error(s)")
            elif "failures" in status:
                self.fail("Unittest reported failure(s)")
            else:
                self.error("Unknown failure executing the unittest")
        # Only reached on success; a skipped unittest still reports "OK"
        # (with a "skipped=N" note), which Avocado maps to CANCEL.
        status = self._find_result("OK")
        if "skipped" in status:
            self.cancel("Unittest reported skip")
class MockingTest(Test): class MockingTest(Test):
""" """
......
...@@ -104,11 +104,11 @@ please refer to the corresponding loader/plugin documentation. ...@@ -104,11 +104,11 @@ please refer to the corresponding loader/plugin documentation.
File Loader File Loader
----------- -----------
For the File Loader, the loader responsible for discovering INSTRUMENTED For the File Loader, the loader responsible for discovering INSTRUMENTED,
and SIMPLE tests, the Test Reference is a path/filename of a test file. PyUNITTEST (classic python unittests) and SIMPLE tests.
If the file corresponds to an INSTRUMENTED test, you can filter the Test If the file corresponds to an INSTRUMENTED or PyUNITTEST test, you can filter
IDs by adding to the Test Reference a ``:`` followed by a regular the Test IDs by adding to the Test Reference a ``:`` followed by a regular
expression. expression.
For instance, if you want to list all tests that are present in the For instance, if you want to list all tests that are present in the
......
...@@ -129,8 +129,8 @@ Example of Test IDs:: ...@@ -129,8 +129,8 @@ Example of Test IDs::
Test Types Test Types
========== ==========
Avocado at its simplest configuration can run two different types of tests [#f1]_. You can mix Avocado at its simplest configuration can run three different types of tests
and match those in a single job. [#f1]_. You can mix and match those in a single job.
Instrumented Instrumented
------------ ------------
...@@ -148,6 +148,16 @@ Test statuses ``PASS``, ``WARN``, ``START`` and ``SKIP`` are considered as ...@@ -148,6 +148,16 @@ Test statuses ``PASS``, ``WARN``, ``START`` and ``SKIP`` are considered as
successful builds. The ``ABORT``, ``ERROR``, ``FAIL``, ``ALERT``, ``RUNNING``, successful builds. The ``ABORT``, ``ERROR``, ``FAIL``, ``ALERT``, ``RUNNING``,
``NOSTATUS`` and ``INTERRUPTED`` are considered as failed ones. ``NOSTATUS`` and ``INTERRUPTED`` are considered as failed ones.
Python unittest
---------------
The discovery of classic python unittests is also supported. Note that,
unlike the python unittest runner, Avocado uses static analysis to find the
individual tests, so dynamically created test cases are not recognized. Also
note that the unittest result SKIP is reported as CANCEL in Avocado, because
the meaning of SKIP in unittest differs from Avocado's definition. Apart from
that, there should be no surprises when running unittests via Avocado.
Simple Simple
------ ------
......
#!/usr/bin/env python
"""
Fixture for the loader functional tests: classic python unittests covering
the pass, fail, error and skip outcomes. Class and method names, as well
as the messages, are asserted by the functional test suite - keep in sync.
"""
import unittest


class First(unittest.TestCase):

    def test_pass(self):
        pass


class Second(unittest.TestCase):

    def test_fail(self):
        self.fail("this is suppose to fail")

    def test_error(self):
        raise RuntimeError("This is suppose to error")

    @unittest.skip("This is suppose to be skipped")
    def test_skip(self):
        pass


if __name__ == "__main__":
    unittest.main()
...@@ -93,7 +93,7 @@ if [ "$AVOCADO_PARALLEL_CHECK" ]; then ...@@ -93,7 +93,7 @@ if [ "$AVOCADO_PARALLEL_CHECK" ]; then
elif [ -z "$AVOCADO_SELF_CHECK" ]; then elif [ -z "$AVOCADO_SELF_CHECK" ]; then
run_rc selftests selftests/run run_rc selftests selftests/run
else else
CMD='scripts/avocado run --job-results-dir=$(mktemp -d) `./contrib/scripts/avocado-find-unittests selftests/{unit,functional,doc}/*.py | xargs` --external-runner="/usr/bin/env python -m unittest"' CMD='scripts/avocado run --job-results-dir=$(mktemp -d) selftests/{unit,functional,doc}'
[ ! $SELF_CHECK_CONTINUOUS ] && CMD+=" --failfast on" [ ! $SELF_CHECK_CONTINUOUS ] && CMD+=" --failfast on"
run_rc selftests "$CMD" run_rc selftests "$CMD"
fi fi
......
此差异已折叠。
import os import os
import json
import subprocess import subprocess
import time import time
import stat import stat
...@@ -108,6 +109,9 @@ from avocado import main ...@@ -108,6 +109,9 @@ from avocado import main
from test2 import * from test2 import *
class BasicTestSuite(SuperTest): class BasicTestSuite(SuperTest):
'''
:avocado: disable
'''
def test1(self): def test1(self):
self.xxx() self.xxx()
...@@ -319,6 +323,30 @@ class LoaderTestFunctional(unittest.TestCase): ...@@ -319,6 +323,30 @@ class LoaderTestFunctional(unittest.TestCase):
self.assertEqual(test, 11, "Number of tests is not 12 (%s):\n%s" self.assertEqual(test, 11, "Number of tests is not 12 (%s):\n%s"
% (test, result)) % (test, result))
    def test_python_unittest(self):
        """
        Run the unittests.py fixture through avocado and check that each
        python unittest outcome maps to the expected Avocado status
        (FAIL, ERROR, CANCEL for skip, PASS).
        """
        test_path = os.path.join(basedir, "selftests", ".data", "unittests.py")
        cmd = ("%s run --sysinfo=off --job-results-dir %s --json - -- %s"
               % (AVOCADO, self.tmpdir, test_path))
        result = process.run(cmd, ignore_status=True)
        jres = json.loads(result.stdout)
        # Exit status 1 is expected: some of the fixture tests fail on purpose
        self.assertEqual(result.exit_status, 1, result)
        exps = [("unittests.Second.test_fail", "FAIL"),
                ("unittests.Second.test_error", "ERROR"),
                ("unittests.Second.test_skip", "CANCEL"),
                ("unittests.First.test_pass", "PASS")]
        for test in jres["tests"]:
            for exp in exps:
                if exp[0] in test["id"]:
                    self.assertEqual(test["status"], exp[1], "Status of %s not"
                                     " as expected\n%s" % (exp, result))
                    # Each expectation may only be consumed once
                    exps.remove(exp)
                    break
            else:
                # for/else: no expectation matched this reported test id
                self.fail("No expected result for %s\n%s\n\nexps = %s"
                          % (test["id"], result, exps))
        # All expectations must have been matched by the reported results
        self.assertFalse(exps, "Some expected result not matched to actual"
                         "results:\n%s\n\nexps = %s" % (result, exps))
def tearDown(self): def tearDown(self):
shutil.rmtree(self.tmpdir) shutil.rmtree(self.tmpdir)
......
...@@ -99,11 +99,11 @@ class OutputTest(unittest.TestCase): ...@@ -99,11 +99,11 @@ class OutputTest(unittest.TestCase):
def setUp(self): def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__) self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
os.chdir(basedir)
@unittest.skipIf(missing_binary('cc'), @unittest.skipIf(missing_binary('cc'),
"C compiler is required by the underlying doublefree.py test") "C compiler is required by the underlying doublefree.py test")
def test_output_doublefree(self): def test_output_doublefree(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off ' cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'doublefree.py' % (AVOCADO, self.tmpdir)) 'doublefree.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True) result = process.run(cmd_line, ignore_status=True)
...@@ -133,7 +133,6 @@ class OutputTest(unittest.TestCase): ...@@ -133,7 +133,6 @@ class OutputTest(unittest.TestCase):
test = script.Script(os.path.join(self.tmpdir, "output_test.py"), test = script.Script(os.path.join(self.tmpdir, "output_test.py"),
OUTPUT_TEST_CONTENT) OUTPUT_TEST_CONTENT)
test.save() test.save()
os.chdir(basedir)
result = process.run("%s run --job-results-dir %s --sysinfo=off " result = process.run("%s run --job-results-dir %s --sysinfo=off "
"--json - -- %s" % (AVOCADO, self.tmpdir, test)) "--json - -- %s" % (AVOCADO, self.tmpdir, test))
res = json.loads(result.stdout) res = json.loads(result.stdout)
...@@ -162,6 +161,7 @@ class OutputPluginTest(unittest.TestCase): ...@@ -162,6 +161,7 @@ class OutputPluginTest(unittest.TestCase):
def setUp(self): def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__) self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
os.chdir(basedir)
def check_output_files(self, debug_log): def check_output_files(self, debug_log):
base_dir = os.path.dirname(debug_log) base_dir = os.path.dirname(debug_log)
...@@ -183,7 +183,6 @@ class OutputPluginTest(unittest.TestCase): ...@@ -183,7 +183,6 @@ class OutputPluginTest(unittest.TestCase):
self.assertIn("\n# debug.log of ", tap) self.assertIn("\n# debug.log of ", tap)
def test_output_incompatible_setup(self): def test_output_incompatible_setup(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off ' cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit - --json - passtest.py' % (AVOCADO, self.tmpdir)) '--xunit - --json - passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True) result = process.run(cmd_line, ignore_status=True)
...@@ -202,7 +201,6 @@ class OutputPluginTest(unittest.TestCase): ...@@ -202,7 +201,6 @@ class OutputPluginTest(unittest.TestCase):
@unittest.skipIf(html_uncapable(), @unittest.skipIf(html_uncapable(),
"Uncapable of Avocado Result HTML plugin") "Uncapable of Avocado Result HTML plugin")
def test_output_incompatible_setup_2(self): def test_output_incompatible_setup_2(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off ' cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--html - passtest.py' % (AVOCADO, self.tmpdir)) '--html - passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True) result = process.run(cmd_line, ignore_status=True)
...@@ -217,7 +215,6 @@ class OutputPluginTest(unittest.TestCase): ...@@ -217,7 +215,6 @@ class OutputPluginTest(unittest.TestCase):
def test_output_compatible_setup(self): def test_output_compatible_setup(self):
tmpfile = tempfile.mktemp() tmpfile = tempfile.mktemp()
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off ' cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--journal --xunit %s --json - passtest.py' % '--journal --xunit %s --json - passtest.py' %
(AVOCADO, self.tmpdir, tmpfile)) (AVOCADO, self.tmpdir, tmpfile))
...@@ -239,7 +236,6 @@ class OutputPluginTest(unittest.TestCase): ...@@ -239,7 +236,6 @@ class OutputPluginTest(unittest.TestCase):
def test_output_compatible_setup_2(self): def test_output_compatible_setup_2(self):
tmpfile = tempfile.mktemp() tmpfile = tempfile.mktemp()
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off ' cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit - --json %s passtest.py' % '--xunit - --json %s passtest.py' %
(AVOCADO, self.tmpdir, tmpfile)) (AVOCADO, self.tmpdir, tmpfile))
...@@ -269,7 +265,6 @@ class OutputPluginTest(unittest.TestCase): ...@@ -269,7 +265,6 @@ class OutputPluginTest(unittest.TestCase):
tmpfile2 = tempfile.mktemp(prefix='avocado_' + __name__) tmpfile2 = tempfile.mktemp(prefix='avocado_' + __name__)
tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__) tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
tmpfile3 = tempfile.mktemp(dir=tmpdir) tmpfile3 = tempfile.mktemp(dir=tmpdir)
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off ' cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit %s --json %s --html %s passtest.py' '--xunit %s --json %s --html %s passtest.py'
% (AVOCADO, self.tmpdir, tmpfile, tmpfile2, tmpfile3)) % (AVOCADO, self.tmpdir, tmpfile, tmpfile2, tmpfile3))
...@@ -301,7 +296,6 @@ class OutputPluginTest(unittest.TestCase): ...@@ -301,7 +296,6 @@ class OutputPluginTest(unittest.TestCase):
def test_output_compatible_setup_nooutput(self): def test_output_compatible_setup_nooutput(self):
tmpfile = tempfile.mktemp() tmpfile = tempfile.mktemp()
tmpfile2 = tempfile.mktemp() tmpfile2 = tempfile.mktemp()
os.chdir(basedir)
# Verify --silent can be supplied as app argument # Verify --silent can be supplied as app argument
cmd_line = ('%s --silent run --job-results-dir %s ' cmd_line = ('%s --silent run --job-results-dir %s '
'--sysinfo=off --xunit %s --json %s passtest.py' '--sysinfo=off --xunit %s --json %s passtest.py'
...@@ -348,7 +342,6 @@ class OutputPluginTest(unittest.TestCase): ...@@ -348,7 +342,6 @@ class OutputPluginTest(unittest.TestCase):
self.check_output_files(debug_log) self.check_output_files(debug_log)
def test_show_job_log(self): def test_show_job_log(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off ' cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py --show-job-log' % (AVOCADO, self.tmpdir)) 'passtest.py --show-job-log' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True) result = process.run(cmd_line, ignore_status=True)
...@@ -364,7 +357,6 @@ class OutputPluginTest(unittest.TestCase): ...@@ -364,7 +357,6 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(len(job_id), 40) self.assertEqual(len(job_id), 40)
def test_silent_trumps_show_job_log(self): def test_silent_trumps_show_job_log(self):
os.chdir(basedir)
# Also verify --silent can be supplied as run option # Also verify --silent can be supplied as run option
cmd_line = ('%s run --silent --job-results-dir %s ' cmd_line = ('%s run --silent --job-results-dir %s '
'--sysinfo=off passtest.py --show-job-log' '--sysinfo=off passtest.py --show-job-log'
...@@ -378,7 +370,6 @@ class OutputPluginTest(unittest.TestCase): ...@@ -378,7 +370,6 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(output, "") self.assertEqual(output, "")
def test_default_enabled_plugins(self): def test_default_enabled_plugins(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off ' cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py' % (AVOCADO, self.tmpdir)) 'passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True) result = process.run(cmd_line, ignore_status=True)
...@@ -400,7 +391,6 @@ class OutputPluginTest(unittest.TestCase): ...@@ -400,7 +391,6 @@ class OutputPluginTest(unittest.TestCase):
def test_verify_whiteboard_save(self): def test_verify_whiteboard_save(self):
tmpfile = tempfile.mktemp() tmpfile = tempfile.mktemp()
try: try:
os.chdir(basedir)
config = os.path.join(self.tmpdir, "conf.ini") config = os.path.join(self.tmpdir, "conf.ini")
content = ("[datadir.paths]\nlogs_dir = %s" content = ("[datadir.paths]\nlogs_dir = %s"
% os.path.relpath(self.tmpdir, ".")) % os.path.relpath(self.tmpdir, "."))
...@@ -431,7 +421,6 @@ class OutputPluginTest(unittest.TestCase): ...@@ -431,7 +421,6 @@ class OutputPluginTest(unittest.TestCase):
def test_gendata(self): def test_gendata(self):
tmpfile = tempfile.mktemp() tmpfile = tempfile.mktemp()
try: try:
os.chdir(basedir)
cmd_line = ("%s run --job-results-dir %s " cmd_line = ("%s run --job-results-dir %s "
"--sysinfo=off gendata.py --json %s" % "--sysinfo=off gendata.py --json %s" %
(AVOCADO, self.tmpdir, tmpfile)) (AVOCADO, self.tmpdir, tmpfile))
...@@ -470,7 +459,6 @@ class OutputPluginTest(unittest.TestCase): ...@@ -470,7 +459,6 @@ class OutputPluginTest(unittest.TestCase):
def test_redirect_output(self): def test_redirect_output(self):
redirected_output_path = tempfile.mktemp() redirected_output_path = tempfile.mktemp()
try: try:
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s ' cmd_line = ('%s run --job-results-dir %s '
'--sysinfo=off passtest.py > %s' '--sysinfo=off passtest.py > %s'
% (AVOCADO, self.tmpdir, redirected_output_path)) % (AVOCADO, self.tmpdir, redirected_output_path))
...@@ -501,11 +489,9 @@ class OutputPluginTest(unittest.TestCase): ...@@ -501,11 +489,9 @@ class OutputPluginTest(unittest.TestCase):
PERL_TAP_PARSER_SNIPPET PERL_TAP_PARSER_SNIPPET
% self.tmpdir) % self.tmpdir)
perl_script.save() perl_script.save()
os.chdir(basedir)
process.run("perl %s" % perl_script) process.run("perl %s" % perl_script)
def test_tap_totaltests(self): def test_tap_totaltests(self):
os.chdir(basedir)
cmd_line = ("%s run passtest.py " cmd_line = ("%s run passtest.py "
"-m examples/tests/sleeptest.py.data/sleeptest.yaml " "-m examples/tests/sleeptest.py.data/sleeptest.yaml "
"--job-results-dir %s " "--job-results-dir %s "
...@@ -516,7 +502,6 @@ class OutputPluginTest(unittest.TestCase): ...@@ -516,7 +502,6 @@ class OutputPluginTest(unittest.TestCase):
% (expr, result.stdout)) % (expr, result.stdout))
def test_broken_pipe(self): def test_broken_pipe(self):
os.chdir(basedir)
cmd_line = "(%s run --help | whacky-unknown-command)" % AVOCADO cmd_line = "(%s run --help | whacky-unknown-command)" % AVOCADO
result = process.run(cmd_line, shell=True, ignore_status=True, result = process.run(cmd_line, shell=True, ignore_status=True,
env={"LC_ALL": "C"}) env={"LC_ALL": "C"})
...@@ -530,7 +515,6 @@ class OutputPluginTest(unittest.TestCase): ...@@ -530,7 +515,6 @@ class OutputPluginTest(unittest.TestCase):
self.assertNotIn("Avocado crashed", result.stderr) self.assertNotIn("Avocado crashed", result.stderr)
def test_results_plugins_no_tests(self): def test_results_plugins_no_tests(self):
os.chdir(basedir)
cmd_line = ("%s run UNEXISTING --job-results-dir %s" cmd_line = ("%s run UNEXISTING --job-results-dir %s"
% (AVOCADO, self.tmpdir)) % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True) result = process.run(cmd_line, ignore_status=True)
......
...@@ -7,6 +7,8 @@ from avocado.core import exit_codes ...@@ -7,6 +7,8 @@ from avocado.core import exit_codes
from avocado.utils import process from avocado.utils import process
from avocado.utils import script from avocado.utils import script
BASEDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
BASEDIR = os.path.abspath(BASEDIR)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado") AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
...@@ -47,6 +49,7 @@ class JobScriptsTest(unittest.TestCase): ...@@ -47,6 +49,7 @@ class JobScriptsTest(unittest.TestCase):
os.mkdir(self.pre_dir) os.mkdir(self.pre_dir)
self.post_dir = os.path.join(self.tmpdir, 'post.d') self.post_dir = os.path.join(self.tmpdir, 'post.d')
os.mkdir(self.post_dir) os.mkdir(self.post_dir)
os.chdir(BASEDIR)
def test_pre_post(self): def test_pre_post(self):
""" """
......
...@@ -153,6 +153,7 @@ class TestStatuses(unittest.TestCase): ...@@ -153,6 +153,7 @@ class TestStatuses(unittest.TestCase):
".data", ".data",
'test_statuses.yaml')) 'test_statuses.yaml'))
os.chdir(basedir)
cmd = ('%s run %s -m %s --sysinfo=off --job-results-dir %s --json -' % cmd = ('%s run %s -m %s --sysinfo=off --job-results-dir %s --json -' %
(AVOCADO, test_file, yaml_file, self.tmpdir)) (AVOCADO, test_file, yaml_file, self.tmpdir))
......
...@@ -17,6 +17,7 @@ class StreamsTest(unittest.TestCase): ...@@ -17,6 +17,7 @@ class StreamsTest(unittest.TestCase):
def setUp(self): def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__) self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
os.chdir(basedir)
def test_app_info_stdout(self): def test_app_info_stdout(self):
""" """
......
...@@ -157,8 +157,8 @@ class ProcessTest(unittest.TestCase): ...@@ -157,8 +157,8 @@ class ProcessTest(unittest.TestCase):
def file_lock_action(args): def file_lock_action(args):
path, players = args path, players, max_individual_timeout = args
max_individual_timeout = 0.021 start = time.time()
max_timeout = max_individual_timeout * players max_timeout = max_individual_timeout * players
with FileLock(path, max_timeout): with FileLock(path, max_timeout):
sleeptime = random.random() / 100 sleeptime = random.random() / 100
...@@ -174,9 +174,15 @@ class FileLockTest(unittest.TestCase): ...@@ -174,9 +174,15 @@ class FileLockTest(unittest.TestCase):
"Skipping test that take a long time to run, are " "Skipping test that take a long time to run, are "
"resource intensive or time sensitve") "resource intensive or time sensitve")
def test_filelock(self): def test_filelock(self):
# Calculate the timeout based on t_100_iter + 2e-5*players
start = time.time()
for _ in xrange(100):
with FileLock(self.tmpdir):
pass
timeout = 0.02 + (time.time() - start)
players = 1000 players = 1000
pool = multiprocessing.Pool(players) pool = multiprocessing.Pool(players)
args = [(self.tmpdir, players)] * players args = [(self.tmpdir, players, timeout)] * players
try: try:
pool.map(file_lock_action, args) pool.map(file_lock_action, args)
except: except:
......
...@@ -8,6 +8,7 @@ import unittest ...@@ -8,6 +8,7 @@ import unittest
from avocado.core import test from avocado.core import test
from avocado.core import loader from avocado.core import loader
from avocado.core import test
from avocado.utils import script from avocado.utils import script
# We need to access protected members pylint: disable=W0212 # We need to access protected members pylint: disable=W0212
...@@ -238,6 +239,14 @@ class ThirdChild(Test, SecondChild): ...@@ -238,6 +239,14 @@ class ThirdChild(Test, SecondChild):
pass pass
""" """
PYTHON_UNITTEST = """#!/usr/bin/env python
from unittest import TestCase
class SampleTest(TestCase):
def test(self):
pass
"""
class LoaderTest(unittest.TestCase): class LoaderTest(unittest.TestCase):
...@@ -513,7 +522,7 @@ class LoaderTest(unittest.TestCase): ...@@ -513,7 +522,7 @@ class LoaderTest(unittest.TestCase):
KEEP_METHODS_ORDER) KEEP_METHODS_ORDER)
avocado_keep_methods_order.save() avocado_keep_methods_order.save()
expected_order = ['test2', 'testA', 'test1', 'testZZZ', 'test'] expected_order = ['test2', 'testA', 'test1', 'testZZZ', 'test']
tests = self.loader._find_avocado_tests(avocado_keep_methods_order.path) tests = self.loader._find_avocado_tests(avocado_keep_methods_order.path)[0]
methods = [method[0] for method in tests['MyClass']] methods = [method[0] for method in tests['MyClass']]
self.assertEqual(expected_order, methods) self.assertEqual(expected_order, methods)
avocado_keep_methods_order.remove() avocado_keep_methods_order.remove()
...@@ -529,13 +538,29 @@ class LoaderTest(unittest.TestCase): ...@@ -529,13 +538,29 @@ class LoaderTest(unittest.TestCase):
avocado_recursive_discovery_test2.save() avocado_recursive_discovery_test2.save()
sys.path.append(os.path.dirname(avocado_recursive_discovery_test1.path)) sys.path.append(os.path.dirname(avocado_recursive_discovery_test1.path))
tests = self.loader._find_avocado_tests(avocado_recursive_discovery_test2.path) tests = self.loader._find_avocado_tests(avocado_recursive_discovery_test2.path)[0]
expected = {'ThirdChild': [('test_third_child', set([])), expected = {'ThirdChild': [('test_third_child', set([])),
('test_second_child', set([])), ('test_second_child', set([])),
('test_first_child', set([])), ('test_first_child', set([])),
('test_basic', set([]))]} ('test_basic', set([]))]}
self.assertEqual(expected, tests) self.assertEqual(expected, tests)
    def test_python_unittest(self):
        """
        Check that a force-disabled avocado test is not discovered at all,
        while a classic python unittest file is discovered as a
        PythonUnittest test factory.
        """
        disabled_test = script.TemporaryScript("disabled.py",
                                               AVOCADO_TEST_OK_DISABLED,
                                               mode=DEFAULT_NON_EXEC_MODE)
        python_unittest = script.TemporaryScript("python_unittest.py",
                                                 PYTHON_UNITTEST)
        disabled_test.save()
        python_unittest.save()
        # A ":avocado: disable" test must yield no discovered tests
        tests = self.loader.discover(disabled_test.path)
        self.assertEqual(tests, [])
        tests = self.loader.discover(python_unittest.path)
        # Expected factory: (class, params) with dotted unittest name
        exp = [(test.PythonUnittest,
                {"name": "python_unittest.SampleTest.test",
                 "test_dir": os.path.dirname(python_unittest.path)})]
        self.assertEqual(tests, exp)
def tearDown(self): def tearDown(self):
shutil.rmtree(self.tmpdir) shutil.rmtree(self.tmpdir)
......
import copy import copy
import itertools import itertools
import os
import pickle import pickle
import unittest import unittest
import yaml import yaml
...@@ -8,11 +9,10 @@ import yaml ...@@ -8,11 +9,10 @@ import yaml
import avocado_varianter_yaml_to_mux as yaml_to_mux import avocado_varianter_yaml_to_mux as yaml_to_mux
from avocado.core import mux, tree, varianter from avocado.core import mux, tree, varianter
BASEDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
BASEDIR = os.path.abspath(BASEDIR)
if __name__ == "__main__": PATH_PREFIX = os.path.relpath(BASEDIR) + os.path.sep
PATH_PREFIX = "../../"
else:
PATH_PREFIX = ""
def combine(leaves_pools): def combine(leaves_pools):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册