Unverified commit e2d8d8b9 authored by Cleber Rosa

Merge remote-tracking branch 'ldoktor/unittest-support5'

Signed-off-by: Cleber Rosa <crosa@redhat.com>
@@ -18,7 +18,6 @@ Test loader module.
"""
import ast
import collections
import imp
import inspect
import os
@@ -43,6 +42,10 @@ AVAILABLE = None
ALL = True
# Regexp to find python unittests
_RE_UNIT_TEST = re.compile(r'test.*')
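# Illustrative note (not part of the original patch): the pattern above
# accepts method names such as "test", "test_pass" or "test_fail"; it is
# passed to safeloader.find_class_and_methods() below to pick candidate
# TestCase methods during static analysis.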
class MissingTest(object):
"""
Class representing reference which failed to be discovered
@@ -511,7 +514,8 @@ class FileLoader(TestLoader):
MissingTest: 'MISSING',
BrokenSymlink: 'BROKEN_SYMLINK',
AccessDeniedPath: 'ACCESS_DENIED',
test.Test: 'INSTRUMENTED'}
test.Test: 'INSTRUMENTED',
test.PythonUnittest: 'PyUNITTEST'}
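# Illustrative only (assumed output format): with the mapping above,
# "avocado list -V" would label a discovered unittest method roughly as:
#   PyUNITTEST unittests.First.test_pass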
@staticmethod
def get_decorator_mapping():
@@ -520,7 +524,8 @@ class FileLoader(TestLoader):
MissingTest: output.TERM_SUPPORT.fail_header_str,
BrokenSymlink: output.TERM_SUPPORT.fail_header_str,
AccessDeniedPath: output.TERM_SUPPORT.fail_header_str,
test.Test: output.TERM_SUPPORT.healthy_str}
test.Test: output.TERM_SUPPORT.healthy_str,
test.PythonUnittest: output.TERM_SUPPORT.healthy_str}
def discover(self, reference, which_tests=DEFAULT):
"""
@@ -620,9 +625,11 @@ class FileLoader(TestLoader):
:type path: str
:param class_name: the specific class to be found
:type path: str
:returns: dict with class name and additional info such as method names
and tags
:rtype: dict
:returns: tuple where the first item is a dict with class name and
additional info such as method names and tags; the second
item is a set of class names which look like avocado tests
but are force-disabled.
:rtype: tuple
"""
# If only the Test class was imported from the avocado namespace
test_import = False
@@ -634,6 +641,7 @@
mod_import_name = None
# The resulting test classes
result = {}
disabled = set()
if os.path.isdir(path):
path = os.path.join(path, "__init__.py")
@@ -679,6 +687,7 @@
has_disable = safeloader.check_docstring_directive(docstring,
'disable')
if (has_disable and class_name is None):
disabled.add(statement.name)
continue
cl_tags = safeloader.get_docstring_directives_tags(docstring)
@@ -709,12 +718,12 @@
# Looking for a 'class FooTest(Parent)'
else:
parent_class = parent.id
res = self._find_avocado_tests(path, parent_class)
res, dis = self._find_avocado_tests(path, parent_class)
if res:
parents.remove(parent)
for cls in res:
info.extend(res[cls])
disabled.update(dis)
# If there are parents left to be discovered, they
# might be in a different module.
@@ -756,11 +765,12 @@
parent_module = node.module
_, ppath, _ = imp.find_module(parent_module,
modules_paths)
res = self._find_avocado_tests(ppath,
parent_class)
res, dis = self._find_avocado_tests(ppath,
parent_class)
if res:
for cls in res:
info.extend(res[cls])
disabled.update(dis)
continue
@@ -785,7 +795,7 @@
result[statement.name] = info
continue
return result
return result, disabled
@staticmethod
def _get_methods_info(statement_body, class_tags):
@@ -803,15 +813,35 @@
return methods_info
def _make_avocado_tests(self, test_path, make_broken, subtests_filter,
test_name=None):
def _find_python_unittests(self, test_path, disabled, subtests_filter):
result = []
class_methods = safeloader.find_class_and_methods(test_path,
_RE_UNIT_TEST)
for klass, methods in class_methods.iteritems():
if klass in disabled:
continue
if test_path.endswith(".py"):
test_path = test_path[:-3]
test_module_name = os.path.relpath(test_path)
test_module_name = test_module_name.replace(os.path.sep, ".")
candidates = ["%s.%s.%s" % (test_module_name, klass, method)
for method in methods]
if subtests_filter:
result += [_ for _ in candidates if subtests_filter.search(_)]
else:
result += candidates
return result
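# A worked example of the name composition above (hypothetical file):
# for test_path "selftests/unittests.py" relative to the current
# directory, class "Second" and method "test_fail", the ".py" suffix is
# stripped, os.path.sep becomes ".", and the candidate reference is
# "selftests.unittests.Second.test_fail".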
def _make_existing_file_tests(self, test_path, make_broken,
subtests_filter, test_name=None):
if test_name is None:
test_name = test_path
try:
tests = self._find_avocado_tests(test_path)
if tests:
# Avocado tests
avocado_tests, disabled = self._find_avocado_tests(test_path)
if avocado_tests:
test_factories = []
for test_class, info in tests.items():
for test_class, info in avocado_tests.items():
if isinstance(test_class, str):
for test_method, tags in info:
name = test_name + \
@@ -825,6 +855,21 @@
'tags': tags})
test_factories.append(tst)
return test_factories
# Python unittests
old_dir = os.getcwd()
try:
py_test_dir = os.path.abspath(os.path.dirname(test_path))
py_test_name = os.path.basename(test_path)
os.chdir(py_test_dir)
python_unittests = self._find_python_unittests(py_test_name,
disabled,
subtests_filter)
finally:
os.chdir(old_dir)
if python_unittests:
return [(test.PythonUnittest, {"name": name,
"test_dir": py_test_dir})
for name in python_unittests]
else:
if os.access(test_path, os.X_OK):
# Module does not have an avocado test class inside but
@@ -883,8 +928,8 @@
"readable")
path_analyzer = path.PathInspector(test_path)
if path_analyzer.is_python():
return self._make_avocado_tests(test_path, make_broken,
subtests_filter)
return self._make_existing_file_tests(test_path, make_broken,
subtests_filter)
else:
if os.access(test_path, os.X_OK):
return self._make_test(test.SimpleTest,
@@ -905,8 +950,9 @@
# Try to resolve test ID (keep compatibility)
test_path = os.path.join(data_dir.get_test_dir(), test_name)
if os.path.exists(test_path):
return self._make_avocado_tests(test_path, make_broken,
subtests_filter, test_name)
return self._make_existing_file_tests(test_path, make_broken,
subtests_filter,
test_name)
else:
if not subtests_filter and ':' in test_name:
test_name, subtests_filter = test_name.split(':', 1)
@@ -914,9 +960,10 @@
test_name)
if os.path.exists(test_path):
subtests_filter = re.compile(subtests_filter)
return self._make_avocado_tests(test_path, make_broken,
subtests_filter,
test_name)
return self._make_existing_file_tests(test_path,
make_broken,
subtests_filter,
test_name)
return make_broken(NotATest, test_name, "File not found "
"('%s'; '%s')" % (test_name, test_path))
return make_broken(NotATest, test_name, self.__not_test_str)
@@ -961,10 +1008,7 @@ class ExternalLoader(TestLoader):
'"--external-runner-chdir=test".')
raise LoaderError(msg)
cls_external_runner = collections.namedtuple('ExternalLoader',
['runner', 'chdir',
'test_dir'])
return cls_external_runner(runner, chdir, test_dir)
return test.ExternalRunnerSpec(runner, chdir, test_dir)
elif chdir:
msg = ('Option "--external-runner-chdir" requires '
'"--external-runner" to be set.')
......
@@ -921,6 +921,16 @@ class SimpleTest(Test):
self._execute_cmd()
class ExternalRunnerSpec(object):
"""
Defines the basic options used by ExternalRunner
"""
def __init__(self, runner, chdir=None, test_dir=None):
self.runner = runner
self.chdir = chdir
self.test_dir = test_dir
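# Minimal usage sketch (illustrative values): the class carries the same
# three fields as the collections.namedtuple it replaces in ExternalLoader:
#   spec = ExternalRunnerSpec("/usr/bin/env python -m unittest",
#                             chdir="test", test_dir="/path/to/tests")
#   spec.runner, spec.chdir, spec.test_dir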
class ExternalRunnerTest(SimpleTest):
def __init__(self, name, params=None, base_logdir=None, job=None,
@@ -963,6 +973,45 @@ class ExternalRunnerTest(SimpleTest):
os.chdir(pre_cwd)
class PythonUnittest(ExternalRunnerTest):
"""
Python unittest test
"""
def __init__(self, name, params=None, base_logdir=None, job=None,
test_dir=None):
runner = "%s -m unittest -q -c" % sys.executable
external_runner = ExternalRunnerSpec(runner, "test", test_dir)
super(PythonUnittest, self).__init__(name, params, base_logdir, job,
external_runner=external_runner)
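# A sketch of the resulting invocation (hypothetical values): for name
# "unittests.Second.test_fail" and test_dir "/path/selftests/.data",
# the external runner changes into test_dir and executes roughly:
#   /usr/bin/python -m unittest -q -c unittests.Second.test_fail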
def _find_result(self, status="OK"):
status_line = "[stderr] %s" % status
with open(self.logfile) as logfile:
lines = iter(logfile)
for line in lines:
if "[stderr] Ran 1 test in" in line:
break
for line in lines:
if status_line in line:
return line
self.error("Failed to parse status from test result.")
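# Illustrative log excerpt that _find_result() scans (unittest writes
# its summary to stderr, which ends up "[stderr]"-prefixed in the log):
#   [stderr] Ran 1 test in 0.001s
#   [stderr]
#   [stderr] FAILED (failures=1)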
def test(self):
try:
super(PythonUnittest, self).test()
except exceptions.TestFail:
status = self._find_result("FAILED")
if "errors" in status:
self.error("Unittest reported error(s)")
elif "failures" in status:
self.fail("Unittest reported failure(s)")
else:
self.error("Unknown failure executing the unittest")
status = self._find_result("OK")
if "skipped" in status:
self.cancel("Unittest reported skip")
class MockingTest(Test):
"""
......
@@ -104,11 +104,11 @@ please refer to the corresponding loader/plugin documentation.
File Loader
-----------
For the File Loader, the loader responsible for discovering INSTRUMENTED
and SIMPLE tests, the Test Reference is a path/filename of a test file.
For the File Loader, the loader responsible for discovering INSTRUMENTED,
PyUNITTEST (classic Python unittests) and SIMPLE tests, the Test Reference
is a path/filename of a test file.
If the file corresponds to an INSTRUMENTED test, you can filter the Test
IDs by adding to the Test Reference a ``:`` followed by a regular
If the file corresponds to an INSTRUMENTED or PyUNITTEST test, you can filter
the Test IDs by adding to the Test Reference a ``:`` followed by a regular
expression.
For instance, if you want to list all tests that are present in the
......
@@ -129,8 +129,8 @@ Example of Test IDs::
Test Types
==========
Avocado at its simplest configuration can run two different types of tests [#f1]_. You can mix
and match those in a single job.
Avocado at its simplest configuration can run three different types of tests
[#f1]_. You can mix and match those in a single job.
Instrumented
------------
@@ -148,6 +148,16 @@ Test statuses ``PASS``, ``WARN``, ``START`` and ``SKIP`` are considered as
successful builds. The ``ABORT``, ``ERROR``, ``FAIL``, ``ALERT``, ``RUNNING``,
``NOSTATUS`` and ``INTERRUPTED`` are considered as failed ones.
Python unittest
---------------
The discovery of classic Python unittests is also supported. Note that,
unlike the unittest loader itself, Avocado still uses static analysis to
find the individual tests, so dynamically created test cases are not
recognized. Also note that the unittest SKIP result is reported as CANCEL
in Avocado, as the unittest meaning of SKIP differs from our definition.
Apart from that, there should be no surprises when running unittests via
Avocado.
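For instance, running a file with one passing, one failing, one erroring
and one skipped unittest would report something along these lines
(illustrative output)::

    $ avocado run unittests.py
     (1/4) unittests.First.test_pass: PASS
     (2/4) unittests.Second.test_fail: FAIL
     (3/4) unittests.Second.test_error: ERROR
     (4/4) unittests.Second.test_skip: CANCEL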
Simple
------
......
#!/usr/bin/env python
import unittest
class First(unittest.TestCase):
def test_pass(self):
pass
class Second(unittest.TestCase):
def test_fail(self):
self.fail("this is supposed to fail")
def test_error(self):
raise RuntimeError("This is supposed to error")
@unittest.skip("This is supposed to be skipped")
def test_skip(self):
pass
if __name__ == "__main__":
unittest.main()
@@ -93,7 +93,7 @@ if [ "$AVOCADO_PARALLEL_CHECK" ]; then
elif [ -z "$AVOCADO_SELF_CHECK" ]; then
run_rc selftests selftests/run
else
CMD='scripts/avocado run --job-results-dir=$(mktemp -d) `./contrib/scripts/avocado-find-unittests selftests/{unit,functional,doc}/*.py | xargs` --external-runner="/usr/bin/env python -m unittest"'
CMD='scripts/avocado run --job-results-dir=$(mktemp -d) selftests/{unit,functional,doc}'
[ ! $SELF_CHECK_CONTINUOUS ] && CMD+=" --failfast on"
run_rc selftests "$CMD"
fi
......
This diff is collapsed.
import os
import json
import subprocess
import time
import stat
@@ -108,6 +109,9 @@ from avocado import main
from test2 import *
class BasicTestSuite(SuperTest):
'''
:avocado: disable
'''
def test1(self):
self.xxx()
@@ -319,6 +323,30 @@ class LoaderTestFunctional(unittest.TestCase):
self.assertEqual(test, 11, "Number of tests is not 11 (%s):\n%s"
% (test, result))
def test_python_unittest(self):
test_path = os.path.join(basedir, "selftests", ".data", "unittests.py")
cmd = ("%s run --sysinfo=off --job-results-dir %s --json - -- %s"
% (AVOCADO, self.tmpdir, test_path))
result = process.run(cmd, ignore_status=True)
jres = json.loads(result.stdout)
self.assertEqual(result.exit_status, 1, result)
exps = [("unittests.Second.test_fail", "FAIL"),
("unittests.Second.test_error", "ERROR"),
("unittests.Second.test_skip", "CANCEL"),
("unittests.First.test_pass", "PASS")]
for test in jres["tests"]:
for exp in exps:
if exp[0] in test["id"]:
self.assertEqual(test["status"], exp[1], "Status of %s not"
" as expected\n%s" % (exp, result))
exps.remove(exp)
break
else:
self.fail("No expected result for %s\n%s\n\nexps = %s"
% (test["id"], result, exps))
self.assertFalse(exps, "Some expected result not matched to actual "
"results:\n%s\n\nexps = %s" % (result, exps))
def tearDown(self):
shutil.rmtree(self.tmpdir)
......
@@ -99,11 +99,11 @@ class OutputTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
os.chdir(basedir)
@unittest.skipIf(missing_binary('cc'),
"C compiler is required by the underlying doublefree.py test")
def test_output_doublefree(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'doublefree.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
@@ -133,7 +133,6 @@ class OutputTest(unittest.TestCase):
test = script.Script(os.path.join(self.tmpdir, "output_test.py"),
OUTPUT_TEST_CONTENT)
test.save()
os.chdir(basedir)
result = process.run("%s run --job-results-dir %s --sysinfo=off "
"--json - -- %s" % (AVOCADO, self.tmpdir, test))
res = json.loads(result.stdout)
@@ -162,6 +161,7 @@ class OutputPluginTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
os.chdir(basedir)
def check_output_files(self, debug_log):
base_dir = os.path.dirname(debug_log)
@@ -183,7 +183,6 @@ class OutputPluginTest(unittest.TestCase):
self.assertIn("\n# debug.log of ", tap)
def test_output_incompatible_setup(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit - --json - passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
@@ -202,7 +201,6 @@ class OutputPluginTest(unittest.TestCase):
@unittest.skipIf(html_uncapable(),
"Uncapable of Avocado Result HTML plugin")
def test_output_incompatible_setup_2(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--html - passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
@@ -217,7 +215,6 @@
def test_output_compatible_setup(self):
tmpfile = tempfile.mktemp()
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--journal --xunit %s --json - passtest.py' %
(AVOCADO, self.tmpdir, tmpfile))
@@ -239,7 +236,6 @@
def test_output_compatible_setup_2(self):
tmpfile = tempfile.mktemp()
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit - --json %s passtest.py' %
(AVOCADO, self.tmpdir, tmpfile))
@@ -269,7 +265,6 @@
tmpfile2 = tempfile.mktemp(prefix='avocado_' + __name__)
tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
tmpfile3 = tempfile.mktemp(dir=tmpdir)
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit %s --json %s --html %s passtest.py'
% (AVOCADO, self.tmpdir, tmpfile, tmpfile2, tmpfile3))
@@ -301,7 +296,6 @@
def test_output_compatible_setup_nooutput(self):
tmpfile = tempfile.mktemp()
tmpfile2 = tempfile.mktemp()
os.chdir(basedir)
# Verify --silent can be supplied as app argument
cmd_line = ('%s --silent run --job-results-dir %s '
'--sysinfo=off --xunit %s --json %s passtest.py'
@@ -348,7 +342,6 @@
self.check_output_files(debug_log)
def test_show_job_log(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py --show-job-log' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
@@ -364,7 +357,6 @@
self.assertEqual(len(job_id), 40)
def test_silent_trumps_show_job_log(self):
os.chdir(basedir)
# Also verify --silent can be supplied as run option
cmd_line = ('%s run --silent --job-results-dir %s '
'--sysinfo=off passtest.py --show-job-log'
@@ -378,7 +370,6 @@
self.assertEqual(output, "")
def test_default_enabled_plugins(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
@@ -400,7 +391,6 @@
def test_verify_whiteboard_save(self):
tmpfile = tempfile.mktemp()
try:
os.chdir(basedir)
config = os.path.join(self.tmpdir, "conf.ini")
content = ("[datadir.paths]\nlogs_dir = %s"
% os.path.relpath(self.tmpdir, "."))
@@ -431,7 +421,6 @@
def test_gendata(self):
tmpfile = tempfile.mktemp()
try:
os.chdir(basedir)
cmd_line = ("%s run --job-results-dir %s "
"--sysinfo=off gendata.py --json %s" %
(AVOCADO, self.tmpdir, tmpfile))
@@ -470,7 +459,6 @@
def test_redirect_output(self):
redirected_output_path = tempfile.mktemp()
try:
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s '
'--sysinfo=off passtest.py > %s'
% (AVOCADO, self.tmpdir, redirected_output_path))
@@ -501,11 +489,9 @@
PERL_TAP_PARSER_SNIPPET
% self.tmpdir)
perl_script.save()
os.chdir(basedir)
process.run("perl %s" % perl_script)
def test_tap_totaltests(self):
os.chdir(basedir)
cmd_line = ("%s run passtest.py "
"-m examples/tests/sleeptest.py.data/sleeptest.yaml "
"--job-results-dir %s "
@@ -516,7 +502,6 @@
% (expr, result.stdout))
def test_broken_pipe(self):
os.chdir(basedir)
cmd_line = "(%s run --help | whacky-unknown-command)" % AVOCADO
result = process.run(cmd_line, shell=True, ignore_status=True,
env={"LC_ALL": "C"})
@@ -530,7 +515,6 @@
self.assertNotIn("Avocado crashed", result.stderr)
def test_results_plugins_no_tests(self):
os.chdir(basedir)
cmd_line = ("%s run UNEXISTING --job-results-dir %s"
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
......
@@ -7,6 +7,8 @@ from avocado.core import exit_codes
from avocado.utils import process
from avocado.utils import script
BASEDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
BASEDIR = os.path.abspath(BASEDIR)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
@@ -47,6 +49,7 @@ class JobScriptsTest(unittest.TestCase):
os.mkdir(self.pre_dir)
self.post_dir = os.path.join(self.tmpdir, 'post.d')
os.mkdir(self.post_dir)
os.chdir(BASEDIR)
def test_pre_post(self):
"""
......
@@ -153,6 +153,7 @@ class TestStatuses(unittest.TestCase):
".data",
'test_statuses.yaml'))
os.chdir(basedir)
cmd = ('%s run %s -m %s --sysinfo=off --job-results-dir %s --json -' %
(AVOCADO, test_file, yaml_file, self.tmpdir))
......
@@ -17,6 +17,7 @@ class StreamsTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
os.chdir(basedir)
def test_app_info_stdout(self):
"""
......
@@ -157,8 +157,8 @@ class ProcessTest(unittest.TestCase):
def file_lock_action(args):
path, players = args
max_individual_timeout = 0.021
path, players, max_individual_timeout = args
start = time.time()
max_timeout = max_individual_timeout * players
with FileLock(path, max_timeout):
sleeptime = random.random() / 100
@@ -174,9 +174,15 @@ class FileLockTest(unittest.TestCase):
"Skipping test that take a long time to run, are "
"resource intensive or time sensitve")
def test_filelock(self):
# Calculate the timeout based on t_100_iter + 2e-5*players
start = time.time()
for _ in xrange(100):
with FileLock(self.tmpdir):
pass
timeout = 0.02 + (time.time() - start)
players = 1000
pool = multiprocessing.Pool(players)
args = [(self.tmpdir, players)] * players
args = [(self.tmpdir, players, timeout)] * players
try:
pool.map(file_lock_action, args)
except:
......
@@ -8,6 +8,7 @@ import unittest
from avocado.core import test
from avocado.core import loader
from avocado.core import test
from avocado.utils import script
# We need to access protected members pylint: disable=W0212
@@ -238,6 +239,14 @@ class ThirdChild(Test, SecondChild):
pass
"""
PYTHON_UNITTEST = """#!/usr/bin/env python
from unittest import TestCase
class SampleTest(TestCase):
def test(self):
pass
"""
class LoaderTest(unittest.TestCase):
@@ -513,7 +522,7 @@ class LoaderTest(unittest.TestCase):
KEEP_METHODS_ORDER)
avocado_keep_methods_order.save()
expected_order = ['test2', 'testA', 'test1', 'testZZZ', 'test']
tests = self.loader._find_avocado_tests(avocado_keep_methods_order.path)
tests = self.loader._find_avocado_tests(avocado_keep_methods_order.path)[0]
methods = [method[0] for method in tests['MyClass']]
self.assertEqual(expected_order, methods)
avocado_keep_methods_order.remove()
@@ -529,13 +538,29 @@ class LoaderTest(unittest.TestCase):
avocado_recursive_discovery_test2.save()
sys.path.append(os.path.dirname(avocado_recursive_discovery_test1.path))
tests = self.loader._find_avocado_tests(avocado_recursive_discovery_test2.path)
tests = self.loader._find_avocado_tests(avocado_recursive_discovery_test2.path)[0]
expected = {'ThirdChild': [('test_third_child', set([])),
('test_second_child', set([])),
('test_first_child', set([])),
('test_basic', set([]))]}
self.assertEqual(expected, tests)
def test_python_unittest(self):
disabled_test = script.TemporaryScript("disabled.py",
AVOCADO_TEST_OK_DISABLED,
mode=DEFAULT_NON_EXEC_MODE)
python_unittest = script.TemporaryScript("python_unittest.py",
PYTHON_UNITTEST)
disabled_test.save()
python_unittest.save()
tests = self.loader.discover(disabled_test.path)
self.assertEqual(tests, [])
tests = self.loader.discover(python_unittest.path)
exp = [(test.PythonUnittest,
{"name": "python_unittest.SampleTest.test",
"test_dir": os.path.dirname(python_unittest.path)})]
self.assertEqual(tests, exp)
def tearDown(self):
shutil.rmtree(self.tmpdir)
......
import copy
import itertools
import os
import pickle
import unittest
import yaml
@@ -8,11 +9,10 @@ import yaml
import avocado_varianter_yaml_to_mux as yaml_to_mux
from avocado.core import mux, tree, varianter
BASEDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
BASEDIR = os.path.abspath(BASEDIR)
if __name__ == "__main__":
PATH_PREFIX = "../../"
else:
PATH_PREFIX = ""
PATH_PREFIX = os.path.relpath(BASEDIR) + os.path.sep
def combine(leaves_pools):
......