diff --git a/avocado/core/loader.py b/avocado/core/loader.py
index 55cc8654baac6fc4b35ad6ec96487d7882383569..71602d0122766091af1dfbdefa228e02e82c88a4 100644
--- a/avocado/core/loader.py
+++ b/avocado/core/loader.py
@@ -18,7 +18,6 @@ Test loader module.
 """

 import ast
-import collections
 import imp
 import inspect
 import os
@@ -43,6 +42,10 @@ AVAILABLE = None
 ALL = True


+# Regexp to find python unittests
+_RE_UNIT_TEST = re.compile(r'test.*')
+
+
 class MissingTest(object):
     """
     Class representing reference which failed to be discovered
@@ -511,7 +514,8 @@ class FileLoader(TestLoader):
                 MissingTest: 'MISSING',
                 BrokenSymlink: 'BROKEN_SYMLINK',
                 AccessDeniedPath: 'ACCESS_DENIED',
-                test.Test: 'INSTRUMENTED'}
+                test.Test: 'INSTRUMENTED',
+                test.PythonUnittest: 'PyUNITTEST'}

     @staticmethod
     def get_decorator_mapping():
@@ -520,7 +524,8 @@ class FileLoader(TestLoader):
                 MissingTest: output.TERM_SUPPORT.fail_header_str,
                 BrokenSymlink: output.TERM_SUPPORT.fail_header_str,
                 AccessDeniedPath: output.TERM_SUPPORT.fail_header_str,
-                test.Test: output.TERM_SUPPORT.healthy_str}
+                test.Test: output.TERM_SUPPORT.healthy_str,
+                test.PythonUnittest: output.TERM_SUPPORT.healthy_str}

     def discover(self, reference, which_tests=DEFAULT):
         """
@@ -620,9 +625,11 @@ class FileLoader(TestLoader):
         :type path: str
         :param class_name: the specific class to be found
         :type path: str
-        :returns: dict with class name and additional info such as method names
-                  and tags
-        :rtype: dict
+        :returns: tuple where first item is dict with class name and additional
+                  info such as method names and tags; the second item is
+                  set of class names which look like avocado tests but are
+                  force-disabled.
+        :rtype: tuple
         """
         # If only the Test class was imported from the avocado namespace
         test_import = False
@@ -634,6 +641,7 @@ class FileLoader(TestLoader):
         mod_import_name = None
         # The resulting test classes
         result = {}
+        disabled = set()

         if os.path.isdir(path):
             path = os.path.join(path, "__init__.py")
@@ -679,6 +687,7 @@ class FileLoader(TestLoader):
                 has_disable = safeloader.check_docstring_directive(docstring,
                                                                    'disable')
                 if (has_disable and class_name is None):
+                    disabled.add(statement.name)
                     continue

                 cl_tags = safeloader.get_docstring_directives_tags(docstring)
@@ -709,12 +718,12 @@ class FileLoader(TestLoader):
                     # Looking for a 'class FooTest(Parent)'
                     else:
                         parent_class = parent.id
-
-                    res = self._find_avocado_tests(path, parent_class)
+                    res, dis = self._find_avocado_tests(path, parent_class)
                     if res:
                         parents.remove(parent)
                         for cls in res:
                             info.extend(res[cls])
+                    disabled.update(dis)

                 # If there are parents left to be discovered, they
                 # might be in a different module.
@@ -756,11 +765,12 @@ class FileLoader(TestLoader):
                         parent_module = node.module
                     _, ppath, _ = imp.find_module(parent_module,
                                                   modules_paths)
-                    res = self._find_avocado_tests(ppath,
-                                                   parent_class)
+                    res, dis = self._find_avocado_tests(ppath,
+                                                        parent_class)
                     if res:
                         for cls in res:
                             info.extend(res[cls])
+                    disabled.update(dis)
                     continue

@@ -785,7 +795,7 @@ class FileLoader(TestLoader):
                 result[statement.name] = info
                 continue

-        return result
+        return result, disabled

     @staticmethod
     def _get_methods_info(statement_body, class_tags):
@@ -803,15 +813,35 @@ class FileLoader(TestLoader):

         return methods_info

-    def _make_avocado_tests(self, test_path, make_broken, subtests_filter,
-                            test_name=None):
+    def _find_python_unittests(self, test_path, disabled, subtests_filter):
+        result = []
+        class_methods = safeloader.find_class_and_methods(test_path,
+                                                          _RE_UNIT_TEST)
+        for klass, methods in class_methods.iteritems():
+            if klass in disabled:
+                continue
+            if test_path.endswith(".py"):
+                test_path = test_path[:-3]
+            test_module_name = os.path.relpath(test_path)
+            test_module_name = test_module_name.replace(os.path.sep, ".")
+            candidates = ["%s.%s.%s" % (test_module_name, klass, method)
+                          for method in methods]
+            if subtests_filter:
+                result += [_ for _ in candidates
+                           if subtests_filter.search(_)]
+            else:
+                result += candidates
+        return result
+
+    def _make_existing_file_tests(self, test_path, make_broken,
+                                  subtests_filter, test_name=None):
         if test_name is None:
             test_name = test_path
         try:
-            tests = self._find_avocado_tests(test_path)
-            if tests:
+            # Avocado tests
+            avocado_tests, disabled = self._find_avocado_tests(test_path)
+            if avocado_tests:
                 test_factories = []
-                for test_class, info in tests.items():
+                for test_class, info in avocado_tests.items():
                     if isinstance(test_class, str):
                         for test_method, tags in info:
                             name = test_name + \
@@ -825,6 +855,21 @@ class FileLoader(TestLoader):
                                      'tags': tags})
                         test_factories.append(tst)
                 return test_factories
+            # Python unittests
+            old_dir = os.getcwd()
+            try:
+                py_test_dir = os.path.abspath(os.path.dirname(test_path))
+                py_test_name = os.path.basename(test_path)
+                os.chdir(py_test_dir)
+                python_unittests = self._find_python_unittests(py_test_name,
+                                                               disabled,
+                                                               subtests_filter)
+            finally:
+                os.chdir(old_dir)
+            if python_unittests:
+                return [(test.PythonUnittest, {"name": name,
+                                               "test_dir": py_test_dir})
+                        for name in python_unittests]
             else:
                 if os.access(test_path, os.X_OK):
                     # Module does not have an avocado test class inside but
@@ -883,8 +928,8 @@ class FileLoader(TestLoader):
                                     "readable")
         path_analyzer = path.PathInspector(test_path)
         if path_analyzer.is_python():
-            return self._make_avocado_tests(test_path, make_broken,
-                                            subtests_filter)
+            return self._make_existing_file_tests(test_path, make_broken,
+                                                  subtests_filter)
         else:
             if os.access(test_path, os.X_OK):
                 return self._make_test(test.SimpleTest,
@@ -905,8 +950,9 @@ class FileLoader(TestLoader):
             # Try to resolve test ID (keep compatibility)
             test_path = os.path.join(data_dir.get_test_dir(), test_name)
             if os.path.exists(test_path):
-                return self._make_avocado_tests(test_path, make_broken,
-                                                subtests_filter, test_name)
+                return self._make_existing_file_tests(test_path, make_broken,
+                                                      subtests_filter,
+                                                      test_name)
             else:
                 if not subtests_filter and ':' in test_name:
                     test_name, subtests_filter = test_name.split(':', 1)
@@ -914,9 +960,10 @@ class FileLoader(TestLoader):
                                              test_name)
                 if os.path.exists(test_path):
                     subtests_filter = re.compile(subtests_filter)
-                    return self._make_avocado_tests(test_path, make_broken,
-                                                    subtests_filter,
-                                                    test_name)
+                    return self._make_existing_file_tests(test_path,
+                                                          make_broken,
+                                                          subtests_filter,
+                                                          test_name)
             return make_broken(NotATest, test_name, "File not found "
                                "('%s'; '%s')" % (test_name, test_path))
         return make_broken(NotATest, test_name, self.__not_test_str)
@@ -961,10 +1008,7 @@ class ExternalLoader(TestLoader):
                        '"--external-runner-chdir=test".')
                 raise LoaderError(msg)
-            cls_external_runner = collections.namedtuple('ExternalLoader',
-                                                          ['runner', 'chdir',
-                                                           'test_dir'])
-            return cls_external_runner(runner, chdir, test_dir)
+            return test.ExternalRunnerSpec(runner, chdir, test_dir)
         elif chdir:
             msg = ('Option "--external-runner-chdir" requires '
                    '"--external-runner" to be set.')
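A note on the name construction in _find_python_unittests above: because
_make_existing_file_tests changes into the test file's directory and passes
only its basename, os.path.relpath reduces to the bare module name, so method
test_pass of class First in unittests.py yields the reference
unittests.First.test_pass -- exactly the dotted name `python -m unittest`
accepts. A minimal standalone sketch of the same transformation (the helper
name and inputs are illustrative, not part of the patch):

    import os

    def unittest_candidates(test_path, class_methods, subtests_filter=None):
        # class_methods: {class_name: [method, ...]}, as produced by a
        # static scan of the file (safeloader.find_class_and_methods here)
        if test_path.endswith(".py"):
            test_path = test_path[:-3]          # "unittests.py" -> "unittests"
        module = os.path.relpath(test_path).replace(os.path.sep, ".")
        names = ["%s.%s.%s" % (module, klass, method)
                 for klass, methods in class_methods.items()
                 for method in methods]
        if subtests_filter is not None:         # pre-compiled regexp
            names = [name for name in names if subtests_filter.search(name)]
        return names

    # unittest_candidates("unittests.py", {"First": ["test_pass"]})
    # -> ["unittests.First.test_pass"]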
diff --git a/avocado/core/test.py b/avocado/core/test.py
index 4444b75d8f3f4bd254cba0fdc45a0f2c50a66b61..da8cfab409e3d159b0f2b86aff60afee51b58f1d 100644
--- a/avocado/core/test.py
+++ b/avocado/core/test.py
@@ -921,6 +921,16 @@ class SimpleTest(Test):
         self._execute_cmd()


+class ExternalRunnerSpec(object):
+    """
+    Defines the basic options used by ExternalRunner
+    """
+    def __init__(self, runner, chdir=None, test_dir=None):
+        self.runner = runner
+        self.chdir = chdir
+        self.test_dir = test_dir
+
+
 class ExternalRunnerTest(SimpleTest):

     def __init__(self, name, params=None, base_logdir=None, job=None,
@@ -963,6 +973,45 @@ class ExternalRunnerTest(SimpleTest):
             os.chdir(pre_cwd)


+class PythonUnittest(ExternalRunnerTest):
+    """
+    Python unittest test
+    """
+    def __init__(self, name, params=None, base_logdir=None, job=None,
+                 test_dir=None):
+        runner = "%s -m unittest -q -c" % sys.executable
+        external_runner = ExternalRunnerSpec(runner, "test", test_dir)
+        super(PythonUnittest, self).__init__(name, params, base_logdir, job,
+                                             external_runner=external_runner)
+
+    def _find_result(self, status="OK"):
+        status_line = "[stderr] %s" % status
+        with open(self.logfile) as logfile:
+            lines = iter(logfile)
+            for line in lines:
+                if "[stderr] Ran 1 test in" in line:
+                    break
+            for line in lines:
+                if status_line in line:
+                    return line
+        self.error("Failed to parse status from test result.")
+
+    def test(self):
+        try:
+            super(PythonUnittest, self).test()
+        except exceptions.TestFail:
+            status = self._find_result("FAILED")
+            if "errors" in status:
+                self.error("Unittest reported error(s)")
+            elif "failures" in status:
+                self.fail("Unittest reported failure(s)")
+            else:
+                self.error("Unknown failure executing the unittest")
+        status = self._find_result("OK")
+        if "skipped" in status:
+            self.cancel("Unittest reported skip")
+
+
 class MockingTest(Test):

     """
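PythonUnittest maps the unittest outcome back onto Avocado statuses by
scanning the test's own log for the summary that `python -m unittest` prints
to stderr (a "Ran 1 test in ..." line followed by "OK", "OK (skipped=1)",
"FAILED (failures=1)" or "FAILED (errors=1)", which Avocado records with a
"[stderr]" prefix). A condensed sketch of that mapping, operating on a plain
list of log lines instead of the logfile (a hedged re-expression, not the
class itself):

    def unittest_status(log_lines):
        # Skip ahead to the "Ran 1 test in ..." summary line
        lines = iter(log_lines)
        for line in lines:
            if "[stderr] Ran 1 test in" in line:
                break
        # The verdict follows the summary
        for line in lines:
            if "[stderr] FAILED" in line:
                # "errors=..." becomes ERROR, "failures=..." becomes FAIL
                return "ERROR" if "errors" in line else "FAIL"
            if "[stderr] OK" in line:
                # a skipped unittest is reported as CANCEL by Avocado
                return "CANCEL" if "skipped" in line else "PASS"
        return "ERROR"  # summary missing or unparseable

    # unittest_status(["[stderr] Ran 1 test in 0.000s",
    #                  "[stderr] OK (skipped=1)"]) -> "CANCEL"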
diff --git a/docs/source/Loaders.rst b/docs/source/Loaders.rst
index 52e39d7b79b0da4a0fcd9a8c1ab9eabffda0edbb..cac66c97c18dd90559c5731386b9de8748aade0d 100644
--- a/docs/source/Loaders.rst
+++ b/docs/source/Loaders.rst
@@ -104,11 +104,12 @@ please refer to the corresponding loader/plugin documentation.
 File Loader
 -----------

-For the File Loader, the loader responsible for discovering INSTRUMENTED
-and SIMPLE tests, the Test Reference is a path/filename of a test file.
+For the File Loader, the loader responsible for discovering INSTRUMENTED,
+PyUNITTEST (classic python unittests) and SIMPLE tests, the Test Reference
+is a path/filename of a test file.

-If the file corresponds to an INSTRUMENTED test, you can filter the Test
-IDs by adding to the Test Reference a ``:`` followed by a regular
+If the file corresponds to an INSTRUMENTED or PyUNITTEST test, you can filter
+the Test IDs by adding to the Test Reference a ``:`` followed by a regular
 expression.

 For instance, if you want to list all tests that are present in the
diff --git a/docs/source/ReferenceGuide.rst b/docs/source/ReferenceGuide.rst
index 9de8f363a803c7e4d2bfd40298adb8acd50ba3c6..b007491148aadbe59281fee669688379ddeb1a59 100644
--- a/docs/source/ReferenceGuide.rst
+++ b/docs/source/ReferenceGuide.rst
@@ -129,8 +129,8 @@ Example of Test IDs::
 Test Types
 ==========

-Avocado at its simplest configuration can run two different types of tests [#f1]_. You can mix
-and match those in a single job.
+Avocado at its simplest configuration can run three different types of tests
+[#f1]_. You can mix and match those in a single job.

 Instrumented
 ------------
@@ -148,6 +148,16 @@ Test statuses ``PASS``, ``WARN``, ``START`` and ``SKIP`` are considered as
 successful builds. The ``ABORT``, ``ERROR``, ``FAIL``, ``ALERT``, ``RUNNING``,
 ``NOSTATUS`` and ``INTERRUPTED`` are considered as failed ones.

+Python unittest
+---------------
+
+The discovery of classical python unittests is also supported. Unlike the
+python unittest runner, Avocado uses static analysis to find the individual
+tests, so dynamically created test cases are not recognized. Also note that
+a unittest ``SKIP`` result is reported as ``CANCEL`` in Avocado, as the
+meaning of a skipped unittest differs from Avocado's definition. Apart from
+that, there should be no surprises when running unittests via Avocado.
+
 Simple
 ------

diff --git a/selftests/.data/unittests.py b/selftests/.data/unittests.py
new file mode 100644
index 0000000000000000000000000000000000000000..128b0f29f22a830881404361e3d3d90e66aa9908
--- /dev/null
+++ b/selftests/.data/unittests.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+import unittest
+
+
+class First(unittest.TestCase):
+    def test_pass(self):
+        pass
+
+
+class Second(unittest.TestCase):
+    def test_fail(self):
+        self.fail("this is supposed to fail")
+
+    def test_error(self):
+        raise RuntimeError("This is supposed to error")
+
+    @unittest.skip("This is supposed to be skipped")
+    def test_skip(self):
+        pass
+
+
+if __name__ == "__main__":
+    unittest.main()
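Given this fixture, the loader should discover four PyUNITTEST tests, and the
functional test added later in this patch (test_python_unittest in
selftests/functional/test_loader.py) pins down how each unittest outcome is
expected to surface in Avocado's JSON results:

    # Expected status of each test in selftests/.data/unittests.py
    EXPECTED = {
        "unittests.First.test_pass":   "PASS",    # plain passing test
        "unittests.Second.test_fail":  "FAIL",    # self.fail() -> FAIL
        "unittests.Second.test_error": "ERROR",   # raised exception -> ERROR
        "unittests.Second.test_skip":  "CANCEL",  # @unittest.skip -> CANCEL
    }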
diff --git a/selftests/checkall b/selftests/checkall
index ed1caae1e07d507023bb0dcffa2e8dfac41ff520..a18b597401682de867941aafaff5dc4f38316f61 100755
--- a/selftests/checkall
+++ b/selftests/checkall
@@ -93,7 +93,7 @@ if [ "$AVOCADO_PARALLEL_CHECK" ]; then
 elif [ -z "$AVOCADO_SELF_CHECK" ]; then
     run_rc selftests selftests/run
 else
-    CMD='scripts/avocado run --job-results-dir=$(mktemp -d) `./contrib/scripts/avocado-find-unittests selftests/{unit,functional,doc}/*.py | xargs` --external-runner="/usr/bin/env python -m unittest"'
+    CMD='scripts/avocado run --job-results-dir=$(mktemp -d) selftests/{unit,functional,doc}'
     [ ! $SELF_CHECK_CONTINUOUS ] && CMD+=" --failfast on"
     run_rc selftests "$CMD"
 fi
diff --git a/selftests/functional/test_basic.py b/selftests/functional/test_basic.py
index 5eef658cb2be31e425deaadf8b9582960aa1df48..eda41d3dbd2585041236991e624293c5e6d07de3 100644
--- a/selftests/functional/test_basic.py
+++ b/selftests/functional/test_basic.py
@@ -138,6 +138,7 @@ class RunnerOperationTest(unittest.TestCase):

     def setUp(self):
         self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
+        os.chdir(basedir)

     def test_show_version(self):
         result = process.run('%s -v' % AVOCADO, ignore_status=True)
@@ -168,7 +169,6 @@ class RunnerOperationTest(unittest.TestCase):
             os.write(fd, config)
             os.close(fd)

-        os.chdir(basedir)
         cmd = '%s --config %s config --datadir' % (AVOCADO, config_file)
         result = process.run(cmd)
         output = result.stdout
@@ -181,7 +181,6 @@ class RunnerOperationTest(unittest.TestCase):
         self.assertIn(' logs ' + mapping['logs_dir'], result.stdout)

     def test_runner_all_ok(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                     'passtest.py passtest.py' % (AVOCADO, self.tmpdir))
         process.run(cmd_line)
@@ -192,7 +191,6 @@ class RunnerOperationTest(unittest.TestCase):
                          "does not contains [\"/run/*\"]\n%s" % variants)

     def test_runner_failfast(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                     'passtest.py failtest.py passtest.py --failfast on' %
                     (AVOCADO, self.tmpdir))
@@ -204,7 +202,6 @@ class RunnerOperationTest(unittest.TestCase):
                          "Avocado did not return rc %d:\n%s" % (expected_rc, result))

     def test_runner_ignore_missing_references_one_missing(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                     'passtest.py badtest.py --ignore-missing-references on'
                     % (AVOCADO, self.tmpdir))
@@ -216,7 +213,6 @@ class RunnerOperationTest(unittest.TestCase):
                          "Avocado did not return rc %d:\n%s" % (expected_rc, result))

     def test_runner_ignore_missing_references_all_missing(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                     'badtest.py badtest2.py --ignore-missing-references on'
                     % (AVOCADO, self.tmpdir))
@@ -231,14 +227,12 @@ class RunnerOperationTest(unittest.TestCase):
     @unittest.skipIf(not CC_BINARY,
                      "C compiler is required by the underlying datadir.py test")
     def test_datadir_alias(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                     'datadir.py' % (AVOCADO, self.tmpdir))
         process.run(cmd_line)

     def test_shell_alias(self):
         """ Tests that .sh files are also executable via alias """
-        os.chdir(basedir)
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                     'env_variables.sh' % (AVOCADO, self.tmpdir))
         process.run(cmd_line)
@@ -246,13 +240,11 @@ class RunnerOperationTest(unittest.TestCase):
     @unittest.skipIf(not CC_BINARY,
                      "C compiler is required by the underlying datadir.py test")
     def test_datadir_noalias(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                     'examples/tests/datadir.py' % (AVOCADO, self.tmpdir))
         process.run(cmd_line)

     def test_runner_noalias(self):
-        os.chdir(basedir)
         cmd_line = ("%s run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                     "examples/tests/passtest.py" % (AVOCADO, self.tmpdir))
         process.run(cmd_line)
@@ -266,14 +258,12 @@ class RunnerOperationTest(unittest.TestCase):
         mytest = script.Script(
             os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
             LOCAL_IMPORT_TEST_CONTENTS)
-        os.chdir(basedir)
         mytest.save()
         cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
"%s" % (AVOCADO, self.tmpdir, mytest)) process.run(cmd_line) def test_unsupported_status(self): - os.chdir(basedir) with script.TemporaryScript("fake_status.py", UNSUPPORTED_STATUS_TEST_CONTENTS, "avocado_unsupported_status") as tst: @@ -293,7 +283,6 @@ class RunnerOperationTest(unittest.TestCase): "resource intensive or time sensitve") def test_hanged_test_with_status(self): """ Check that avocado handles hanged tests properly """ - os.chdir(basedir) with script.TemporaryScript("report_status_and_hang.py", REPORTS_STATUS_AND_HANG, "hanged_test_with_status") as tst: @@ -316,7 +305,6 @@ class RunnerOperationTest(unittest.TestCase): "interrupted. Results:\n%s" % res) def test_no_status_reported(self): - os.chdir(basedir) with script.TemporaryScript("die_without_reporting_status.py", DIE_WITHOUT_REPORTING_STATUS, "no_status_reported") as tst: @@ -332,7 +320,6 @@ class RunnerOperationTest(unittest.TestCase): results["tests"][0]["fail_reason"]) def test_runner_tests_fail(self): - os.chdir(basedir) cmd_line = ('%s run --sysinfo=off --job-results-dir %s passtest.py ' 'failtest.py passtest.py' % (AVOCADO, self.tmpdir)) result = process.run(cmd_line, ignore_status=True) @@ -341,7 +328,6 @@ class RunnerOperationTest(unittest.TestCase): "Avocado did not return rc %d:\n%s" % (expected_rc, result)) def test_runner_nonexistent_test(self): - os.chdir(basedir) cmd_line = ('%s run --sysinfo=off --job-results-dir ' '%s bogustest' % (AVOCADO, self.tmpdir)) result = process.run(cmd_line, ignore_status=True) @@ -353,7 +339,6 @@ class RunnerOperationTest(unittest.TestCase): "Avocado did not return rc %d:\n%s" % (expected_rc, result)) def test_runner_doublefail(self): - os.chdir(basedir) cmd_line = ('%s run --sysinfo=off --job-results-dir %s ' '--xunit - doublefail.py' % (AVOCADO, self.tmpdir)) result = process.run(cmd_line, ignore_status=True) @@ -371,7 +356,6 @@ class RunnerOperationTest(unittest.TestCase): "Test did not fail with action exception:\n%s" % output) def test_uncaught_exception(self): - os.chdir(basedir) cmd_line = ("%s run --sysinfo=off --job-results-dir %s " "--json - uncaught_exception.py" % (AVOCADO, self.tmpdir)) result = process.run(cmd_line, ignore_status=True) @@ -382,7 +366,6 @@ class RunnerOperationTest(unittest.TestCase): self.assertIn('"status": "ERROR"', result.stdout) def test_fail_on_exception(self): - os.chdir(basedir) cmd_line = ("%s run --sysinfo=off --job-results-dir %s " "--json - fail_on_exception.py" % (AVOCADO, self.tmpdir)) result = process.run(cmd_line, ignore_status=True) @@ -393,7 +376,6 @@ class RunnerOperationTest(unittest.TestCase): self.assertIn('"status": "FAIL"', result.stdout) def test_exception_not_in_path(self): - os.chdir(basedir) os.mkdir(os.path.join(self.tmpdir, "shared_lib")) mylib = script.Script(os.path.join(self.tmpdir, "shared_lib", "mylib.py"), @@ -413,7 +395,6 @@ class RunnerOperationTest(unittest.TestCase): self.assertNotIn("Failed to read queue", result.stdout) def test_runner_timeout(self): - os.chdir(basedir) cmd_line = ('%s run --sysinfo=off --job-results-dir %s ' '--xunit - timeouttest.py' % (AVOCADO, self.tmpdir)) result = process.run(cmd_line, ignore_status=True) @@ -433,7 +414,6 @@ class RunnerOperationTest(unittest.TestCase): "Skipping test that take a long time to run, are " "resource intensive or time sensitve") def test_runner_abort(self): - os.chdir(basedir) cmd_line = ('%s run --sysinfo=off --job-results-dir %s ' '--xunit - abort.py' % (AVOCADO, self.tmpdir)) result = process.run(cmd_line, ignore_status=True) @@ -448,7 +428,6 @@ class 
@@ -448,7 +428,6 @@ class RunnerOperationTest(unittest.TestCase):
             self.assertIn(excerpt, output)

     def test_silent_output(self):
-        os.chdir(basedir)
         cmd_line = ('%s --silent run --sysinfo=off --job-results-dir %s '
                     'passtest.py' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
@@ -458,7 +437,6 @@ class RunnerOperationTest(unittest.TestCase):
         self.assertEqual(result.stdout, expected_output)

     def test_empty_args_list(self):
-        os.chdir(basedir)
         cmd_line = AVOCADO
         result = process.run(cmd_line, ignore_status=True)
         expected_rc = exit_codes.AVOCADO_FAIL
@@ -467,7 +445,6 @@ class RunnerOperationTest(unittest.TestCase):
         self.assertIn(expected_output, result.stderr)

     def test_empty_test_list(self):
-        os.chdir(basedir)
         cmd_line = '%s run --sysinfo=off --job-results-dir %s' % (AVOCADO,
                                                                   self.tmpdir)
         result = process.run(cmd_line, ignore_status=True)
@@ -478,7 +455,6 @@ class RunnerOperationTest(unittest.TestCase):
         self.assertIn(expected_output, result.stderr)

     def test_not_found(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s sbrubles'
                     % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
@@ -517,7 +493,6 @@ class RunnerOperationTest(unittest.TestCase):
         """
         Tests that the `latest` link to the latest job results is created early
         """
-        os.chdir(basedir)
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                     'examples/tests/passtest.py' % (AVOCADO, self.tmpdir))
         avocado_process = process.SubProcess(cmd_line)
@@ -532,7 +507,6 @@ class RunnerOperationTest(unittest.TestCase):
         self.assertTrue(os.path.islink(link))

     def test_dry_run(self):
-        os.chdir(basedir)
         cmd = ("%s run --sysinfo=off passtest.py failtest.py "
                "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
                " foo:bar:b foo:baz:c bar:bar:bar --dry-run" % AVOCADO)
@@ -557,7 +531,6 @@ class RunnerOperationTest(unittest.TestCase):
             self.assertEqual(log.count(line), 4)

     def test_invalid_python(self):
-        os.chdir(basedir)
         test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                   INVALID_PYTHON_TEST)
         cmd_line = ('%s --show test run --sysinfo=off '
@@ -575,7 +548,6 @@ class RunnerOperationTest(unittest.TestCase):
                     "Skipping test that take a long time to run, are "
                     "resource intensive or time sensitve")
     def test_read(self):
-        os.chdir(basedir)
         cmd = "%s run --sysinfo=off --job-results-dir %%s %%s" % AVOCADO
         cmd %= (self.tmpdir, READ_BINARY)
         result = process.run(cmd, timeout=10, ignore_status=True)
@@ -592,9 +564,9 @@ class RunnerHumanOutputTest(unittest.TestCase):

     def setUp(self):
         self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
+        os.chdir(basedir)

     def test_output_pass(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                     'passtest.py' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
@@ -605,7 +577,6 @@ class RunnerHumanOutputTest(unittest.TestCase):
         self.assertIn('passtest.py:PassTest.test: PASS', result.stdout)

     def test_output_fail(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                     'failtest.py' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
@@ -616,7 +587,6 @@ class RunnerHumanOutputTest(unittest.TestCase):
         self.assertIn('failtest.py:FailTest.test: FAIL', result.stdout)

     def test_output_error(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                     'errortest.py' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
@@ -627,7 +597,6 @@ class RunnerHumanOutputTest(unittest.TestCase):
         self.assertIn('errortest.py:ErrorTest.test: ERROR', result.stdout)

     def test_output_cancel(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                     'cancelonsetup.py' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
@@ -641,7 +610,6 @@ class RunnerHumanOutputTest(unittest.TestCase):
     @unittest.skipIf(not GNU_ECHO_BINARY,
                      'GNU style echo binary not available')
     def test_ugly_echo_cmd(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --external-runner "%s -ne" '
                     '"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                     ' --sysinfo=off --show-job-log' %
@@ -693,9 +661,9 @@ class RunnerSimpleTest(unittest.TestCase):
                                                  'avocado_simpletest_'
                                                  'functional')
         self.fail_script.save()
+        os.chdir(basedir)

     def test_simpletest_pass(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                     ' "%s"' % (AVOCADO, self.tmpdir, self.pass_script.path))
         result = process.run(cmd_line, ignore_status=True)
@@ -705,7 +673,6 @@ class RunnerSimpleTest(unittest.TestCase):
                          (expected_rc, result))

     def test_simpletest_fail(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                     ' %s' % (AVOCADO, self.tmpdir, self.fail_script.path))
         result = process.run(cmd_line, ignore_status=True)
@@ -725,7 +692,6 @@ class RunnerSimpleTest(unittest.TestCase):
         Notice: on a current machine this takes about 0.12s, so 30 seconds
         is considered to be pretty safe here.
         """
-        os.chdir(basedir)
         one_hundred = 'failtest.py ' * 100
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
                     % (AVOCADO, self.tmpdir, one_hundred))
@@ -745,7 +711,6 @@ class RunnerSimpleTest(unittest.TestCase):
         Sleeptest is supposed to take 1 second, let's make a sandwich of
         100 failtests and check the test runner timing.
         """
-        os.chdir(basedir)
         sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                             'sleeptest.py')
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
@@ -762,7 +727,6 @@ class RunnerSimpleTest(unittest.TestCase):
         """
         simplewarning.sh uses the avocado-bash-utils
         """
-        os.chdir(basedir)
         # simplewarning.sh calls "avocado" without specifying a path
         os.environ['PATH'] += ":" + os.path.join(basedir, 'scripts')
         # simplewarning.sh calls "avocado exec-path" which hasn't
@@ -786,8 +750,8 @@ class RunnerSimpleTest(unittest.TestCase):
     def test_non_absolute_path(self):
         avocado_path = os.path.join(basedir, 'scripts', 'avocado')
         test_base_dir = os.path.dirname(self.pass_script.path)
-        test_file_name = os.path.basename(self.pass_script.path)
         os.chdir(test_base_dir)
+        test_file_name = os.path.basename(self.pass_script.path)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                     ' "%s"' % (avocado_path, self.tmpdir, test_file_name))
         result = process.run(cmd_line, ignore_status=True)
@@ -862,9 +826,9 @@ class ExternalRunnerTest(unittest.TestCase):
                                                 "exit 1",
                                                 'avocado_externalrunner_functional')
         self.fail_script.save()
+        os.chdir(basedir)

     def test_externalrunner_pass(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                     '--external-runner=/bin/sh %s'
                     % (AVOCADO, self.tmpdir, self.pass_script.path))
@@ -875,7 +839,6 @@ class ExternalRunnerTest(unittest.TestCase):
                          (expected_rc, result))

     def test_externalrunner_fail(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                     '--external-runner=/bin/sh %s'
                     % (AVOCADO, self.tmpdir, self.fail_script.path))
@@ -886,7 +849,6 @@ class ExternalRunnerTest(unittest.TestCase):
                          (expected_rc, result))

     def test_externalrunner_chdir_no_testdir(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                     '--external-runner=/bin/sh --external-runner-chdir=test %s'
                     % (AVOCADO, self.tmpdir, self.pass_script.path))
@@ -900,7 +862,6 @@ class ExternalRunnerTest(unittest.TestCase):
                          (expected_rc, result))

     def test_externalrunner_no_url(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                     '--external-runner=%s' % (AVOCADO, self.tmpdir, TRUE_CMD))
         result = process.run(cmd_line, ignore_status=True)
@@ -922,6 +883,7 @@ class AbsPluginsTest(object):

     def setUp(self):
         self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
+        os.chdir(basedir)

     def tearDown(self):
         shutil.rmtree(self.base_outputdir)
@@ -930,7 +892,6 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):

     def test_sysinfo_plugin(self):
-        os.chdir(basedir)
         cmd_line = '%s sysinfo %s' % (AVOCADO, self.base_outputdir)
         result = process.run(cmd_line, ignore_status=True)
         expected_rc = exit_codes.AVOCADO_ALL_OK
@@ -941,7 +902,6 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

     def test_list_plugin(self):
-        os.chdir(basedir)
         cmd_line = '%s list' % AVOCADO
         result = process.run(cmd_line, ignore_status=True)
         output = result.stdout
@@ -952,7 +912,6 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertNotIn('No tests were found on current tests dir', output)

     def test_list_error_output(self):
-        os.chdir(basedir)
         cmd_line = '%s list sbrubles' % AVOCADO
         result = process.run(cmd_line, ignore_status=True)
         output = result.stderr
@@ -963,7 +922,6 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertIn("Unable to resolve reference", output)

     def test_list_no_file_loader(self):
-        os.chdir(basedir)
         cmd_line = ("%s list --loaders external --verbose -- "
                     "this-wont-be-matched" % AVOCADO)
         result = process.run(cmd_line, ignore_status=True)
@@ -983,7 +941,6 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         """
         Runs list verbosely and check for tag related output
         """
-        os.chdir(basedir)
         test = script.make_script(os.path.join(self.base_outputdir, 'test.py'),
                                   VALID_PYTHON_TEST_WITH_TAGS)
         cmd_line = ("%s list --loaders file --verbose %s" % (AVOCADO,
@@ -1003,7 +960,6 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertEqual("BIG_TAG_NAME: 1", stdout_lines[-1])

     def test_plugin_list(self):
-        os.chdir(basedir)
         cmd_line = '%s plugins' % AVOCADO
         result = process.run(cmd_line, ignore_status=True)
         output = result.stdout
@@ -1015,7 +971,6 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertNotIn('Disabled', output)

     def test_config_plugin(self):
-        os.chdir(basedir)
         cmd_line = '%s config --paginator off' % AVOCADO
         result = process.run(cmd_line, ignore_status=True)
         output = result.stdout
@@ -1026,7 +981,6 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertNotIn('Disabled', output)

     def test_config_plugin_datadir(self):
-        os.chdir(basedir)
         cmd_line = '%s config --datadir --paginator off' % AVOCADO
         result = process.run(cmd_line, ignore_status=True)
         output = result.stdout
@@ -1037,7 +991,6 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertNotIn('Disabled', output)

     def test_disable_plugin(self):
-        os.chdir(basedir)
         cmd_line = '%s plugins' % AVOCADO
         result = process.run(cmd_line, ignore_status=True)
         expected_rc = exit_codes.AVOCADO_ALL_OK
@@ -1088,7 +1041,6 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         except pkg_resources.DistributionNotFound:
             pass

-        os.chdir(basedir)
         cmd_line = '%s plugins' % AVOCADO
         result = process.run(cmd_line, ignore_status=True)
         expected_rc = exit_codes.AVOCADO_ALL_OK
@@ -1126,7 +1078,6 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertIn(result_output, zip_file_list)

     def test_Namespace_object_has_no_attribute(self):
-        os.chdir(basedir)
         cmd_line = '%s plugins' % AVOCADO
         result = process.run(cmd_line, ignore_status=True)
         output = result.stderr
@@ -1152,7 +1103,6 @@ class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

     def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                       e_nnotfound, e_nfailures, e_nskip):
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                     ' --xunit - %s' % (AVOCADO, self.tmpdir, testname))
         result = process.run(cmd_line, ignore_status=True)
@@ -1235,7 +1185,6 @@ class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

     def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                       e_nfailures, e_nskip, e_ncancel=0, external_runner=None):
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off --json - '
                     '--archive %s' % (AVOCADO, self.tmpdir, testname))
         if external_runner is not None:
diff --git a/selftests/functional/test_loader.py b/selftests/functional/test_loader.py
index 6f13bf0b3959b4351328ca41ad9d229a451c584c..b8ca3caa1fcbc8e5ea15c9f813ee110404d302d9 100644
--- a/selftests/functional/test_loader.py
+++ b/selftests/functional/test_loader.py
@@ -1,4 +1,5 @@
 import os
+import json
 import subprocess
 import time
 import stat
@@ -108,6 +109,9 @@
 from avocado import main
 from test2 import *

 class BasicTestSuite(SuperTest):
+    '''
+    :avocado: disable
+    '''
     def test1(self):
         self.xxx()
@@ -319,6 +323,30 @@ class LoaderTestFunctional(unittest.TestCase):

         self.assertEqual(test, 11, "Number of tests is not 12 (%s):\n%s"
                          % (test, result))

+    def test_python_unittest(self):
+        test_path = os.path.join(basedir, "selftests", ".data", "unittests.py")
+        cmd = ("%s run --sysinfo=off --job-results-dir %s --json - -- %s"
+               % (AVOCADO, self.tmpdir, test_path))
+        result = process.run(cmd, ignore_status=True)
+        jres = json.loads(result.stdout)
+        self.assertEqual(result.exit_status, 1, result)
+        exps = [("unittests.Second.test_fail", "FAIL"),
+                ("unittests.Second.test_error", "ERROR"),
+                ("unittests.Second.test_skip", "CANCEL"),
+                ("unittests.First.test_pass", "PASS")]
+        for test in jres["tests"]:
+            for exp in exps:
+                if exp[0] in test["id"]:
+                    self.assertEqual(test["status"], exp[1], "Status of %s not"
+                                     " as expected\n%s" % (exp, result))
+                    exps.remove(exp)
+                    break
+            else:
+                self.fail("No expected result for %s\n%s\n\nexps = %s"
+                          % (test["id"], result, exps))
+        self.assertFalse(exps, "Some expected results were not matched to the"
+                         " actual results:\n%s\n\nexps = %s" % (result, exps))
+
     def tearDown(self):
         shutil.rmtree(self.tmpdir)
diff --git a/selftests/functional/test_output.py b/selftests/functional/test_output.py
index 7f80dfcdc9c332f6d4307a591097f66e39e306ea..2f863c872e1276e94ced04d07a4d43abde7c691c 100644
--- a/selftests/functional/test_output.py
+++ b/selftests/functional/test_output.py
@@ -99,11 +99,11 @@ class OutputTest(unittest.TestCase):

     def setUp(self):
         self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
+        os.chdir(basedir)

     @unittest.skipIf(missing_binary('cc'),
                      "C compiler is required by the underlying doublefree.py test")
     def test_output_doublefree(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                     'doublefree.py' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
@@ -133,7 +133,6 @@ class OutputTest(unittest.TestCase):
         test = script.Script(os.path.join(self.tmpdir, "output_test.py"),
                              OUTPUT_TEST_CONTENT)
         test.save()
-        os.chdir(basedir)
         result = process.run("%s run --job-results-dir %s --sysinfo=off "
                              "--json - -- %s" % (AVOCADO, self.tmpdir, test))
         res = json.loads(result.stdout)
@@ -162,6 +161,7 @@ class OutputPluginTest(unittest.TestCase):

     def setUp(self):
         self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
+        os.chdir(basedir)

     def check_output_files(self, debug_log):
         base_dir = os.path.dirname(debug_log)
@@ -183,7 +183,6 @@ class OutputPluginTest(unittest.TestCase):
             self.assertIn("\n# debug.log of ", tap)

     def test_output_incompatible_setup(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                     '--xunit - --json - passtest.py' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
@@ -202,7 +201,6 @@ class OutputPluginTest(unittest.TestCase):
     @unittest.skipIf(html_uncapable(),
                      "Uncapable of Avocado Result HTML plugin")
     def test_output_incompatible_setup_2(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                     '--html - passtest.py' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
@@ -217,7 +215,6 @@ class OutputPluginTest(unittest.TestCase):

     def test_output_compatible_setup(self):
         tmpfile = tempfile.mktemp()
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                     '--journal --xunit %s --json - passtest.py' %
                     (AVOCADO, self.tmpdir, tmpfile))
@@ -239,7 +236,6 @@ class OutputPluginTest(unittest.TestCase):

     def test_output_compatible_setup_2(self):
         tmpfile = tempfile.mktemp()
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                     '--xunit - --json %s passtest.py' %
                     (AVOCADO, self.tmpdir, tmpfile))
@@ -269,7 +265,6 @@ class OutputPluginTest(unittest.TestCase):
         tmpfile2 = tempfile.mktemp(prefix='avocado_' + __name__)
         tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
         tmpfile3 = tempfile.mktemp(dir=tmpdir)
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                     '--xunit %s --json %s --html %s passtest.py'
                     % (AVOCADO, self.tmpdir, tmpfile, tmpfile2, tmpfile3))
@@ -301,7 +296,6 @@ class OutputPluginTest(unittest.TestCase):
     def test_output_compatible_setup_nooutput(self):
         tmpfile = tempfile.mktemp()
         tmpfile2 = tempfile.mktemp()
-        os.chdir(basedir)
         # Verify --silent can be supplied as app argument
         cmd_line = ('%s --silent run --job-results-dir %s '
                     '--sysinfo=off --xunit %s --json %s passtest.py'
@@ -348,7 +342,6 @@ class OutputPluginTest(unittest.TestCase):
         self.check_output_files(debug_log)

     def test_show_job_log(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                     'passtest.py --show-job-log' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
@@ -364,7 +357,6 @@ class OutputPluginTest(unittest.TestCase):
         self.assertEqual(len(job_id), 40)

     def test_silent_trumps_show_job_log(self):
-        os.chdir(basedir)
         # Also verify --silent can be supplied as run option
         cmd_line = ('%s run --silent --job-results-dir %s '
                     '--sysinfo=off passtest.py --show-job-log'
@@ -378,7 +370,6 @@ class OutputPluginTest(unittest.TestCase):
         self.assertEqual(output, "")

     def test_default_enabled_plugins(self):
-        os.chdir(basedir)
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                     'passtest.py' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
@@ -400,7 +391,6 @@ class OutputPluginTest(unittest.TestCase):
     def test_verify_whiteboard_save(self):
         tmpfile = tempfile.mktemp()
         try:
tmpfile = tempfile.mktemp() try: - os.chdir(basedir) config = os.path.join(self.tmpdir, "conf.ini") content = ("[datadir.paths]\nlogs_dir = %s" % os.path.relpath(self.tmpdir, ".")) @@ -431,7 +421,6 @@ class OutputPluginTest(unittest.TestCase): def test_gendata(self): tmpfile = tempfile.mktemp() try: - os.chdir(basedir) cmd_line = ("%s run --job-results-dir %s " "--sysinfo=off gendata.py --json %s" % (AVOCADO, self.tmpdir, tmpfile)) @@ -470,7 +459,6 @@ class OutputPluginTest(unittest.TestCase): def test_redirect_output(self): redirected_output_path = tempfile.mktemp() try: - os.chdir(basedir) cmd_line = ('%s run --job-results-dir %s ' '--sysinfo=off passtest.py > %s' % (AVOCADO, self.tmpdir, redirected_output_path)) @@ -501,11 +489,9 @@ class OutputPluginTest(unittest.TestCase): PERL_TAP_PARSER_SNIPPET % self.tmpdir) perl_script.save() - os.chdir(basedir) process.run("perl %s" % perl_script) def test_tap_totaltests(self): - os.chdir(basedir) cmd_line = ("%s run passtest.py " "-m examples/tests/sleeptest.py.data/sleeptest.yaml " "--job-results-dir %s " @@ -516,7 +502,6 @@ class OutputPluginTest(unittest.TestCase): % (expr, result.stdout)) def test_broken_pipe(self): - os.chdir(basedir) cmd_line = "(%s run --help | whacky-unknown-command)" % AVOCADO result = process.run(cmd_line, shell=True, ignore_status=True, env={"LC_ALL": "C"}) @@ -530,7 +515,6 @@ class OutputPluginTest(unittest.TestCase): self.assertNotIn("Avocado crashed", result.stderr) def test_results_plugins_no_tests(self): - os.chdir(basedir) cmd_line = ("%s run UNEXISTING --job-results-dir %s" % (AVOCADO, self.tmpdir)) result = process.run(cmd_line, ignore_status=True) diff --git a/selftests/functional/test_plugin_jobscripts.py b/selftests/functional/test_plugin_jobscripts.py index 7c1bb532f9e8f7810cb15f2eb0a1b06a9f796ba2..78407240d798dc4e5ed26bb0532b2571b7ac229f 100644 --- a/selftests/functional/test_plugin_jobscripts.py +++ b/selftests/functional/test_plugin_jobscripts.py @@ -7,6 +7,8 @@ from avocado.core import exit_codes from avocado.utils import process from avocado.utils import script +BASEDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..') +BASEDIR = os.path.abspath(BASEDIR) AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado") @@ -47,6 +49,7 @@ class JobScriptsTest(unittest.TestCase): os.mkdir(self.pre_dir) self.post_dir = os.path.join(self.tmpdir, 'post.d') os.mkdir(self.post_dir) + os.chdir(BASEDIR) def test_pre_post(self): """ diff --git a/selftests/functional/test_statuses.py b/selftests/functional/test_statuses.py index 6eceed28b64e8793a18fa770fccffc960ca66121..8433e1fa52e75da6815783a839f10ec928be38f7 100644 --- a/selftests/functional/test_statuses.py +++ b/selftests/functional/test_statuses.py @@ -153,6 +153,7 @@ class TestStatuses(unittest.TestCase): ".data", 'test_statuses.yaml')) + os.chdir(basedir) cmd = ('%s run %s -m %s --sysinfo=off --job-results-dir %s --json -' % (AVOCADO, test_file, yaml_file, self.tmpdir)) diff --git a/selftests/functional/test_streams.py b/selftests/functional/test_streams.py index 2cd626f70d3580d6b983ef8d267e14f458b3a7ac..a2389533bdb09720991fc76ef80a3a20de579b0f 100644 --- a/selftests/functional/test_streams.py +++ b/selftests/functional/test_streams.py @@ -17,6 +17,7 @@ class StreamsTest(unittest.TestCase): def setUp(self): self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__) + os.chdir(basedir) def test_app_info_stdout(self): """ diff --git a/selftests/functional/test_utils.py b/selftests/functional/test_utils.py index 
diff --git a/selftests/unit/test_loader.py b/selftests/unit/test_loader.py
index 24c8a5cc330f07bb11788d02a70e94cb974aeedd..d4c4568160a1ddf664d31977cf9c16e124b2d35d 100644
--- a/selftests/unit/test_loader.py
+++ b/selftests/unit/test_loader.py
@@ -8,6 +8,7 @@ import unittest

 from avocado.core import test
 from avocado.core import loader
+from avocado.core import test
 from avocado.utils import script

 # We need to access protected members pylint: disable=W0212
@@ -238,6 +239,14 @@ class ThirdChild(Test, SecondChild):
         pass
 """

+PYTHON_UNITTEST = """#!/usr/bin/env python
+from unittest import TestCase
+
+class SampleTest(TestCase):
+    def test(self):
+        pass
+"""
+

 class LoaderTest(unittest.TestCase):

@@ -513,7 +522,7 @@ class LoaderTest(unittest.TestCase):
                                                      KEEP_METHODS_ORDER)
         avocado_keep_methods_order.save()
         expected_order = ['test2', 'testA', 'test1', 'testZZZ', 'test']
-        tests = self.loader._find_avocado_tests(avocado_keep_methods_order.path)
+        tests = self.loader._find_avocado_tests(avocado_keep_methods_order.path)[0]
         methods = [method[0] for method in tests['MyClass']]
         self.assertEqual(expected_order, methods)
         avocado_keep_methods_order.remove()
@@ -529,13 +538,29 @@ class LoaderTest(unittest.TestCase):
         avocado_recursive_discovery_test2.save()

         sys.path.append(os.path.dirname(avocado_recursive_discovery_test1.path))
-        tests = self.loader._find_avocado_tests(avocado_recursive_discovery_test2.path)
+        tests = self.loader._find_avocado_tests(avocado_recursive_discovery_test2.path)[0]
         expected = {'ThirdChild': [('test_third_child', set([])),
                                    ('test_second_child', set([])),
                                    ('test_first_child', set([])),
                                    ('test_basic', set([]))]}
         self.assertEqual(expected, tests)

+    def test_python_unittest(self):
+        disabled_test = script.TemporaryScript("disabled.py",
+                                               AVOCADO_TEST_OK_DISABLED,
+                                               mode=DEFAULT_NON_EXEC_MODE)
+        python_unittest = script.TemporaryScript("python_unittest.py",
+                                                 PYTHON_UNITTEST)
+        disabled_test.save()
+        python_unittest.save()
+        tests = self.loader.discover(disabled_test.path)
+        self.assertEqual(tests, [])
+        tests = self.loader.discover(python_unittest.path)
+        exp = [(test.PythonUnittest,
+                {"name": "python_unittest.SampleTest.test",
+                 "test_dir": os.path.dirname(python_unittest.path)})]
+        self.assertEqual(tests, exp)
+
     def tearDown(self):
         shutil.rmtree(self.tmpdir)
diff --git a/selftests/unit/test_mux.py b/selftests/unit/test_mux.py
index 541102186e5aff611560491b0fb10c5e7c96abef..56a57c650e5a7d1cb7e8ba9707503effe2e4c3ee 100644
--- a/selftests/unit/test_mux.py
+++ b/selftests/unit/test_mux.py
@@ -1,5 +1,6 @@
 import copy
 import itertools
+import os
 import pickle
 import unittest
 import yaml
@@ -8,11 +9,10 @@ import yaml
 import avocado_varianter_yaml_to_mux as yaml_to_mux
 from avocado.core import mux, tree, varianter

+BASEDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
+BASEDIR = os.path.abspath(BASEDIR)

-if __name__ == "__main__":
-    PATH_PREFIX = "../../"
-else:
-    PATH_PREFIX = ""
+PATH_PREFIX = os.path.relpath(BASEDIR) + os.path.sep


 def combine(leaves_pools):
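The test_mux.py change follows the same direction as the rest of the patch:
instead of guessing the fixture prefix from how the module was started
(`__main__` versus imported), it derives the prefix from the file's own
location, so the paths resolve from any working directory -- which matters
now that checkall lets Avocado run these selftests directly. A worked
example of the expression (the repository path is an assumption):

    import os.path

    BASEDIR = "/src/avocado"  # assumed absolute path to the repository root

    # cwd == /src/avocado          -> os.path.relpath(BASEDIR) == "."
    #                                  PATH_PREFIX == "./"
    # cwd == /src/avocado/selftests/unit
    #                               -> os.path.relpath(BASEDIR) == "../.."
    #                                  PATH_PREFIX == "../../"
    PATH_PREFIX = os.path.relpath(BASEDIR) + os.path.sep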