Commit 4f7f4aed authored by: Lukáš Doktor

selftests: Improve code-coverage report

Currently, `selftests/run_coverage` only reports unit-test coverage. This
patch allows specifying a custom `avocado` command in the selftests and uses
it during the coverage run so that the functional tests are also included in
the results.
Signed-off-by: Lukáš Doktor <ldoktor@redhat.com>
Parent 3aa81b0a
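
The change follows one pattern across the functional-test modules: each module
reads the UNITTEST_AVOCADO_CMD environment variable, falls back to
`./scripts/avocado`, and interpolates the result into every command line it
executes. A minimal, self-contained sketch of that pattern is shown below
(`passtest.py` is only an illustrative test reference, and running the snippet
assumes an Avocado source tree as the working directory):

    import os

    from avocado.utils import process

    # Same fallback the selftests use: with UNITTEST_AVOCADO_CMD unset, the
    # plain in-tree ./scripts/avocado entry point is executed.
    AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")

    # Build the command exactly as the functional tests now do, so a wrapped
    # command (for example one prefixed by a coverage tool) is picked up
    # transparently instead of the bare script.
    cmd_line = "%s run --sysinfo=off passtest.py" % AVOCADO
    result = process.run(cmd_line, ignore_status=True)
    print(result.exit_status)

With this hook in place, the coverage run can presumably export something
along the lines of UNITTEST_AVOCADO_CMD="coverage run -p ./scripts/avocado"
before starting the functional tests; the exact invocation lives in
`selftests/run_coverage`, which is not part of this diff.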
......@@ -11,12 +11,14 @@ from avocado.utils import process
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
class ArgumentParsingTest(unittest.TestCase):
def test_unknown_command(self):
os.chdir(basedir)
cmd_line = './scripts/avocado whacky-command-that-doesnt-exist'
cmd_line = '%s whacky-command-that-doesnt-exist' % AVOCADO
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_FAIL
self.assertEqual(result.exit_status, expected_rc,
......@@ -24,7 +26,7 @@ class ArgumentParsingTest(unittest.TestCase):
def test_known_command_bad_choice(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --sysinfo=foo passtest'
cmd_line = '%s run --sysinfo=foo passtest' % AVOCADO
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_FAIL
self.assertEqual(result.exit_status, expected_rc,
......@@ -32,7 +34,7 @@ class ArgumentParsingTest(unittest.TestCase):
def test_known_command_bad_argument(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --sysinfo=off --whacky-argument passtest'
cmd_line = '%s run --sysinfo=off --whacky-argument passtest' % AVOCADO
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_FAIL
self.assertEqual(result.exit_status, expected_rc,
......@@ -55,7 +57,7 @@ class ArgumentParsingErrorEarlyTest(unittest.TestCase):
log_dir = data_dir.get_logs_dir()
self.assertIsNotNone(log_dir)
job = job_id.create_unique_job_id()
cmd_line = './scripts/avocado run --sysinfo=off --force-job-id=%s %s'
cmd_line = '%s run --sysinfo=off --force-job-id=%%s %%s' % AVOCADO
cmd_line %= (job, complement_args)
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, expected_rc,
......
......@@ -27,6 +27,8 @@ from avocado.utils import path as utils_path
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""
......@@ -130,7 +132,7 @@ class RunnerOperationTest(unittest.TestCase):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
def test_show_version(self):
result = process.run('./scripts/avocado -v', ignore_status=True)
result = process.run('%s -v' % AVOCADO, ignore_status=True)
self.assertEqual(result.exit_status, 0)
self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
"Version string does not match 'Avocado \\d\\.\\d:'\n"
......@@ -159,7 +161,7 @@ class RunnerOperationTest(unittest.TestCase):
os.close(fd)
os.chdir(basedir)
cmd = './scripts/avocado --config %s config --datadir' % config_file
cmd = '%s --config %s config --datadir' % (AVOCADO, config_file)
result = process.run(cmd)
output = result.stdout
expected_rc = exit_codes.AVOCADO_ALL_OK
......@@ -172,15 +174,15 @@ class RunnerOperationTest(unittest.TestCase):
def test_runner_all_ok(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'passtest.py passtest.py' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'passtest.py passtest.py' % (AVOCADO, self.tmpdir))
process.run(cmd_line)
def test_runner_failfast(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'passtest.py failtest.py passtest.py --failfast on' %
self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'passtest.py failtest.py passtest.py --failfast on'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertIn('Interrupting job (failfast).', result.stdout)
self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
......@@ -192,29 +194,29 @@ class RunnerOperationTest(unittest.TestCase):
"C compiler is required by the underlying datadir.py test")
def test_datadir_alias(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'datadir.py' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'datadir.py' % (AVOCADO, self.tmpdir))
process.run(cmd_line)
def test_shell_alias(self):
""" Tests that .sh files are also executable via alias """
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'env_variables.sh' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'env_variables.sh' % (AVOCADO, self.tmpdir))
process.run(cmd_line)
@unittest.skipIf(not CC_BINARY,
"C compiler is required by the underlying datadir.py test")
def test_datadir_noalias(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
'examples/tests/datadir.py' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
'examples/tests/datadir.py' % (AVOCADO, self.tmpdir))
process.run(cmd_line)
def test_runner_noalias(self):
os.chdir(basedir)
cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
"examples/tests/passtest.py" % self.tmpdir)
cmd_line = ("%s run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
"examples/tests/passtest.py" % (AVOCADO, self.tmpdir))
process.run(cmd_line)
def test_runner_test_with_local_imports(self):
......@@ -228,8 +230,8 @@ class RunnerOperationTest(unittest.TestCase):
LOCAL_IMPORT_TEST_CONTENTS)
os.chdir(basedir)
mytest.save()
cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
"%s" % (self.tmpdir, mytest))
cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
"%s" % (AVOCADO, self.tmpdir, mytest))
process.run(cmd_line)
def test_unsupported_status(self):
......@@ -237,9 +239,9 @@ class RunnerOperationTest(unittest.TestCase):
with script.TemporaryScript("fake_status.py",
UNSUPPORTED_STATUS_TEST_CONTENTS,
"avocado_unsupported_status") as tst:
res = process.run("./scripts/avocado run --sysinfo=off "
"--job-results-dir %s %s --json -"
% (self.tmpdir, tst), ignore_status=True)
res = process.run("%s run --sysinfo=off --job-results-dir %s %s"
" --json -" % (AVOCADO, self.tmpdir, tst),
ignore_status=True)
self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
results = json.loads(res.stdout)
self.assertEqual(results["tests"][0]["status"], "ERROR",
......@@ -254,9 +256,9 @@ class RunnerOperationTest(unittest.TestCase):
with script.TemporaryScript("report_status_and_hang.py",
REPORTS_STATUS_AND_HANG,
"hanged_test_with_status") as tst:
res = process.run("./scripts/avocado run --sysinfo=off "
"--job-results-dir %s %s --json -"
% (self.tmpdir, tst), ignore_status=True)
res = process.run("%s run --sysinfo=off --job-results-dir %s %s "
"--json -" % (AVOCADO, self.tmpdir, tst),
ignore_status=True)
self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
results = json.loads(res.stdout)
self.assertEqual(results["tests"][0]["status"], "ERROR",
......@@ -273,9 +275,9 @@ class RunnerOperationTest(unittest.TestCase):
with script.TemporaryScript("die_without_reporting_status.py",
DIE_WITHOUT_REPORTING_STATUS,
"no_status_reported") as tst:
res = process.run("./scripts/avocado run --sysinfo=off "
"--job-results-dir %s %s --json -"
% (self.tmpdir, tst), ignore_status=True)
res = process.run("%s run --sysinfo=off --job-results-dir %s %s "
"--json -" % (AVOCADO, self.tmpdir, tst),
ignore_status=True)
self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
results = json.loads(res.stdout)
self.assertEqual(results["tests"][0]["status"], "ERROR",
......@@ -286,8 +288,8 @@ class RunnerOperationTest(unittest.TestCase):
def test_runner_tests_fail(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'passtest.py failtest.py passtest.py' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s passtest.py '
'failtest.py passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.assertEqual(result.exit_status, expected_rc,
......@@ -295,8 +297,8 @@ class RunnerOperationTest(unittest.TestCase):
def test_runner_nonexistent_test(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
'%s bogustest' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir '
'%s bogustest' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_JOB_FAIL
unexpected_rc = exit_codes.AVOCADO_FAIL
......@@ -307,8 +309,8 @@ class RunnerOperationTest(unittest.TestCase):
def test_runner_doublefail(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'--xunit - doublefail.py' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'--xunit - doublefail.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
......@@ -325,8 +327,8 @@ class RunnerOperationTest(unittest.TestCase):
def test_uncaught_exception(self):
os.chdir(basedir)
cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
"--json - uncaught_exception.py" % self.tmpdir)
cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
"--json - uncaught_exception.py" % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.assertEqual(result.exit_status, expected_rc,
......@@ -336,8 +338,8 @@ class RunnerOperationTest(unittest.TestCase):
def test_fail_on_exception(self):
os.chdir(basedir)
cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
"--json - fail_on_exception.py" % self.tmpdir)
cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
"--json - fail_on_exception.py" % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.assertEqual(result.exit_status, expected_rc,
......@@ -347,8 +349,8 @@ class RunnerOperationTest(unittest.TestCase):
def test_runner_timeout(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'--xunit - timeouttest.py' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'--xunit - timeouttest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout
expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
......@@ -367,8 +369,8 @@ class RunnerOperationTest(unittest.TestCase):
"resource intensive or time sensitve")
def test_runner_abort(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'--xunit - abort.py' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'--xunit - abort.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout
excerpt = 'Test died without reporting the status.'
......@@ -382,8 +384,8 @@ class RunnerOperationTest(unittest.TestCase):
def test_silent_output(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
'--job-results-dir %s passtest.py' % self.tmpdir)
cmd_line = ('%s --silent run --sysinfo=off --job-results-dir %s '
'passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
expected_output = ''
......@@ -392,7 +394,7 @@ class RunnerOperationTest(unittest.TestCase):
def test_empty_args_list(self):
os.chdir(basedir)
cmd_line = './scripts/avocado'
cmd_line = AVOCADO
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_FAIL
expected_output = 'error: too few arguments'
......@@ -401,7 +403,8 @@ class RunnerOperationTest(unittest.TestCase):
def test_empty_test_list(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s' % self.tmpdir
cmd_line = '%s run --sysinfo=off --job-results-dir %s' % (AVOCADO,
self.tmpdir)
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_JOB_FAIL
expected_output = ('No test references provided nor any other '
......@@ -411,7 +414,8 @@ class RunnerOperationTest(unittest.TestCase):
def test_not_found(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s sbrubles' % self.tmpdir
cmd_line = ('%s run --sysinfo=off --job-results-dir %s sbrubles'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_JOB_FAIL
self.assertEqual(result.exit_status, expected_rc)
......@@ -419,25 +423,25 @@ class RunnerOperationTest(unittest.TestCase):
self.assertNotIn('Unable to resolve reference', result.stdout)
def test_invalid_unique_id(self):
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
'%s --force-job-id foobar passtest.py' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s --force-job-id '
'foobar passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertIn('needs to be a 40 digit hex', result.stderr)
self.assertNotIn('needs to be a 40 digit hex', result.stdout)
def test_valid_unique_id(self):
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
'passtest.py' % self.tmpdir)
'passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertNotIn('needs to be a 40 digit hex', result.stderr)
self.assertIn('PASS', result.stdout)
def test_automatic_unique_id(self):
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'passtest.py --json -' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py --json -' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
r = json.loads(result.stdout)
......@@ -449,8 +453,8 @@ class RunnerOperationTest(unittest.TestCase):
Tests that the `latest` link to the latest job results is created early
"""
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'examples/tests/passtest.py' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'examples/tests/passtest.py' % (AVOCADO, self.tmpdir))
avocado_process = process.SubProcess(cmd_line)
avocado_process.start()
link = os.path.join(self.tmpdir, 'latest')
......@@ -464,9 +468,9 @@ class RunnerOperationTest(unittest.TestCase):
def test_dry_run(self):
os.chdir(basedir)
cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
cmd = ("%s run --sysinfo=off passtest.py failtest.py "
"gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
" foo:bar:b foo:baz:c bar:bar:bar --dry-run")
" foo:bar:b foo:baz:c bar:bar:bar --dry-run" % AVOCADO)
result = json.loads(process.run(cmd).stdout)
debuglog = result['debuglog']
log = open(debuglog, 'r').read()
......@@ -491,8 +495,8 @@ class RunnerOperationTest(unittest.TestCase):
os.chdir(basedir)
test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
INVALID_PYTHON_TEST)
cmd_line = ('./scripts/avocado --show test run --sysinfo=off '
'--job-results-dir %s %s') % (self.tmpdir, test)
cmd_line = ('%s --show test run --sysinfo=off '
'--job-results-dir %s %s') % (AVOCADO, self.tmpdir, test)
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.assertEqual(result.exit_status, expected_rc,
......@@ -504,7 +508,7 @@ class RunnerOperationTest(unittest.TestCase):
@unittest.skipIf(not READ_BINARY, "read binary not available.")
def test_read(self):
os.chdir(basedir)
cmd = "./scripts/avocado run --sysinfo=off --job-results-dir %s %s"
cmd = "%s run --sysinfo=off --job-results-dir %%s %%s" % AVOCADO
cmd %= (self.tmpdir, READ_BINARY)
result = process.run(cmd, timeout=10, ignore_status=True)
self.assertLess(result.duration, 8, "Duration longer than expected."
......@@ -523,8 +527,8 @@ class RunnerHumanOutputTest(unittest.TestCase):
def test_output_pass(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'passtest.py' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -534,8 +538,8 @@ class RunnerHumanOutputTest(unittest.TestCase):
def test_output_fail(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'failtest.py' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'failtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.assertEqual(result.exit_status, expected_rc,
......@@ -545,8 +549,8 @@ class RunnerHumanOutputTest(unittest.TestCase):
def test_output_error(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'errortest.py' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'errortest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.assertEqual(result.exit_status, expected_rc,
......@@ -556,8 +560,8 @@ class RunnerHumanOutputTest(unittest.TestCase):
def test_output_cancel(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'cancelonsetup.py' % self.tmpdir)
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'cancelonsetup.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -570,10 +574,10 @@ class RunnerHumanOutputTest(unittest.TestCase):
'GNU style echo binary not available')
def test_ugly_echo_cmd(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --external-runner "%s -ne" '
cmd_line = ('%s run --external-runner "%s -ne" '
'"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
' --sysinfo=off --show-job-log' %
(GNU_ECHO_BINARY, self.tmpdir))
(AVOCADO, GNU_ECHO_BINARY, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -594,13 +598,13 @@ class RunnerHumanOutputTest(unittest.TestCase):
'1-foo\\\\n\\\'\\"\\\\nbar_baz')
def test_replay_skip_skipped(self):
cmd = ("./scripts/avocado run --job-results-dir %s --json - "
"cancelonsetup.py" % self.tmpdir)
cmd = ("%s run --job-results-dir %s --json - "
"cancelonsetup.py" % (AVOCADO, self.tmpdir))
result = process.run(cmd)
result = json.loads(result.stdout)
jobid = str(result["job_id"])
cmd = ("./scripts/avocado run --job-results-dir %s "
"--replay %s --replay-test-status PASS") % (self.tmpdir, jobid)
cmd = ("%s run --job-results-dir %s --replay %s "
"--replay-test-status PASS" % (AVOCADO, self.tmpdir, jobid))
process.run(cmd)
def tearDown(self):
......@@ -624,8 +628,8 @@ class RunnerSimpleTest(unittest.TestCase):
def test_simpletest_pass(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
' "%s"' % (self.tmpdir, self.pass_script.path))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
' "%s"' % (AVOCADO, self.tmpdir, self.pass_script.path))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -634,8 +638,8 @@ class RunnerSimpleTest(unittest.TestCase):
def test_simpletest_fail(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
' %s' % (self.tmpdir, self.fail_script.path))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
' %s' % (AVOCADO, self.tmpdir, self.fail_script.path))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.assertEqual(result.exit_status, expected_rc,
......@@ -652,8 +656,8 @@ class RunnerSimpleTest(unittest.TestCase):
"""
os.chdir(basedir)
one_hundred = 'failtest.py ' * 100
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
' %s' % (self.tmpdir, one_hundred))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
% (AVOCADO, self.tmpdir, one_hundred))
initial_time = time.time()
result = process.run(cmd_line, ignore_status=True)
actual_time = time.time() - initial_time
......@@ -670,8 +674,8 @@ class RunnerSimpleTest(unittest.TestCase):
os.chdir(basedir)
sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
'sleeptest.py')
cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
self.tmpdir, sleep_fail_sleep)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
% (AVOCADO, self.tmpdir, sleep_fail_sleep))
initial_time = time.time()
result = process.run(cmd_line, ignore_status=True)
actual_time = time.time() - initial_time
......@@ -685,8 +689,9 @@ class RunnerSimpleTest(unittest.TestCase):
simplewarning.sh uses the avocado-bash-utils
"""
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'examples/tests/simplewarning.sh --show-job-log'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -714,9 +719,10 @@ class RunnerSimpleTest(unittest.TestCase):
@unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
def test_kill_stopped_sleep(self):
proc = aexpect.Expect("./scripts/avocado run 60 --job-results-dir %s "
"--external-runner %s --sysinfo=off --job-timeout 3"
% (self.tmpdir, SLEEP_BINARY))
proc = aexpect.Expect("%s run 60 --job-results-dir %s "
"--external-runner %s --sysinfo=off "
"--job-timeout 3"
% (AVOCADO, self.tmpdir, SLEEP_BINARY))
proc.read_until_output_matches(["\(1/1\)"], timeout=3,
internal_timeout=0.01)
# We need pid of the avocado process, not the shell executing it
......@@ -777,8 +783,9 @@ class ExternalRunnerTest(unittest.TestCase):
def test_externalrunner_pass(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
cmd_line %= (self.tmpdir, self.pass_script.path)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--external-runner=/bin/sh %s'
% (AVOCADO, self.tmpdir, self.pass_script.path))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -787,8 +794,9 @@ class ExternalRunnerTest(unittest.TestCase):
def test_externalrunner_fail(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
cmd_line %= (self.tmpdir, self.fail_script.path)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--external-runner=/bin/sh %s'
% (AVOCADO, self.tmpdir, self.fail_script.path))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.assertEqual(result.exit_status, expected_rc,
......@@ -797,9 +805,9 @@ class ExternalRunnerTest(unittest.TestCase):
def test_externalrunner_chdir_no_testdir(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
'--external-runner-chdir=test %s')
cmd_line %= (self.tmpdir, self.pass_script.path)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--external-runner=/bin/sh --external-runner-chdir=test %s'
% (AVOCADO, self.tmpdir, self.pass_script.path))
result = process.run(cmd_line, ignore_status=True)
expected_output = ('Option "--external-runner-chdir=test" requires '
'"--external-runner-testdir" to be set')
......@@ -811,8 +819,8 @@ class ExternalRunnerTest(unittest.TestCase):
def test_externalrunner_no_url(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'--external-runner=%s' % (self.tmpdir, TRUE_CMD))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--external-runner=%s' % (AVOCADO, self.tmpdir, TRUE_CMD))
result = process.run(cmd_line, ignore_status=True)
expected_output = ('No test references provided nor any other '
'arguments resolved into tests')
......@@ -841,7 +849,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
def test_sysinfo_plugin(self):
os.chdir(basedir)
cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
cmd_line = '%s sysinfo %s' % (AVOCADO, self.base_outputdir)
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -852,7 +860,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
def test_list_plugin(self):
os.chdir(basedir)
cmd_line = './scripts/avocado list'
cmd_line = '%s list' % AVOCADO
result = process.run(cmd_line, ignore_status=True)
output = result.stdout
expected_rc = exit_codes.AVOCADO_ALL_OK
......@@ -863,7 +871,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
def test_list_error_output(self):
os.chdir(basedir)
cmd_line = './scripts/avocado list sbrubles'
cmd_line = '%s list sbrubles' % AVOCADO
result = process.run(cmd_line, ignore_status=True)
output = result.stderr
expected_rc = exit_codes.AVOCADO_FAIL
......@@ -874,7 +882,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
def test_plugin_list(self):
os.chdir(basedir)
cmd_line = './scripts/avocado plugins'
cmd_line = '%s plugins' % AVOCADO
result = process.run(cmd_line, ignore_status=True)
output = result.stdout
expected_rc = exit_codes.AVOCADO_ALL_OK
......@@ -886,7 +894,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
def test_config_plugin(self):
os.chdir(basedir)
cmd_line = './scripts/avocado config --paginator off'
cmd_line = '%s config --paginator off' % AVOCADO
result = process.run(cmd_line, ignore_status=True)
output = result.stdout
expected_rc = exit_codes.AVOCADO_ALL_OK
......@@ -897,7 +905,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
def test_config_plugin_datadir(self):
os.chdir(basedir)
cmd_line = './scripts/avocado config --datadir --paginator off'
cmd_line = '%s config --datadir --paginator off' % AVOCADO
result = process.run(cmd_line, ignore_status=True)
output = result.stdout
expected_rc = exit_codes.AVOCADO_ALL_OK
......@@ -908,7 +916,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
def test_disable_plugin(self):
os.chdir(basedir)
cmd_line = './scripts/avocado plugins'
cmd_line = '%s plugins' % AVOCADO
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -920,7 +928,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
config = script.TemporaryScript("disable_sysinfo_cmd.conf",
config_content)
with config:
cmd_line = './scripts/avocado --config %s plugins' % config
cmd_line = '%s --config %s plugins' % (AVOCADO, config)
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -940,9 +948,9 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
json and xunit output files *do* make into the archive.
"""
def run_config(config_path):
cmd = ('./scripts/avocado --config %s run passtest.py --archive '
cmd = ('%s --config %s run passtest.py --archive '
'--job-results-dir %s --sysinfo=off'
% (config_path, self.base_outputdir))
% (AVOCADO, config_path, self.base_outputdir))
result = process.run(cmd, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -959,7 +967,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
pass
os.chdir(basedir)
cmd_line = './scripts/avocado plugins'
cmd_line = '%s plugins' % AVOCADO
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -997,7 +1005,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
def test_Namespace_object_has_no_attribute(self):
os.chdir(basedir)
cmd_line = './scripts/avocado plugins'
cmd_line = '%s plugins' % AVOCADO
result = process.run(cmd_line, ignore_status=True)
output = result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
......@@ -1022,8 +1030,8 @@ class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):
def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
e_nnotfound, e_nfailures, e_nskip):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
' --xunit - %s' % (self.tmpdir, testname))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
' --xunit - %s' % (AVOCADO, self.tmpdir, testname))
result = process.run(cmd_line, ignore_status=True)
xml_output = result.stdout
self.assertEqual(result.exit_status, e_rc,
......@@ -1105,8 +1113,8 @@ class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):
def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
e_nfailures, e_nskip, e_ncancel=0, external_runner=None):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
(self.tmpdir, testname))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off --json - '
'--archive %s' % (AVOCADO, self.tmpdir, testname))
if external_runner is not None:
cmd_line += " --external-runner '%s'" % external_runner
result = process.run(cmd_line, ignore_status=True)
......
......@@ -11,7 +11,9 @@ from avocado.utils import script
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO_TEST_CANCEL = """
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
TEST_CANCEL = """
import avocado
class AvocadoCancelTest(avocado.Test):
......@@ -28,7 +30,7 @@ class AvocadoCancelTest(avocado.Test):
self.log.info('teardown code')
"""
AVOCADO_TEST_CANCEL_ON_SETUP = """
TEST_CANCEL_ON_SETUP = """
import avocado
class AvocadoCancelTest(avocado.Test):
......@@ -53,23 +55,23 @@ class TestCancel(unittest.TestCase):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
test_path = os.path.join(self.tmpdir, 'test_cancel.py')
self.test_cancel = script.Script(test_path,
AVOCADO_TEST_CANCEL)
self.test_cancel.save()
self._test_cancel = script.Script(test_path,
TEST_CANCEL)
self._test_cancel.save()
test_path = os.path.join(self.tmpdir, 'test_cancel_on_setup.py')
self.test_cancel_on_setup = script.Script(test_path,
AVOCADO_TEST_CANCEL_ON_SETUP)
self.test_cancel_on_setup.save()
self._test_cancel_on_setup = script.Script(test_path,
TEST_CANCEL_ON_SETUP)
self._test_cancel_on_setup.save()
def test_cancel(self):
os.chdir(basedir)
cmd_line = ['./scripts/avocado',
cmd_line = [AVOCADO,
'run',
'--sysinfo=off',
'--job-results-dir',
'%s' % self.tmpdir,
'%s' % self.test_cancel,
'%s' % self._test_cancel,
'--json -']
result = process.run(' '.join(cmd_line), ignore_status=True)
json_results = json.loads(result.stdout)
......@@ -83,12 +85,12 @@ class TestCancel(unittest.TestCase):
def test_cancel_on_setup(self):
os.chdir(basedir)
cmd_line = ['./scripts/avocado',
cmd_line = [AVOCADO,
'run',
'--sysinfo=off',
'--job-results-dir',
'%s' % self.tmpdir,
'%s' % self.test_cancel_on_setup,
'%s' % self._test_cancel_on_setup,
'--json -']
result = process.run(' '.join(cmd_line), ignore_status=True)
json_results = json.loads(result.stdout)
......
......@@ -11,6 +11,8 @@ from avocado.utils import script
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
SCRIPT_CONTENT = """#!/bin/sh
echo "Avocado Version: $AVOCADO_VERSION"
......@@ -46,7 +48,8 @@ class EnvironmentVariablesTest(unittest.TestCase):
def test_environment_vars(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=on %s' % (self.tmpdir, self.script.path)
cmd_line = ('%s run --job-results-dir %s --sysinfo=on %s'
% (AVOCADO, self.tmpdir, self.script.path))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......
......@@ -8,6 +8,8 @@ from avocado.utils import process
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
class GDBPluginTest(unittest.TestCase):
......@@ -16,16 +18,17 @@ class GDBPluginTest(unittest.TestCase):
def test_gdb_prerun_commands(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'--gdb-prerun-commands=/dev/null passtest.py' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--gdb-prerun-commands=/dev/null passtest.py'
% (AVOCADO, self.tmpdir))
process.run(cmd_line)
def test_gdb_multiple_prerun_commands(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--gdb-prerun-commands=/dev/null '
'--gdb-prerun-commands=foo:/dev/null passtest.py'
% self.tmpdir)
% (AVOCADO, self.tmpdir))
process.run(cmd_line)
def tearDown(self):
......
......@@ -16,6 +16,8 @@ from avocado.utils import data_factory
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
# What is commonly known as "0755" or "u=rwx,g=rx,o=rx"
DEFAULT_MODE = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
......@@ -69,11 +71,9 @@ class InterruptTest(unittest.TestCase):
bad_test.save()
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'%s %s %s' % (self.tmpdir,
bad_test.path,
bad_test.path,
bad_test.path))
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'%s %s %s' % (AVOCADO, self.tmpdir, bad_test.path,
bad_test.path, bad_test.path))
proc = aexpect.Expect(command=cmd_line, linesep='')
proc.read_until_last_line_matches(os.path.basename(bad_test.path))
proc.sendline('\x03')
......@@ -133,11 +133,9 @@ class InterruptTest(unittest.TestCase):
good_test.save()
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
'%s %s %s' % (self.tmpdir,
good_test.path,
good_test.path,
good_test.path))
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'%s %s %s' % (AVOCADO, self.tmpdir, good_test.path,
good_test.path, good_test.path))
proc = aexpect.Expect(command=cmd_line, linesep='')
proc.read_until_last_line_matches(os.path.basename(good_test.path))
proc.sendline('\x03')
......
......@@ -13,6 +13,8 @@ from avocado.utils import script
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
SCRIPT_CONTENT = """#!/bin/bash
sleep 2
......@@ -107,54 +109,60 @@ class JobTimeOutTest(unittest.TestCase):
% (idx, debug_log))
def test_sleep_longer_timeout(self):
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit - --job-timeout=5 %s examples/tests/passtest.py' %
(self.tmpdir, self.script.path))
(AVOCADO, self.tmpdir, self.script.path))
self.run_and_check(cmd_line, 0, 2, 0, 0, 0)
def test_sleep_short_timeout(self):
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit - --job-timeout=1 %s examples/tests/passtest.py' %
(self.tmpdir, self.script.path))
(AVOCADO, self.tmpdir, self.script.path))
self.run_and_check(cmd_line, exit_codes.AVOCADO_JOB_INTERRUPTED,
2, 1, 0, 1)
self._check_timeout_msg(1)
def test_sleep_short_timeout_with_test_methods(self):
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit - --job-timeout=1 %s' %
(self.tmpdir, self.py.path))
(AVOCADO, self.tmpdir, self.py.path))
self.run_and_check(cmd_line, exit_codes.AVOCADO_JOB_INTERRUPTED,
3, 1, 0, 2)
self._check_timeout_msg(1)
def test_invalid_values(self):
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'--job-timeout=1,5 examples/tests/passtest.py' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--job-timeout=1,5 examples/tests/passtest.py'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_FAIL)
self.assertIn('Invalid value', result.stderr)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'--job-timeout=123x examples/tests/passtest.py' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--job-timeout=123x examples/tests/passtest.py'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_FAIL)
self.assertIn('Invalid value', result.stderr)
def test_valid_values(self):
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'--job-timeout=123 examples/tests/passtest.py' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--job-timeout=123 examples/tests/passtest.py'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'--job-timeout=123s examples/tests/passtest.py' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--job-timeout=123s examples/tests/passtest.py'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'--job-timeout=123m examples/tests/passtest.py' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--job-timeout=123m examples/tests/passtest.py'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'--job-timeout=123h examples/tests/passtest.py' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--job-timeout=123h examples/tests/passtest.py'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
......
......@@ -11,14 +11,17 @@ from avocado.utils import process
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
class JournalPluginTests(unittest.TestCase):
def setUp(self):
os.chdir(basedir)
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
self.cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - '
'--journal examples/tests/passtest.py' % self.tmpdir)
self.cmd_line = ('%s run --job-results-dir %s --sysinfo=off --json - '
'--journal examples/tests/passtest.py'
% (AVOCADO, self.tmpdir))
self.result = process.run(self.cmd_line, ignore_status=True)
data = json.loads(self.result.stdout)
self.job_id = data['job_id']
......
......@@ -15,6 +15,8 @@ from avocado.utils import process
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
AVOCADO_TEST_OK = """#!/usr/bin/env python
from avocado import Test
......@@ -152,7 +154,7 @@ class LoaderTestFunctional(unittest.TestCase):
'avocado_loader_test',
mode=mode)
test_script.save()
cmd_line = ('./scripts/avocado list -V %s' % test_script.path)
cmd_line = ('%s list -V %s' % (AVOCADO, test_script.path))
result = process.run(cmd_line)
self.assertIn('%s: %s' % (exp_str, count), result.stdout)
test_script.remove()
......@@ -161,7 +163,7 @@ class LoaderTestFunctional(unittest.TestCase):
current_time = time.time()
deadline = current_time + timeout
test_process = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
preexec_fn=os.setsid)
preexec_fn=os.setsid, shell=True)
while not test_process.poll():
if time.time() > deadline:
os.killpg(os.getpgid(test_process.pid), signal.SIGKILL)
......@@ -188,7 +190,7 @@ class LoaderTestFunctional(unittest.TestCase):
'avocado_loader_test',
mode=self.MODE_0664)
test_script.save()
cmd_line = ('./scripts/avocado list -V %s' % test_script.path)
cmd_line = ('%s list -V %s' % (AVOCADO, test_script.path))
initial_time = time.time()
result = process.run(cmd_line, ignore_status=True)
test_script.remove()
......@@ -225,17 +227,13 @@ class LoaderTestFunctional(unittest.TestCase):
AVOCADO_SIMPLE_PYTHON_LIKE_MULTIPLE_FILES)
os.chdir(basedir)
mytest.save()
cmd_line = "./scripts/avocado list -V %s" % mytest
cmd_line = "%s list -V %s" % (AVOCADO, mytest)
result = process.run(cmd_line)
self.assertIn('SIMPLE: 1', result.stdout)
# job should be able to finish under 5 seconds. If this fails, it's
# possible that we hit the "simple test fork bomb" bug
cmd_line = ['./scripts/avocado',
'run',
'--sysinfo=off',
'--job-results-dir',
"%s" % self.tmpdir,
"%s" % mytest]
cmd_line = ("%s run --sysinfo=off --job-results-dir '%s' -- '%s'"
% (AVOCADO, self.tmpdir, mytest))
self._run_with_timeout(cmd_line, 5)
def test_simple_using_main(self):
......@@ -246,12 +244,8 @@ class LoaderTestFunctional(unittest.TestCase):
os.chdir(basedir)
# job should be able to finish under 5 seconds. If this fails, it's
# possible that we hit the "simple test fork bomb" bug
cmd_line = ['./scripts/avocado',
'run',
'--sysinfo=off',
'--job-results-dir',
"%s" % self.tmpdir,
"%s" % mytest]
cmd_line = ("%s run --sysinfo=off --job-results-dir '%s' -- '%s'"
% (AVOCADO, self.tmpdir, mytest))
self._run_with_timeout(cmd_line, 5)
def tearDown(self):
......
......@@ -12,6 +12,8 @@ from avocado.utils import process
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
DEBUG_OUT = """Variant 16: amd@examples/mux-environment.yaml, virtio@examples/mux-environment.yaml, mint@examples/mux-environment.yaml, debug@examples/mux-environment.yaml
/distro/mint:init => systemv@examples/mux-environment.yaml:/distro/mint
......@@ -43,67 +45,70 @@ class MultiplexTests(unittest.TestCase):
return result
def test_mplex_plugin(self):
cmd_line = './scripts/avocado multiplex -m examples/tests/sleeptest.py.data/sleeptest.yaml'
cmd_line = ('%s multiplex -m examples/tests/sleeptest.py.data/'
'sleeptest.yaml' % AVOCADO)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
def test_mplex_plugin_nonexistent(self):
cmd_line = './scripts/avocado multiplex -m nonexist'
cmd_line = '%s multiplex -m nonexist' % AVOCADO
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
self.assertIn('No such file or directory', result.stderr)
def test_mplex_debug(self):
cmd_line = ('./scripts/avocado multiplex -c -d -m '
cmd_line = ('%s multiplex -c -d -m '
'/:examples/mux-selftest.yaml '
'/:examples/mux-environment.yaml '
'/:examples/mux-selftest.yaml '
'/:examples/mux-environment.yaml')
'/:examples/mux-environment.yaml' % AVOCADO)
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
self.assertIn(DEBUG_OUT, result.stdout)
def test_run_mplex_noid(self):
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'-m examples/tests/sleeptest.py.data/sleeptest.yaml' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'-m examples/tests/sleeptest.py.data/sleeptest.yaml'
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_JOB_FAIL
self.run_and_check(cmd_line, expected_rc)
def test_run_mplex_passtest(self):
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py -m '
'examples/tests/sleeptest.py.data/sleeptest.yaml'
% self.tmpdir)
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc, (4, 0))
def test_run_mplex_doublepass(self):
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py passtest.py -m '
'examples/tests/sleeptest.py.data/sleeptest.yaml'
% self.tmpdir)
% (AVOCADO, self.tmpdir))
self.run_and_check(cmd_line, exit_codes.AVOCADO_ALL_OK, (8, 0))
def test_run_mplex_failtest(self):
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py failtest.py -m '
'examples/tests/sleeptest.py.data/sleeptest.yaml'
% self.tmpdir)
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.run_and_check(cmd_line, expected_rc, (4, 4))
def test_run_double_mplex(self):
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py -m '
'examples/tests/sleeptest.py.data/sleeptest.yaml '
'examples/tests/sleeptest.py.data/sleeptest.yaml'
% self.tmpdir)
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc, (4, 0))
def test_empty_file(self):
cmd_line = ("./scripts/avocado run --job-results-dir %s -m "
"selftests/.data/empty_file -- passtest.py" % self.tmpdir)
cmd_line = ("%s run --job-results-dir %s -m selftests/.data/empty_file"
" -- passtest.py"
% (AVOCADO, self.tmpdir))
result = self.run_and_check(cmd_line, exit_codes.AVOCADO_ALL_OK,
(1, 0))
......@@ -112,9 +117,11 @@ class MultiplexTests(unittest.TestCase):
('/run/medium', 'ASDFASDF'),
('/run/long', 'This is very long\nmultiline\ntext.')):
variant, msg = variant_msg
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off examples/tests/env_variables.sh '
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'examples/tests/env_variables.sh '
'-m examples/tests/env_variables.sh.data/env_variables.yaml '
'--filter-only %s --show-job-log' % (self.tmpdir, variant))
'--filter-only %s --show-job-log'
% (AVOCADO, self.tmpdir, variant))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
......
......@@ -17,11 +17,13 @@ from avocado.utils import path as utils_path
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
PERL_TAP_PARSER_SNIPPET = """#!/bin/env perl
use TAP::Parser;
my $parser = TAP::Parser->new( { exec => ['./scripts/avocado', 'run', 'passtest.py', 'errortest.py', 'warntest.py', '--tap', '-', '--sysinfo', 'off', '--job-results-dir', '%s'] } );
my $parser = TAP::Parser->new( { exec => ['%s', 'run', 'passtest.py', 'errortest.py', 'warntest.py', '--tap', '-', '--sysinfo', 'off', '--job-results-dir', '%%s'] } );
while ( my $result = $parser->next ) {
$result->is_unknown && die "Unknown line \\"" . $result->as_string . "\\" in the TAP output!\n";
......@@ -29,7 +31,7 @@ while ( my $result = $parser->next ) {
$parser->parse_errors == 0 || die "Parser errors!\n";
$parser->is_good_plan || die "Plan is not a good plan!\n";
$parser->plan eq '1..3' || die "Plan does not match what was expected!\n";
"""
""" % AVOCADO
def image_output_uncapable():
......@@ -69,8 +71,8 @@ class OutputTest(unittest.TestCase):
"C compiler is required by the underlying doublefree.py test")
def test_output_doublefree(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'doublefree.py' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'doublefree.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
output = result.stdout + result.stderr
......@@ -112,8 +114,8 @@ class OutputPluginTest(unittest.TestCase):
def test_output_incompatible_setup(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'--xunit - --json - passtest.py' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit - --json - passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_FAIL
self.assertEqual(result.exit_status, expected_rc,
......@@ -131,8 +133,8 @@ class OutputPluginTest(unittest.TestCase):
"Uncapable of Avocado Result HTML plugin")
def test_output_incompatible_setup_2(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'--html - passtest.py' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--html - passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_JOB_FAIL
output = result.stdout + result.stderr
......@@ -146,9 +148,9 @@ class OutputPluginTest(unittest.TestCase):
def test_output_compatible_setup(self):
tmpfile = tempfile.mktemp()
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--journal --xunit %s --json - passtest.py' %
(self.tmpdir, tmpfile))
(AVOCADO, self.tmpdir, tmpfile))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
......@@ -168,9 +170,9 @@ class OutputPluginTest(unittest.TestCase):
def test_output_compatible_setup_2(self):
tmpfile = tempfile.mktemp()
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit - --json %s passtest.py' %
(self.tmpdir, tmpfile))
(AVOCADO, self.tmpdir, tmpfile))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
......@@ -198,9 +200,9 @@ class OutputPluginTest(unittest.TestCase):
tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
tmpfile3 = tempfile.mktemp(dir=tmpdir)
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'--xunit %s --json %s --html %s passtest.py' %
(self.tmpdir, tmpfile, tmpfile2, tmpfile3))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit %s --json %s --html %s passtest.py'
% (AVOCADO, self.tmpdir, tmpfile, tmpfile2, tmpfile3))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
......@@ -231,9 +233,9 @@ class OutputPluginTest(unittest.TestCase):
tmpfile2 = tempfile.mktemp()
os.chdir(basedir)
# Verify --silent can be supplied as app argument
cmd_line = ('./scripts/avocado --silent run --job-results-dir %s '
'--sysinfo=off --xunit %s --json %s passtest.py' %
(self.tmpdir, tmpfile, tmpfile2))
cmd_line = ('%s --silent run --job-results-dir %s '
'--sysinfo=off --xunit %s --json %s passtest.py'
% (AVOCADO, self.tmpdir, tmpfile, tmpfile2))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
......@@ -256,9 +258,10 @@ class OutputPluginTest(unittest.TestCase):
pass
def test_nonprintable_chars(self):
cmd_line = ("./scripts/avocado run --external-runner /bin/ls "
cmd_line = ("%s run --external-runner /bin/ls "
"'NON_EXISTING_FILE_WITH_NONPRINTABLE_CHARS_IN_HERE\x1b' "
"--job-results-dir %s --sysinfo=off" % self.tmpdir)
"--job-results-dir %s --sysinfo=off"
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
......@@ -276,8 +279,8 @@ class OutputPluginTest(unittest.TestCase):
def test_show_job_log(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'passtest.py --show-job-log' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py --show-job-log' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -293,9 +296,9 @@ class OutputPluginTest(unittest.TestCase):
def test_silent_trumps_show_job_log(self):
os.chdir(basedir)
# Also verify --silent can be supplied as run option
cmd_line = ('./scripts/avocado run --silent --job-results-dir %s '
'--sysinfo=off passtest.py --show-job-log' %
self.tmpdir)
cmd_line = ('%s run --silent --job-results-dir %s '
'--sysinfo=off passtest.py --show-job-log'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
......@@ -306,8 +309,8 @@ class OutputPluginTest(unittest.TestCase):
def test_default_enabled_plugins(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'passtest.py' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
......@@ -332,9 +335,9 @@ class OutputPluginTest(unittest.TestCase):
content = ("[datadir.paths]\nlogs_dir = %s"
% os.path.relpath(self.tmpdir, "."))
script.Script(config, content).save()
cmd_line = ('./scripts/avocado --config %s --show all run '
'--sysinfo=off whiteboard.py --json %s' %
(config, tmpfile))
cmd_line = ('%s --config %s --show all run '
'--sysinfo=off whiteboard.py --json %s'
% (AVOCADO, config, tmpfile))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -359,9 +362,9 @@ class OutputPluginTest(unittest.TestCase):
tmpfile = tempfile.mktemp()
try:
os.chdir(basedir)
cmd_line = ("./scripts/avocado run --job-results-dir %s "
cmd_line = ("%s run --job-results-dir %s "
"--sysinfo=off gendata.py --json %s" %
(self.tmpdir, tmpfile))
(AVOCADO, self.tmpdir, tmpfile))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -398,9 +401,9 @@ class OutputPluginTest(unittest.TestCase):
redirected_output_path = tempfile.mktemp()
try:
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s '
'--sysinfo=off passtest.py > %s' %
(self.tmpdir, redirected_output_path))
cmd_line = ('%s run --job-results-dir %s '
'--sysinfo=off passtest.py > %s'
% (AVOCADO, self.tmpdir, redirected_output_path))
result = process.run(cmd_line, ignore_status=True, shell=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
......@@ -433,10 +436,10 @@ class OutputPluginTest(unittest.TestCase):
def test_tap_totaltests(self):
os.chdir(basedir)
cmd_line = ("./scripts/avocado run passtest.py "
cmd_line = ("%s run passtest.py "
"-m examples/tests/sleeptest.py.data/sleeptest.yaml "
"--job-results-dir %s "
"--tap -" % self.tmpdir)
"--tap -" % (AVOCADO, self.tmpdir))
result = process.run(cmd_line)
expr = '1..4'
self.assertIn(expr, result.stdout, "'%s' not found in:\n%s"
......@@ -444,7 +447,7 @@ class OutputPluginTest(unittest.TestCase):
def test_broken_pipe(self):
os.chdir(basedir)
cmd_line = "(./scripts/avocado run --help | whacky-unknown-command)"
cmd_line = "(%s run --help | whacky-unknown-command)" % AVOCADO
result = process.run(cmd_line, shell=True, ignore_status=True,
env={"LC_ALL": "C"})
expected_rc = 127
......
......@@ -12,6 +12,7 @@ basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
OUTPUT_SCRIPT_CONTENTS = """#!/bin/sh
echo "Hello, avocado!"
"""
......@@ -29,8 +30,9 @@ class RunnerSimpleTest(unittest.TestCase):
def test_output_record_none(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off %s --output-check-record none' %
(self.tmpdir, self.output_script.path))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s '
'--output-check-record none'
% (AVOCADO, self.tmpdir, self.output_script.path))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -43,8 +45,9 @@ class RunnerSimpleTest(unittest.TestCase):
def test_output_record_stdout(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off %s --output-check-record stdout' %
(self.tmpdir, self.output_script.path))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s '
'--output-check-record stdout'
% (AVOCADO, self.tmpdir, self.output_script.path))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -57,8 +60,9 @@ class RunnerSimpleTest(unittest.TestCase):
def test_output_record_all(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off %s --output-check-record all' %
(self.tmpdir, self.output_script.path))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s '
'--output-check-record all'
% (AVOCADO, self.tmpdir, self.output_script.path))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -71,8 +75,8 @@ class RunnerSimpleTest(unittest.TestCase):
def test_output_record_and_check(self):
self.test_output_record_all()
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off %s' %
(self.tmpdir, self.output_script.path))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
% (AVOCADO, self.tmpdir, self.output_script.path))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -85,8 +89,8 @@ class RunnerSimpleTest(unittest.TestCase):
stdout_file = os.path.join("%s.data/stdout.expected" % self.output_script.path)
with open(stdout_file, 'w') as stdout_file_obj:
stdout_file_obj.write(tampered_msg)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off %s --xunit -' %
(self.tmpdir, self.output_script.path))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s --xunit -'
% (AVOCADO, self.tmpdir, self.output_script.path))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.assertEqual(result.exit_status, expected_rc,
......@@ -100,8 +104,9 @@ class RunnerSimpleTest(unittest.TestCase):
stdout_file = os.path.join("%s.data/stdout.expected" % self.output_script.path)
with open(stdout_file, 'w') as stdout_file_obj:
stdout_file_obj.write(tampered_msg)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off %s --output-check=off --xunit -' %
(self.tmpdir, self.output_script.path))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s '
'--output-check=off --xunit -'
% (AVOCADO, self.tmpdir, self.output_script.path))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......
......@@ -14,25 +14,27 @@ from avocado.utils import script
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
class DiffTests(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
test = script.make_script(os.path.join(self.tmpdir, 'test'), 'exit 0')
cmd_line = ('./scripts/avocado run %s '
cmd_line = ('%s run %s '
'--external-runner /bin/bash '
'--job-results-dir %s --sysinfo=off --json -' %
(test, self.tmpdir))
(AVOCADO, test, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
self.jobdir = ''.join(glob.glob(os.path.join(self.tmpdir, 'job-*')))
self.tmpdir2 = tempfile.mkdtemp(prefix='avocado_' + __name__)
cmd_line = ('./scripts/avocado run %s '
cmd_line = ('%s run %s '
'--external-runner /bin/bash '
'--job-results-dir %s --sysinfo=off --json -' %
(test, self.tmpdir2))
(AVOCADO, test, self.tmpdir2))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
self.jobdir2 = ''.join(glob.glob(os.path.join(self.tmpdir2, 'job-*')))
......@@ -46,8 +48,8 @@ class DiffTests(unittest.TestCase):
return result
def test_diff(self):
cmd_line = ('./scripts/avocado diff %s %s' %
(self.jobdir, self.jobdir2))
cmd_line = ('%s diff %s %s' %
(AVOCADO, self.jobdir, self.jobdir2))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = "# COMMAND LINE"
......@@ -58,8 +60,8 @@ class DiffTests(unittest.TestCase):
self.assertIn(msg, result.stdout)
def test_diff_nocmdline(self):
cmd_line = ('./scripts/avocado diff %s %s --diff-filter nocmdline' %
(self.jobdir, self.jobdir2))
cmd_line = ('%s diff %s %s --diff-filter nocmdline' %
(AVOCADO, self.jobdir, self.jobdir2))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = "# COMMAND LINE"
......
......@@ -8,6 +8,8 @@ from avocado.utils import process
from avocado.utils import script
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
SCRIPT_PRE_TOUCH = """#!/bin/sh -e
touch %s"""
......@@ -66,8 +68,9 @@ class JobScriptsTest(unittest.TestCase):
SCRIPT_PRE_POST_CFG % (self.pre_dir,
self.post_dir))
with config:
cmd = ('./scripts/avocado --config %s run --job-results-dir %s '
'--sysinfo=off %s' % (config, self.tmpdir, test_check_touch))
cmd = ('%s --config %s run --job-results-dir %s '
'--sysinfo=off %s'
% (AVOCADO, config, self.tmpdir, test_check_touch))
result = process.run(cmd)
# Pre/Post scripts failures do not (currently?) alter the exit status
......@@ -88,8 +91,9 @@ class JobScriptsTest(unittest.TestCase):
config = script.TemporaryScript("non_zero.conf",
SCRIPT_NON_ZERO_CFG % self.pre_dir)
with config:
cmd = ('./scripts/avocado --config %s run --job-results-dir %s '
'--sysinfo=off passtest.py' % (config, self.tmpdir))
cmd = ('%s --config %s run --job-results-dir %s '
'--sysinfo=off passtest.py' % (AVOCADO, config,
self.tmpdir))
result = process.run(cmd)
# Pre/Post scripts failures do not (currently?) alter the exit status
......@@ -110,8 +114,9 @@ class JobScriptsTest(unittest.TestCase):
config = script.TemporaryScript("non_existing_dir.conf",
SCRIPT_NON_EXISTING_DIR_CFG % self.pre_dir)
with config:
cmd = ('./scripts/avocado --config %s run --job-results-dir %s '
'--sysinfo=off passtest.py' % (config, self.tmpdir))
cmd = ('%s --config %s run --job-results-dir %s '
'--sysinfo=off passtest.py' % (AVOCADO, config,
self.tmpdir))
result = process.run(cmd)
# Pre/Post scripts failures do not (currently?) alter the exit status
......
......@@ -13,15 +13,17 @@ from avocado.utils import process
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
class ReplayTests(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
cmd_line = ('./scripts/avocado run passtest.py '
cmd_line = ('%s run passtest.py '
'-m examples/tests/sleeptest.py.data/sleeptest.yaml '
'--job-results-dir %s --sysinfo=off --json -' %
self.tmpdir)
'--job-results-dir %s --sysinfo=off --json -'
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
self.jobdir = ''.join(glob.glob(os.path.join(self.tmpdir, 'job-*')))
......@@ -41,9 +43,9 @@ class ReplayTests(unittest.TestCase):
"""
Runs a replay job with an invalid jobid.
"""
cmd_line = ('./scripts/avocado run --replay %s '
'--job-results-dir %s --sysinfo=off' %
('foo', self.tmpdir))
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, 'foo', self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
self.run_and_check(cmd_line, expected_rc)
......@@ -51,8 +53,8 @@ class ReplayTests(unittest.TestCase):
"""
Runs a replay job using the 'latest' keyword.
"""
cmd_line = ('./scripts/avocado run --replay latest '
'--job-results-dir %s --sysinfo=off' % self.tmpdir)
cmd_line = ('%s run --replay latest --job-results-dir %s --sysinfo=off'
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
......@@ -70,9 +72,9 @@ class ReplayTests(unittest.TestCase):
"""
Runs a replay job.
"""
cmd_line = ('./scripts/avocado run --replay %s '
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (self.jobid, self.tmpdir))
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
......@@ -81,9 +83,9 @@ class ReplayTests(unittest.TestCase):
Runs a replay job with a partial jobid.
"""
partial_id = self.jobid[:5]
cmd_line = ('./scripts/avocado run --replay %s '
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (partial_id, self.tmpdir))
% (AVOCADO, partial_id, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
......@@ -91,9 +93,9 @@ class ReplayTests(unittest.TestCase):
"""
Runs a replay job identifying the job by its results directory.
"""
cmd_line = ('./scripts/avocado run --replay %s '
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (self.jobdir, self.tmpdir))
% (AVOCADO, self.jobdir, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
......@@ -101,9 +103,9 @@ class ReplayTests(unittest.TestCase):
"""
Runs a replay job with an invalid option for '--replay-ignore'
"""
cmd_line = ('./scripts/avocado run --replay %s --replay-ignore foo'
cmd_line = ('%s run --replay %s --replay-ignore foo'
'--job-results-dir %s --sysinfo=off'
% (self.jobid, self.tmpdir))
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Invalid --replay-ignore option. Valid options are ' \
......@@ -114,9 +116,9 @@ class ReplayTests(unittest.TestCase):
"""
Runs a replay job ignoring the variants.
"""
cmd_line = ('./scripts/avocado run --replay %s --replay-ignore variants '
cmd_line = ('%s run --replay %s --replay-ignore variants '
'--job-results-dir %s --sysinfo=off'
% (self.jobid, self.tmpdir))
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Ignoring variants from source job with --replay-ignore.'
......@@ -126,9 +128,9 @@ class ReplayTests(unittest.TestCase):
"""
Runs a replay job with an invalid option for '--replay-test-status'
"""
cmd_line = ('./scripts/avocado run --replay %s --replay-test-status E '
cmd_line = ('%s run --replay %s --replay-test-status E '
'--job-results-dir %s --sysinfo=off'
% (self.jobid, self.tmpdir))
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Invalid --replay-test-status option. Valid options are (more ' \
......@@ -139,9 +141,9 @@ class ReplayTests(unittest.TestCase):
"""
Runs a replay job only with tests that failed.
"""
cmd_line = ('./scripts/avocado run --replay %s --replay-test-status '
'FAIL --job-results-dir %s --sysinfo=off' %
(self.jobid, self.tmpdir))
cmd_line = ('%s run --replay %s --replay-test-status '
'FAIL --job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = 'RESULTS : PASS 0 | ERROR 0 | FAIL 0 | SKIP 4 | WARN 0 | INTERRUPT 0'
......@@ -151,9 +153,9 @@ class ReplayTests(unittest.TestCase):
"""
Runs a replay job using remote plugin (not supported).
"""
cmd_line = ('./scripts/avocado run --replay %s --remote-hostname '
'localhost --job-results-dir %s --sysinfo=off' %
(self.jobid, self.tmpdir))
cmd_line = ('%s run --replay %s --remote-hostname '
'localhost --job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = "Currently we don't replay jobs in remote hosts."
......@@ -163,9 +165,9 @@ class ReplayTests(unittest.TestCase):
"""
Runs a replay job with custom variants using '--replay-test-status'
"""
cmd_line = ('./scripts/avocado run --replay %s --replay-ignore variants '
cmd_line = ('%s run --replay %s --replay-ignore variants '
'--replay-test-status FAIL --job-results-dir %s '
'--sysinfo=off' % (self.jobid, self.tmpdir))
'--sysinfo=off' % (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = ("Option `--replay-test-status` is incompatible with "
......@@ -176,9 +178,9 @@ class ReplayTests(unittest.TestCase):
"""
Runs a replay job with custom test references and --replay-test-status
"""
cmd_line = ('./scripts/avocado run sleeptest --replay %s '
cmd_line = ('%s run sleeptest --replay %s '
'--replay-test-status FAIL --job-results-dir %s '
'--sysinfo=off' % (self.jobid, self.tmpdir))
'--sysinfo=off' % (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = ("Option --replay-test-status is incompatible with "
......@@ -191,9 +193,9 @@ class ReplayTests(unittest.TestCase):
"""
shutil.move(os.path.join(self.jobdir, 'jobdata'),
os.path.join(self.jobdir, 'replay'))
cmd_line = ('./scripts/avocado run --replay %s '
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (self.jobid, self.tmpdir))
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
......@@ -202,9 +204,9 @@ class ReplayTests(unittest.TestCase):
Runs a replay job and specifies multiplex file (which should be
ignored)
"""
cmdline = ("./scripts/avocado run --replay %s --job-results-dir %s "
cmdline = ("%s run --replay %s --job-results-dir %s "
"--sysinfo=off -m examples/mux-selftest.yaml"
% (self.jobid, self.tmpdir))
% (AVOCADO, self.jobid, self.tmpdir))
self.run_and_check(cmdline, exit_codes.AVOCADO_ALL_OK)
def tearDown(self):
......
......@@ -14,17 +14,19 @@ from avocado.utils import script
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
class ReplayExtRunnerTests(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
test = script.make_script(os.path.join(self.tmpdir, 'test'), 'exit 0')
cmd_line = ('./scripts/avocado run %s '
cmd_line = ('%s run %s '
'-m examples/tests/sleeptest.py.data/sleeptest.yaml '
'--external-runner /bin/bash '
'--job-results-dir %s --sysinfo=off --json -' %
(test, self.tmpdir))
'--job-results-dir %s --sysinfo=off --json -'
% (AVOCADO, test, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
self.jobdir = ''.join(glob.glob(os.path.join(self.tmpdir, 'job-*')))
......@@ -41,10 +43,10 @@ class ReplayExtRunnerTests(unittest.TestCase):
return result
def test_run_replay_external_runner(self):
cmd_line = ('./scripts/avocado run --replay %s '
cmd_line = ('%s run --replay %s '
'--external-runner /bin/sh '
'--job-results-dir %s --sysinfo=off' %
(self.jobid, self.tmpdir))
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = "Overriding the replay external-runner with the "\
......
......@@ -13,14 +13,16 @@ from avocado.utils import process
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
class ReplayFailfastTests(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
cmd_line = ('./scripts/avocado run passtest.py failtest.py passtest.py '
cmd_line = ('%s run passtest.py failtest.py passtest.py '
'--failfast on --job-results-dir %s --sysinfo=off --json -'
% self.tmpdir)
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
self.run_and_check(cmd_line, expected_rc)
self.jobdir = ''.join(glob.glob(os.path.join(self.tmpdir, 'job-*')))
......@@ -37,16 +39,16 @@ class ReplayFailfastTests(unittest.TestCase):
return result
def test_run_replay_failfast(self):
cmd_line = ('./scripts/avocado run --replay %s '
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (self.jobid, self.tmpdir))
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
result = self.run_and_check(cmd_line, expected_rc)
def test_run_replay_disable_failfast(self):
cmd_line = ('./scripts/avocado run --replay %s --failfast off '
cmd_line = ('%s run --replay %s --failfast off '
'--job-results-dir %s --sysinfo=off'
% (self.jobid, self.tmpdir))
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Overriding the replay failfast with the --failfast value given on the command line.'
......
......@@ -11,6 +11,7 @@ from avocado.utils import script
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
AVOCADO_TEST_SKIP_DECORATORS = """
import avocado
......@@ -105,7 +106,7 @@ class TestSkipDecorators(unittest.TestCase):
def test_skip_decorators(self):
os.chdir(basedir)
cmd_line = ['./scripts/avocado',
cmd_line = [AVOCADO,
'run',
'--sysinfo=off',
'--job-results-dir',
......
......@@ -10,6 +10,8 @@ from avocado.utils import process
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
class StreamsTest(unittest.TestCase):
......@@ -20,7 +22,7 @@ class StreamsTest(unittest.TestCase):
"""
Checks that the application output (<= level info) goes to stdout
"""
result = process.run('./scripts/avocado distro')
result = process.run('%s distro' % AVOCADO)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertIn('Detected distribution', result.stdout)
self.assertEqual('', result.stderr)
......@@ -29,7 +31,7 @@ class StreamsTest(unittest.TestCase):
"""
Checks that the application error (> level info) goes to stderr
"""
result = process.run('./scripts/avocado unknown-whacky-command',
result = process.run('%s unknown-whacky-command' % AVOCADO,
ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_FAIL)
self.assertIn("invalid choice: 'unknown-whacky-command'",
......@@ -46,10 +48,12 @@ class StreamsTest(unittest.TestCase):
Also checks the symmetry between `--show early` and the environment
variable `AVOCADO_LOG_EARLY` being set.
"""
cmds = (('./scripts/avocado --show early run --sysinfo=off '
'--job-results-dir %s passtest.py' % self.tmpdir, {}),
('./scripts/avocado run --sysinfo=off --job-results-dir'
' %s passtest.py' % self.tmpdir, {'AVOCADO_LOG_EARLY': 'y'}))
cmds = (('%s --show early run --sysinfo=off '
'--job-results-dir %s passtest.py' % (AVOCADO, self.tmpdir),
{}),
('%s run --sysinfo=off --job-results-dir'
' %s passtest.py' % (AVOCADO, self.tmpdir),
{'AVOCADO_LOG_EARLY': 'y'}))
for cmd, env in cmds:
result = process.run(cmd, env=env, shell=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
......@@ -65,10 +69,10 @@ class StreamsTest(unittest.TestCase):
Also checks the symmetry between `--show test` and `--show-job-log`
"""
for cmd in (('./scripts/avocado --show test run --sysinfo=off '
'--job-results-dir %s passtest.py' % self.tmpdir),
('./scripts/avocado run --show-job-log --sysinfo=off '
'--job-results-dir %s passtest.py' % self.tmpdir)):
for cmd in (('%s --show test run --sysinfo=off --job-results-dir %s '
'passtest.py' % (AVOCADO, self.tmpdir)),
('%s run --show-job-log --sysinfo=off --job-results-dir %s'
' passtest.py' % (AVOCADO, self.tmpdir))):
result = process.run(cmd)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertNotIn("stevedore.extension: found extension EntryPoint.parse",
......@@ -88,10 +92,10 @@ class StreamsTest(unittest.TestCase):
Also checks the symmetry between `--show none` and `--silent`
"""
for cmd in (('./scripts/avocado --show none run --sysinfo=off '
'--job-results-dir %s passtest.py' % self.tmpdir),
('./scripts/avocado --silent run --sysinfo=off '
'--job-results-dir %s passtest.py' % self.tmpdir)):
for cmd in (('%s --show none run --sysinfo=off --job-results-dir %s '
'passtest.py' % (AVOCADO, self.tmpdir)),
('%s --silent run --sysinfo=off --job-results-dir %s '
'passtest.py' % (AVOCADO, self.tmpdir))):
result = process.run(cmd)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertEqual('', result.stdout)
......@@ -103,8 +107,8 @@ class StreamsTest(unittest.TestCase):
Also checks the symmetry between `--show none` and `--silent`
"""
for cmd in ('./scripts/avocado --show none unknown-whacky-command',
'./scripts/avocado --silent unknown-whacky-command'):
for cmd in ('%s --show none unknown-whacky-command' % AVOCADO,
'%s --silent unknown-whacky-command' % AVOCADO):
result = process.run(cmd, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_FAIL)
self.assertEqual('', result.stdout)
......@@ -115,7 +119,7 @@ class StreamsTest(unittest.TestCase):
Checks if "--show stream:level" works for non-built-in-streams
"""
def run(show, no_lines):
result = process.run("./scripts/avocado --show %s config" % show)
result = process.run("%s --show %s config" % (AVOCADO, show))
out = (result.stdout + result.stderr).splitlines()
if no_lines == "more_than_one":
self.assertGreater(len(out), 1, "Output of %s should contain "
......
......@@ -11,6 +11,7 @@ from avocado.utils import script
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
COMMANDS_TIMEOUT_CONF = """
[sysinfo.collect]
......@@ -28,8 +29,8 @@ class SysInfoTest(unittest.TestCase):
def test_sysinfo_enabled(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=on '
'passtest.py' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=on '
'passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -54,8 +55,8 @@ class SysInfoTest(unittest.TestCase):
def test_sysinfo_disabled(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
'passtest.py' % self.tmpdir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off passtest.py'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -82,9 +83,9 @@ class SysInfoTest(unittest.TestCase):
config_path = os.path.join(self.tmpdir, "config.conf")
script.make_script(config_path,
COMMANDS_TIMEOUT_CONF % (timeout, commands_path))
cmd_line = ("./scripts/avocado --show all --config %s run "
"--job-results-dir %s --sysinfo=on passtest.py"
% (config_path, self.tmpdir))
cmd_line = ("%s --show all --config %s run --job-results-dir %s "
"--sysinfo=on passtest.py"
% (AVOCADO, config_path, self.tmpdir))
result = process.run(cmd_line)
if timeout > 0:
self.assertLess(result.duration, exp_duration, "Execution took "
......
......@@ -14,6 +14,8 @@ from avocado.utils import script
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
INSTRUMENTED_SCRIPT = """import os
import tempfile
from avocado import Test
......@@ -67,9 +69,10 @@ class TestsTmpDirTests(unittest.TestCase):
Tests whether automatically created teststmpdir is shared across
all tests.
"""
cmd_line = ("./scripts/avocado run --sysinfo=off "
"--job-results-dir %s %s %s" %
(self.tmpdir, self.simple_test, self.instrumented_test))
cmd_line = ("%s run --sysinfo=off "
"--job-results-dir %s %s %s"
% (AVOCADO, self.tmpdir, self.simple_test,
self.instrumented_test))
self.run_and_check(cmd_line, exit_codes.AVOCADO_ALL_OK)
def test_manualy_created(self):
......@@ -78,8 +81,8 @@ class TestsTmpDirTests(unittest.TestCase):
avocado
"""
shared_tmp = tempfile.mkdtemp(dir=self.tmpdir)
cmd = ("./scripts/avocado run --sysinfo=off "
"--job-results-dir %s %%s" % self.tmpdir)
cmd = ("%s run --sysinfo=off --job-results-dir %s %%s"
% (AVOCADO, self.tmpdir))
self.run_and_check(cmd % self.simple_test, exit_codes.AVOCADO_ALL_OK,
{test.COMMON_TMPDIR_NAME: shared_tmp})
self.run_and_check(cmd % self.instrumented_test,
......
......@@ -12,6 +12,7 @@ from avocado.utils import path as utils_path
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
SCRIPT_CONTENT = """#!/bin/bash
touch %s
......@@ -51,8 +52,9 @@ class WrapperTest(unittest.TestCase):
"C compiler is required by the underlying datadir.py test")
def test_global_wrapper(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --wrapper %s '
'examples/tests/datadir.py' % (self.tmpdir, self.script.path))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off --wrapper %s '
'examples/tests/datadir.py'
% (AVOCADO, self.tmpdir, self.script.path))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -67,8 +69,9 @@ class WrapperTest(unittest.TestCase):
"C compiler is required by the underlying datadir.py test")
def test_process_wrapper(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --wrapper %s:*/datadir '
'examples/tests/datadir.py' % (self.tmpdir, self.script.path))
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--wrapper %s:*/datadir examples/tests/datadir.py'
% (AVOCADO, self.tmpdir, self.script.path))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
......@@ -83,8 +86,9 @@ class WrapperTest(unittest.TestCase):
"C compiler is required by the underlying datadir.py test")
def test_both_wrappers(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --wrapper %s --wrapper %s:*/datadir '
'examples/tests/datadir.py' % (self.tmpdir, self.dummy.path,
cmd_line = ('%s run --job-results-dir %s --sysinfo=off --wrapper %s '
'--wrapper %s:*/datadir examples/tests/datadir.py'
% (AVOCADO, self.tmpdir, self.dummy.path,
self.script.path))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
......
......@@ -6,7 +6,9 @@
# Author: Lukas Doktor <ldoktor@redhat.com>
coverage erase
coverage run --include "avocado/*" ./selftests/run
rm .coverage.*
AVOCADO_CHECK_FULL=1 UNITTEST_AVOCADO_CMD="coverage run -p --include 'avocado/*' ./scripts/avocado" coverage run -p --include "avocado/*" ./selftests/run
coverage combine .coverage*
echo
coverage report -m --include "avocado/core/*"
echo
......
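Note on the run_coverage change above: the selftests now spawn avocado through "coverage run -p" as well, so both the selftest runner and every avocado child process write their own parallel .coverage.* data files, which "coverage combine" merges before the report is produced. A minimal sketch of reproducing the combined unit plus functional coverage run by hand, assuming a source checkout that provides ./selftests/run and ./scripts/avocado as used throughout this diff:

#!/bin/sh
# Sketch only, following the run_coverage change above; assumes
# ./selftests/run and ./scripts/avocado exist in the current checkout.
coverage erase
rm -f .coverage.*

# UNITTEST_AVOCADO_CMD is read by the selftests and used in place of
# './scripts/avocado', so the avocado processes they start are traced too;
# -p (parallel mode) gives each process its own .coverage.* data file.
AVOCADO_CHECK_FULL=1 \
UNITTEST_AVOCADO_CMD="coverage run -p --include 'avocado/*' ./scripts/avocado" \
    coverage run -p --include "avocado/*" ./selftests/run

# Merge the per-process data files and print the report.
coverage combine .coverage*
coverage report -m --include "avocado/core/*"

The single environment variable keeps the test code unchanged for normal runs: when UNITTEST_AVOCADO_CMD is unset, the default './scripts/avocado' is used, so only run_coverage needs to know about the coverage wrapper.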