Commit 0ed93c08 authored by Cleber Rosa

selftests: replace sleeptest with passtest

This patch replaces most of the calls to sleeptest in our functional
tests with passtest, in an attempt to reduce the time it takes to run
them without invalidating their purpose.

Signed-off-by: Cleber Rosa <crosa@redhat.com>
Parent 01083317
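For context, the practical difference between the two example tests is that passtest returns immediately while sleeptest blocks on a real time.sleep() call, so every selftest that runs it pays that delay in wall-clock time. Below is a minimal sketch of the two tests written against the avocado instrumented-test API; the class names, docstrings and the fixed one-second sleep are illustrative and not necessarily identical to examples/tests/passtest.py and examples/tests/sleeptest.py as they existed at the time of this commit:

```python
import time

from avocado import Test


class PassTest(Test):

    """
    Example test that passes: the test body does nothing, so the test
    finishes (and PASSes) immediately.
    """

    def test(self):
        pass


class SleepTest(Test):

    """
    Example test that sleeps before passing; every selftest that runs
    it adds real wall-clock time to the suite (the one-second sleep
    here is illustrative).
    """

    def test(self):
        time.sleep(1)
```

The selftests below only need an example test that reliably PASSes (failtest, skiptest and errortest keep covering the other outcomes), so swapping sleeptest for passtest removes the artificial delay without changing what each selftest asserts.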
@@ -68,7 +68,7 @@ class RunnerOperationTest(unittest.TestCase):
 
     def test_runner_all_ok(self):
         os.chdir(basedir)
-        cmd_line = './scripts/avocado run sleeptest sleeptest'
+        cmd_line = './scripts/avocado run passtest passtest'
         process.run(cmd_line)
 
     def test_datadir_alias(self):
@@ -83,12 +83,12 @@ class RunnerOperationTest(unittest.TestCase):
 
     def test_runner_noalias(self):
         os.chdir(basedir)
-        cmd_line = "./scripts/avocado run examples/tests/sleeptest.py examples/tests/sleeptest.py"
+        cmd_line = "./scripts/avocado run examples/tests/passtest.py examples/tests/passtest.py"
         process.run(cmd_line)
 
     def test_runner_tests_fail(self):
         os.chdir(basedir)
-        cmd_line = './scripts/avocado run sleeptest failtest sleeptest'
+        cmd_line = './scripts/avocado run passtest failtest passtest'
         result = process.run(cmd_line, ignore_status=True)
         expected_rc = 1
         self.assertEqual(result.exit_status, expected_rc,
@@ -151,7 +151,7 @@ class RunnerOperationTest(unittest.TestCase):
 
     def test_silent_output(self):
         os.chdir(basedir)
-        cmd_line = './scripts/avocado run sleeptest --silent'
+        cmd_line = './scripts/avocado run passtest --silent'
         result = process.run(cmd_line, ignore_status=True)
         expected_rc = 0
         expected_output = ''
@@ -432,8 +432,8 @@ class PluginsXunitTest(PluginsTest):
                          "Unexpected number of test skips, "
                          "XML:\n%s" % xml_output)
 
-    def test_xunit_plugin_sleeptest(self):
-        self.run_and_check('sleeptest', 0, 1, 0, 0, 0, 0)
+    def test_xunit_plugin_passtest(self):
+        self.run_and_check('passtest', 0, 1, 0, 0, 0, 0)
 
     def test_xunit_plugin_failtest(self):
         self.run_and_check('failtest', 1, 1, 0, 0, 1, 0)
@@ -448,7 +448,7 @@ class PluginsXunitTest(PluginsTest):
         self.run_and_check('sbrubles', 1, 1, 1, 0, 0, 0)
 
     def test_xunit_plugin_mixedtest(self):
-        self.run_and_check('sleeptest failtest skiptest errortest sbrubles',
+        self.run_and_check('passtest failtest skiptest errortest sbrubles',
                            1, 5, 2, 0, 1, 1)
@@ -491,8 +491,8 @@ class PluginsJSONTest(PluginsTest):
         self.assertEqual(n_skip, e_nskip,
                          "Different number of skipped tests")
 
-    def test_json_plugin_sleeptest(self):
-        self.run_and_check('sleeptest', 0, 1, 0, 0, 0, 0)
+    def test_json_plugin_passtest(self):
+        self.run_and_check('passtest', 0, 1, 0, 0, 0, 0)
 
     def test_json_plugin_failtest(self):
         self.run_and_check('failtest', 1, 1, 0, 0, 1, 0)
@@ -507,7 +507,7 @@ class PluginsJSONTest(PluginsTest):
         self.run_and_check('sbrubles', 1, 1, 0, 1, 0, 0)
 
     def test_json_plugin_mixedtest(self):
-        self.run_and_check('sleeptest failtest skiptest errortest sbrubles',
+        self.run_and_check('passtest failtest skiptest errortest sbrubles',
                            1, 5, 1, 1, 1, 1)
 
 if __name__ == '__main__':
@@ -33,7 +33,7 @@ class JournalPluginTests(unittest.TestCase):
 
     def setUp(self):
         os.chdir(basedir)
-        self.cmd_line = './scripts/avocado run --json - --journal examples/tests/sleeptest.py'
+        self.cmd_line = './scripts/avocado run --json - --journal examples/tests/passtest.py'
         self.result = process.run(self.cmd_line, ignore_status=True)
         data = json.loads(self.result.stdout)
         self.job_id = data['job_id']
@@ -61,7 +61,7 @@ class OutputPluginTest(unittest.TestCase):
 
     def test_output_incompatible_setup(self):
         os.chdir(basedir)
-        cmd_line = './scripts/avocado run --xunit - --json - sleeptest'
+        cmd_line = './scripts/avocado run --xunit - --json - passtest'
         result = process.run(cmd_line, ignore_status=True)
         expected_rc = 2
         output = result.stdout + result.stderr
@@ -74,7 +74,7 @@ class OutputPluginTest(unittest.TestCase):
 
     def test_output_incompatible_setup_2(self):
         os.chdir(basedir)
-        cmd_line = './scripts/avocado run --vm --json - sleeptest'
+        cmd_line = './scripts/avocado run --vm --json - passtest'
         result = process.run(cmd_line, ignore_status=True)
         expected_rc = 2
         output = result.stdout + result.stderr
@@ -88,7 +88,7 @@ class OutputPluginTest(unittest.TestCase):
     def test_output_compatible_setup(self):
         tmpfile = tempfile.mktemp()
         os.chdir(basedir)
-        cmd_line = './scripts/avocado run --journal --xunit %s --json - sleeptest' % tmpfile
+        cmd_line = './scripts/avocado run --journal --xunit %s --json - passtest' % tmpfile
         result = process.run(cmd_line, ignore_status=True)
         output = result.stdout + result.stderr
         expected_rc = 0
@@ -108,7 +108,7 @@ class OutputPluginTest(unittest.TestCase):
     def test_output_compatible_setup_2(self):
         tmpfile = tempfile.mktemp()
         os.chdir(basedir)
-        cmd_line = './scripts/avocado run --xunit - --json %s sleeptest' % tmpfile
+        cmd_line = './scripts/avocado run --xunit - --json %s passtest' % tmpfile
         result = process.run(cmd_line, ignore_status=True)
         output = result.stdout + result.stderr
         expected_rc = 0
@@ -132,7 +132,7 @@ class OutputPluginTest(unittest.TestCase):
         tmpfile = tempfile.mktemp()
         tmpfile2 = tempfile.mktemp()
         os.chdir(basedir)
-        cmd_line = './scripts/avocado run --xunit %s --json %s sleeptest' % (tmpfile, tmpfile2)
+        cmd_line = './scripts/avocado run --xunit %s --json %s passtest' % (tmpfile, tmpfile2)
         result = process.run(cmd_line, ignore_status=True)
         output = result.stdout + result.stderr
         expected_rc = 0
@@ -158,7 +158,7 @@ class OutputPluginTest(unittest.TestCase):
         tmpfile = tempfile.mktemp()
         tmpfile2 = tempfile.mktemp()
         os.chdir(basedir)
-        cmd_line = './scripts/avocado run --silent --xunit %s --json %s sleeptest' % (tmpfile, tmpfile2)
+        cmd_line = './scripts/avocado run --silent --xunit %s --json %s passtest' % (tmpfile, tmpfile2)
         result = process.run(cmd_line, ignore_status=True)
         output = result.stdout + result.stderr
         expected_rc = 0
@@ -182,7 +182,7 @@ class OutputPluginTest(unittest.TestCase):
 
     def test_show_job_log(self):
         os.chdir(basedir)
-        cmd_line = './scripts/avocado run sleeptest --show-job-log'
+        cmd_line = './scripts/avocado run passtest --show-job-log'
         result = process.run(cmd_line, ignore_status=True)
         expected_rc = 0
         self.assertEqual(result.exit_status, expected_rc,
@@ -191,7 +191,7 @@ class OutputPluginTest(unittest.TestCase):
 
     def test_silent_trumps_show_job_log(self):
        os.chdir(basedir)
-        cmd_line = './scripts/avocado run sleeptest --show-job-log --silent'
+        cmd_line = './scripts/avocado run passtest --show-job-log --silent'
         result = process.run(cmd_line, ignore_status=True)
         output = result.stdout + result.stderr
         expected_rc = 0
@@ -202,7 +202,7 @@ class OutputPluginTest(unittest.TestCase):
 
     def test_default_enabled_plugins(self):
         os.chdir(basedir)
-        cmd_line = './scripts/avocado run sleeptest'
+        cmd_line = './scripts/avocado run passtest'
         result = process.run(cmd_line, ignore_status=True)
         output = result.stdout + result.stderr
         expected_rc = 0
@@ -43,10 +43,10 @@ class StandaloneTests(unittest.TestCase):
                          "Stand alone %s did not return rc "
                          "%d:\n%s" % (tstname, expected_rc, result))
 
-    def test_sleeptest(self):
-        cmd_line = './examples/tests/sleeptest.py'
+    def test_passtest(self):
+        cmd_line = './examples/tests/passtest.py'
         expected_rc = 0
-        self.run_and_check(cmd_line, expected_rc, 'sleeptest')
+        self.run_and_check(cmd_line, expected_rc, 'passtest')
 
     def test_skiptest(self):
         cmd_line = './examples/tests/skiptest.py'