# Functional ("basic") tests for the avocado command line application.
import glob
import json
import os
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom

import aexpect

# Python 2.6 lacks the unittest features used here (e.g. assertIn);
# unittest2 backports them.
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.core import exit_codes
from avocado.utils import process
from avocado.utils import script


# Root of the avocado source tree (two levels up from this test file);
# all `./scripts/avocado ...` commands below are run from here.
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


# Trivial always-passing / always-failing shell scripts, used as
# "simple tests" (exit status 0 means PASS, non-zero means FAIL).
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

# Minimal library + test pair used to verify that an instrumented test
# can import a module sitting in its own directory (local import).
HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''


class RunnerOperationTest(unittest.TestCase):

    """
    Functional checks of basic `avocado run` operation: test resolution,
    exit codes, result artifacts and option handling.
    """

    def setUp(self):
        # Fresh job-results dir per test, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest passtest' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s datadir' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        # A test importing a module that lives next to it must still run
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest failtest passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s bogustest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - doublefail' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - timeouttest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestTimeoutError: Timeout reached waiting for", output,
                      "Test did not fail with timeout exception:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - abort' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test process aborted'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest --silent' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stderr, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No tests found for given urls'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar passtest'
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 passtest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off passtest --json -' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's an hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        # Poll up to ~5s for the link to show up while the job is running
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest failtest "
               "errortest --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a "
               "foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        # Read and close the log promptly instead of leaking the file handle
        with open(debuglog, 'r') as debugfile:
            log = debugfile.read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 3)
        for i in xrange(3):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 3)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

class RunnerHumanOutputTest(unittest.TestCase):

    """Checks of the human-readable (console) output of `avocado run`."""

    def setUp(self):
        # Fresh job-results dir per test, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s failtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s errortest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s skiponsetup' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    def test_ugly_echo_cmd(self):
        # A command full of shell special chars (quotes, backslashes, /)
        # must run, log correctly and produce an escaped result dir name
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run "/bin/echo -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stderr, result)
        self.assertIn('[stdout] \'"', result.stderr, result)
        self.assertIn('[stdout] bar/baz', result.stderr, result)
        self.assertIn('PASS /bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stderr, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        # Replaying a job with --replay-test-status PASS must not choke
        # on tests that were SKIPped in the original run
        result = process.run("./scripts/avocado run skiponsetup --json -")
        result = json.loads(result.stdout)
        jobid = result["job_id"]
        process.run(str("./scripts/avocado run --replay %s "
                        "--replay-test-status PASS" % jobid))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

class RunnerSimpleTest(unittest.TestCase):

    """Runs "simple" (plain executable shell script) tests through avocado."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        # One passing and one failing shell script, saved to disk so the
        # runner can execute them as simple tests
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and assure they not take more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = 'sleeptest ' + 'failtest ' * 100 + 'sleeptest'
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stderr, result)
        self.assertIn('INFO | Info message', result.stderr, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stderr, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stderr, result)

    def test_non_absolute_path(self):
        # Run the test by its bare file name from inside its directory
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_kill_stopped_sleep(self):
        sleep = process.run("which sleep", ignore_status=True, shell=True)
        if sleep.exit_status:
            self.skipTest("Sleep binary not found in PATH")
        sleep = "'%s 60'" % sleep.stdout.strip()
        proc = aexpect.Expect("./scripts/avocado run %s --job-results-dir %s "
                              "--sysinfo=off --job-timeout 3"
                              % (sleep, self.tmpdir))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 5
        while time.time() < deadline:
            if not proc.is_alive():
                break
            # Short nap so the poll loop does not spin a full CPU core
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 1s after job-timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
        self.assertEqual(proc.get_status(), 1, "Avocado did not finish with "
                         "1.")

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)
class ExternalRunnerTest(unittest.TestCase):

    """Checks of the --external-runner way of executing plain files as tests."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        # Shell snippets (not full scripts) to be fed to /bin/sh via
        # the external runner
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        # --external-runner-chdir=test without --external-runner-testdir
        # must be rejected with a clear error
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):

    """
    Base mixin for plugin tests: provides a scratch output directory.

    Not a TestCase itself; concrete classes mix it with unittest.TestCase.
    """

    def setUp(self):
        # Scratch dir for plugin output, removed in tearDown()
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    """Checks of the built-in avocado sub-command plugins."""

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        # NOTE: the comparison used to be `>= (2, 7, 0)`, which is always
        # False for a two-element version_info slice ((2, 7) < (2, 7, 0)
        # in tuple ordering), so the check below never ran on Python 2.7
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)

class ParseXMLError(Exception):
    """Raised when the xUnit plugin output cannot be parsed as XML."""
    pass


672
class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    """
    Functional checks for the xUnit result plugin: run an avocado job with
    '--xunit -' and validate the XML document written to stdout.
    """

    def setUp(self):
        # Private results directory so concurrent runs do not clash
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        """
        Run a single avocado job and validate its xUnit output.

        :param testname: test reference passed to 'avocado run'
        :param e_rc: expected avocado process exit code
        :param e_ntests: expected number of executed tests
        :param e_nerrors: expected number of test errors
        :param e_nnotfound: unused; kept for signature compatibility
        :param e_nfailures: expected number of test failures
        :param e_nskip: expected number of skipped tests
        :raises ParseXMLError: if stdout is not well-formed XML
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:  # 'as' syntax works on py2.6+ and py3
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skip'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        # Remove the per-test results directory created in setUp
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()

742 743 744 745 746

class ParseJSONError(Exception):
    """Raised when the JSON plugin output cannot be parsed as JSON."""
    pass


747
class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    """
    Functional checks for the JSON result plugin: run an avocado job with
    '--json -' and validate the document written to stdout.
    """

    def setUp(self):
        # Private results directory so concurrent runs do not clash
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        """
        Run a single avocado job and validate its JSON output.

        :param testname: test reference passed to 'avocado run'
        :param e_rc: expected avocado process exit code
        :param e_ntests: expected number of executed tests
        :param e_nerrors: expected number of test errors
        :param e_nfailures: expected number of test failures
        :param e_nskip: expected number of skipped tests
        :returns: the decoded JSON result document
        :raises ParseJSONError: if stdout is not valid JSON
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:  # 'as' syntax works on py2.6+ and py3
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected tests")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected tests")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        # Remove the per-test results directory created in setUp
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

818 819
# Allow running this functional test suite directly as a script.
if __name__ == '__main__':
    unittest.main()