import json
import os
import shutil
import time
import sys
import tempfile
import xml.dom.minidom
import glob
import signal
import re

import aexpect

# unittest2 backports the Python 2.7 unittest API to Python 2.6
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.core import exit_codes
from avocado.utils import process
from avocado.utils import script


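# The tests below chdir() to basedir (the repository root, two levels up from
# this file) so that relative paths such as './scripts/avocado' resolve.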
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


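# Fixtures for SIMPLE tests: self-contained shell scripts whose exit status
# determines PASS (0) or FAIL (non-zero), plus bare shell snippets meant to
# be fed to '--external-runner=/bin/sh'.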
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

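# Fixture that reports a status the runner does not recognize; the job is
# expected to turn it into an ERROR (exercised by test_unsupported_status).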
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        self.status = 'not supported'

    def test(self):
        pass
'''

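# Fixture whose class body raises a NameError at load time, so the test can
# never run (exercised by test_invalid_python).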
INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest passtest' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s datadir' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Original fail_reason: None",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest failtest passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s bogustest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - doublefail' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - timeouttest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestTimeoutInterrupted: Timeout reached waiting for", output,
                      "Test did not fail with timeout exception:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - abort' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test process aborted'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado --silent run --sysinfo=off --job-results-dir %s passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No urls provided nor any arguments produced'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar passtest'
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 passtest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off passtest --json -' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        # Poll for up to 5 seconds for the 'latest' symlink to show up
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

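    # Each of the three dry-run tests logs its full parameter set, so every
    # accessible leaf parameter must appear exactly three times in the log.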
    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest failtest "
               "errortest --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a "
               "foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 3)
        for i in xrange(3):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2" is in the tree, but not in any leaf, so it is
        # inaccessible from the test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 3)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = './scripts/avocado --show test run --sysinfo=off '\
                   '--job-results-dir %s %s' % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


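# Checks the human-readable console output produced for each test status.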
class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s failtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s errortest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s skiponsetup' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run "/bin/echo -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        result = process.run("./scripts/avocado run skiponsetup --json -")
        result = json.loads(result.stdout)
        jobid = result["job_id"]
        process.run(str("./scripts/avocado run --replay %s "
                        "--replay-test-status PASS" % jobid))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


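# SIMPLE tests are plain executables: exit status 0 maps to PASS and anything
# else to FAIL, as exercised with the pass/fail shell scripts below.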
class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and make sure they don't take more than 30 seconds.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = 'sleeptest ' + 'failtest ' * 100 + 'sleeptest'
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

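    # Regression check: stopping the test process with SIGTSTP must not keep
    # the job alive past --job-timeout; avocado should still finish with
    # exit status 1.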
    def test_kill_stopped_sleep(self):
        sleep = process.run("which sleep", ignore_status=True, shell=True)
        if sleep.exit_status:
            self.skipTest("Sleep binary not found in PATH")
        sleep = "'%s 60'" % sleep.stdout.strip()
        proc = aexpect.Expect("./scripts/avocado run %s --job-results-dir %s "
                              "--sysinfo=off --job-timeout 3"
                              % (sleep, self.tmpdir))
        proc.read_until_output_matches([r"\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need the pid of the avocado process, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive after the job-timeout "
                      "deadline:\n%s" % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectedly")
        self.assertEqual(proc.get_status(), 1, "Avocado did not finish with "
                         "exit status 1.")

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


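# The external runner feature hands each test URL as an argument to an
# arbitrary executable (here /bin/sh), so bare shell snippets become tests.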
class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = 'No urls provided nor any arguments produced'
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


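# Mixin that gives every plugin test a fresh output directory; concrete
# classes combine it with unittest.TestCase.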
class AbsPluginsTest(object):

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


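# Runs a single test through the xunit plugin and validates the counters
# (tests/errors/failures/skipped) on the resulting <testsuite> tag.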
class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


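# Same counter checks as PluginsXunitTest, but through the JSON plugin;
# run_and_check returns the parsed data so callers can assert further.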
class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1-/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1-_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()


if __name__ == '__main__':
    unittest.main()