import json
import os
import shutil
import time
import sys
import tempfile
import xml.dom.minidom
import glob
import aexpect
import signal
import re

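# unittest2 is the backport of Python 2.7's unittest to Python 2.6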
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.core import exit_codes
from avocado.utils import process
from avocado.utils import script


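# These tests run the in-tree avocado scripts, so resolve the repository
# root (two levels above this file) to chdir into before each run.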
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

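# Test fixture that reports a status the runner does not recognize; the
# runner is expected to turn it into an ERROR result (see
# test_unsupported_status below).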
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        self.status = 'not supported'

    def test(self):
        pass
'''

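# The bare name in the class body raises a NameError as soon as the module
# is imported, before any test method can run.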
INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
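        # The version banner is printed to stderr, not stdout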
        self.assertTrue(re.match(r"^Avocado \d+\.\d+\.\d+$", result.stderr),
                        "Version string does not match "
                        "'Avocado \\d+\\.\\d+\\.\\d+':\n%r" % result.stderr)

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check that a custom configuration is
        applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest passtest' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s datadir' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Original fail_reason: None",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest failtest passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s bogustest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - doublefail' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - timeouttest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestTimeoutInterrupted: Timeout reached waiting for", output,
                      "Test did not fail with timeout exception:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - abort' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test process aborted'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado --silent run --sysinfo=off --job-results-dir %s passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No urls provided nor any arguments produced'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar passtest'
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 passtest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off passtest --json -' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
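        # Poll for up to 5 seconds (50 * 0.1s) for the 'latest' link to
        # show up while the job is still running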
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
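        # --mux-inject entries take the form [node:]key:value; without an
        # explicit node the value lands in the root of the multiplex tree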
        cmd = ("./scripts/avocado run --sysinfo=off passtest failtest "
               "errortest --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a "
               "foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        with open(debuglog, 'r') as debuglog_file:
            log = debuglog_file.read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 3)
        for i in xrange(3):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2" entry is in the tree, but not in any leaf, so it
        # is inaccessible from the test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 3)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = './scripts/avocado --show test run --sysinfo=off '\
                   '--job-results-dir %s %s' % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('MyTest.test_my_name -> TestError', result.stdout)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s failtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s errortest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s skiponsetup' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run "/bin/echo -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS /bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
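        # Replaying a job whose only test was skipped, filtering on PASS,
        # should still exit cleanly (process.run raises on a non-zero exit)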
        result = process.run("./scripts/avocado run skiponsetup --json -")
        result = json.loads(result.stdout)
        jobid = result["job_id"]
        process.run(str("./scripts/avocado run --replay %s "
                        "--replay-test-status PASS" % jobid))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and make sure they take no more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = 'sleeptest ' + 'failtest ' * 100 + 'sleeptest'
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_kill_stopped_sleep(self):
        sleep = process.run("which sleep", ignore_status=True, shell=True)
        if sleep.exit_status:
            self.skipTest("Sleep binary not found in PATH")
        sleep = "'%s 60'" % sleep.stdout.strip()
        proc = aexpect.Expect("./scripts/avocado run %s --job-results-dir %s "
                              "--sysinfo=off --job-timeout 3"
                              % (sleep, self.tmpdir))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
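        # The job was started with --job-timeout 3; give the runner up to 9s
        # to enforce it even though the test process itself is stopped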
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 9s after job-timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectedly")
        self.assertEqual(proc.get_status(), 1, "Avocado did not finish with "
                         "exit status 1.")

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


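# With --external-runner, each test URL is passed as an argument to the
# given binary (/bin/sh here) instead of being loaded as an avocado test.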
class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = 'No urls provided nor any arguments produced'
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


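# Mixin providing a fresh output directory to the plugin tests; it is not
# derived from unittest.TestCase, so it is not collected as a test on its own.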
class AbsPluginsTest(object):

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
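        # Note: e_nnotfound is accepted for signature symmetry, but is not
        # currently checked against the xunit output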
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1,
                         'Expected exactly one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

if __name__ == '__main__':
    unittest.main()