import json
import os
import shutil
import time
import sys
import tempfile
import xml.dom.minidom
import glob
import aexpect
import signal
import re

if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script


basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


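# Shell scripts and Python test sources written to temporary files and
# used as on-disk fixtures by the functional tests below.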
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        self.status = 'not supported'

    def test(self):
        pass
'''

INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


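# Fixtures for the runner robustness tests: one test reports a status and
# then hangs, the other dies without reporting any status at all.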
REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
        self.runner_queue.put({"running": False})
        time.sleep(60)
'''

DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
        os.kill(os.getpid(), signal.SIGKILL)
'''


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/datadir.py examples/tests/datadir.py'
                    % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
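        # test_local_imports.py sits next to mylib.py, so a passing run
        # shows that the test's own directory ends up on the import path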
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    def test_hanged_test_with_status(self):
        """ Check that avocado handles hanged tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            self.assertLess(res.duration, 40, "Test execution took too long, "
                            "which is likely because the hanged test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No urls provided nor any arguments produced'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('./scripts/avocado run --sysinfo=off --force-job-id foobar'
                    ' passtest.py')
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
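        # The auto-generated job id is a 40-character hex string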
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
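        # Poll for up to ~5s (50 x 0.1s) for the 'latest' symlink to show
        # up while the job is still running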
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "errortest.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
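        # A dry-run job gets the all-zeros job id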
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 3)
        for i in xrange(3):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2" is in the tree, but not in any leaf, so it is
        # inaccessible from the test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 3)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = './scripts/avocado --show test run --sysinfo=off '\
                   '--job-results-dir %s %s' % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'skiponsetup.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        os.chdir(basedir)
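        # The command below is escaped once for this Python string and once
        # for the shell, exercising newlines, quotes and slashes in the
        # test url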
        cmd_line = ('./scripts/avocado run "/bin/echo -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        result = process.run("./scripts/avocado run skiponsetup.py --json -")
        result = json.loads(result.stdout)
        jobid = result["job_id"]
        process.run(str("./scripts/avocado run --replay %s "
                        "--replay-test-status PASS" % jobid))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and check they don't take more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_kill_stopped_sleep(self):
        sleep = process.run("which sleep", ignore_status=True, shell=True)
        if sleep.exit_status:
            self.skipTest("Sleep binary not found in PATH")
        sleep = "'%s 60'" % sleep.stdout.strip()
        proc = aexpect.Expect("./scripts/avocado run %s --job-results-dir %s "
                              "--sysinfo=off --job-timeout 3"
                              % (sleep, self.tmpdir))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
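        # The else branch runs only when the loop above was never broken,
        # i.e. avocado outlived the deadline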
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive after the 9s deadline "
                      "(job-timeout=3):\n%s" % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "status 8 (job interrupted).")
        sleep_dir = astring.string_to_safe_path("1-" + sleep[1:-1])
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "suppose to be stopped and unable to produce it.\n"
                         "%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):
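    # These tests exercise --external-runner, which hands each test url to
    # an arbitrary executable (/bin/sh here) to be run.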

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s')
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = 'No urls provided nor any arguments produced'
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):
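    # Mixin providing a temporary output directory; concrete subclasses
    # also inherit from unittest.TestCase.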

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
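        # The 'Disabled' check below is skipped on Python 2.6, where some
        # plugins are presumably expected to show up as disabled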
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        os.chdir(basedir)
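        # Note: e_nnotfound is accepted for call symmetry but is not
        # checked against the xunit output below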
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--json - --archive %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1-/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1-_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

if __name__ == '__main__':
    unittest.main()