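# Functional tests for the avocado runner's basic behavior. These tests
# invoke the ./scripts/avocado entry point as a subprocess, so they assume
# they are being run from the root of an avocado source tree checkout.
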
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom

import aexpect

# Python 2.6 ships an older unittest; unittest2 backports the 2.7 API
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script


# Root of the avocado source tree, assuming this file lives two directory
# levels below it (e.g. selftests/functional/)
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

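# A test that imports a helper module (mylib above) from its own directory;
# exercised by test_runner_test_with_local_imports below.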
LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

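# A test that forges an unsupported status value after running; the runner
# is expected to report it as an ERROR (see test_unsupported_status below).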
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        self.status = 'not supported'

    def test(self):
        pass
'''

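# A test file whose class body references an undefined name, so it cannot
# even be loaded; the runner should report a TestError (see
# test_invalid_python below).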
INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/datadir.py examples/tests/datadir.py'
                    % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py examples/tests/passtest.py'
                    % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Original fail_reason: None",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" %
                            (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" %
                            (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" %
                            (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("RUNNER: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s"
                      % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test process aborted'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" %
                            (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No urls provided nor any arguments produced'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('./scripts/avocado run --sysinfo=off --force-job-id foobar'
                    ' passtest.py')
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # raises ValueError if not a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        # Poll for up to 5 seconds while the job is starting up
        for _ in xrange(50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

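    # A dry run should replace every test with a skipped placeholder while
    # still recording the job and logging each test's (injected) parameters.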
    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "errortest.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        with open(debuglog, 'r') as debuglog_file:
            log = debuglog_file.read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 3)
        for i in xrange(3):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2" param is in the tree, but not in any leaf, so it
        # is inaccessible from the tests.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 3)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = './scripts/avocado --show test run --sysinfo=off '\
                   '--job-results-dir %s %s' % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'skiponsetup.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run "/bin/echo -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        result = process.run("./scripts/avocado run skiponsetup.py --json -")
        result = json.loads(result.stdout)
        jobid = result["job_id"]
        process.run(str("./scripts/avocado run --replay %s "
                        "--replay-test-status PASS" % jobid))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and ensure they take no more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, sleep_fail_sleep))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log'
                    % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

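    # Start avocado with a 3s job timeout, freeze the running test with
    # SIGTSTP, and check that the runner still honors the timeout and
    # finishes on its own.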
    def test_kill_stopped_sleep(self):
        sleep = process.run("which sleep", ignore_status=True, shell=True)
        if sleep.exit_status:
            self.skipTest("Sleep binary not found in PATH")
        sleep = "'%s 60'" % sleep.stdout.strip()
        proc = aexpect.Expect("./scripts/avocado run %s --job-results-dir %s "
                              "--sysinfo=off --job-timeout 3"
                              % (sleep, self.tmpdir))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        # Wait up to 9s for the avocado process to finish on its own after
        # the 3s job-timeout expires
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive several seconds after "
                      "job-timeout:\n%s" % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectedly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "rc 8.")
        sleep_dir = astring.string_to_safe_path("1-" + sleep[1:-1])
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        with open(debug_log) as debug_log_file:
            debug_log = debug_log_file.read()
        self.assertIn("RUNNER: Timeout reached", debug_log, "RUNNER: Timeout "
                      "reached message not in the test's debug.log:\n%s"
                      % debug_log)
        self.assertNotIn("Traceback", debug_log, "Traceback present in the "
                         "test's debug.log file, but it was supposed to be "
                         "stopped and unable to produce it.\n%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s')
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = 'No urls provided nor any arguments produced'
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):
    # Deliberately not a unittest.TestCase subclass: this base is mixed into
    # the concrete TestCase classes below and must not be collected on its own.

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        # Compare 2-tuples to 2-tuples: (2, 7) >= (2, 7, 0) is always False
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

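    # Runs a single test with xUnit output and checks the testsuite tag
    # attributes against the expected rc and test/error/failure/skip counts
    # (e_nnotfound is accepted for signature compatibility but unchecked).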
    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

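    # Runs a single test with JSON output and checks the expected rc and
    # test/error/failure/skip counts; returns the parsed JSON data.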
    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--json - --archive %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1-/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1-_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()


if __name__ == '__main__':
    unittest.main()