"""
Functional tests for the basic operation of the avocado command line
test runner (executed through './scripts/avocado').
"""

import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom

import aexpect

# unittest2 backports the needed unittest features to Python 2.6
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.core import exit_codes
from avocado.utils import process
from avocado.utils import script


# Root of the avocado source tree (two levels up from this test file)
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


# A SIMPLE (executable) test that always passes
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

# Shell snippet for the external-runner tests: success
PASS_SHELL_CONTENTS = "exit 0"

# A SIMPLE (executable) test that always fails
FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

# Shell snippet for the external-runner tests: failure
FAIL_SHELL_CONTENTS = "exit 1"

# Helper library imported by LOCAL_IMPORT_TEST_CONTENTS from its own dir
HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

# A test that reports a status string the runner does not recognize
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        self.status = 'not supported'

    def test(self):
        pass
'''

class RunnerOperationTest(unittest.TestCase):

    """
    Checks of the runner's basic operations: exit codes, output
    channels and results handling, exercised through the CLI.
    """

    def setUp(self):
        # Private job results directory, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        # The version banner is written to stderr, not stdout
        self.assertTrue(re.match(r"^Avocado \d+\.\d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d\\.\\d\\.\\"
                        "d':\n%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        # The section header needs its own line, otherwise the first
        # option would be appended to it, yielding an invalid config file
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest passtest' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s datadir' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        # A test importing a library that lives next to it must work
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        # An unknown test status must be mapped to ERROR
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Original fail_reason: None",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest failtest passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s bogustest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        # Both the test failure and the tearDown error must be reported
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - doublefail' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - timeouttest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestTimeoutInterrupted: Timeout reached waiting for", output,
                      "Test did not fail with timeout exception:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - abort' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test process aborted'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        # --silent must produce completely empty stdout
        os.chdir(basedir)
        cmd_line = './scripts/avocado --silent run --sysinfo=off --job-results-dir %s passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No urls provided nor any arguments produced'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        # The error belongs on stderr only
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar passtest'
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 passtest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off passtest --json -' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's an hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        # Calling skip() outside setUp() is an error, not a skip
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        # Poll for up to ~5s while the job is (probably) still running
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest failtest "
               "errortest --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a "
               "foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 3)
        for i in xrange(3):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 3)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

class RunnerHumanOutputTest(unittest.TestCase):

    """
    Checks the human-readable output produced by the runner for each
    possible test result (PASS, FAIL, ERROR, SKIP).
    """

    def setUp(self):
        # Private job results directory, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s failtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s errortest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        # A skipped test does not make the job fail
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s skiponsetup' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    def test_ugly_echo_cmd(self):
        # Exercise a SIMPLE test url full of shell special characters
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run "/bin/echo -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS /bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        # Replaying only-PASS tests of a job whose test was skipped
        result = process.run("./scripts/avocado run skiponsetup --json -")
        result = json.loads(result.stdout)
        jobid = result["job_id"]
        process.run(str("./scripts/avocado run --replay %s "
                        "--replay-test-status PASS" % jobid))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

class RunnerSimpleTest(unittest.TestCase):

    """
    Checks the runner with SIMPLE (executable) tests: pass/fail handling,
    timing behavior, bash utilities and signal handling.
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        # One passing and one failing executable shell test
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and assure they not take more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = 'sleeptest ' + 'failtest ' * 100 + 'sleeptest'
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        # 30s slack for the failtests + ~2s for the two sleeptests
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        # A test referenced by a relative path must be found
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_kill_stopped_sleep(self):
        # A SIGTSTP-stopped test must still honor the job timeout
        sleep = process.run("which sleep", ignore_status=True, shell=True)
        if sleep.exit_status:
            self.skipTest("Sleep binary not found in PATH")
        sleep = "'%s 60'" % sleep.stdout.strip()
        proc = aexpect.Expect("./scripts/avocado run %s --job-results-dir %s "
                              "--sysinfo=off --job-timeout 3"
                              % (sleep, self.tmpdir))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 5s after job-timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
        self.assertEqual(proc.get_status(), 1, "Avocado did not finish with "
                         "1.")

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)

class ExternalRunnerTest(unittest.TestCase):

    """
    Checks the '--external-runner' feature, which runs arbitrary files
    through a user-supplied interpreter (here, /bin/sh).
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        # Plain (non-executable) shell snippets fed to the external runner
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        # '--external-runner-chdir=test' requires a testdir to be given
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No urls provided nor any arguments produced')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)

class AbsPluginsTest(object):
662

663
    def setUp(self):
664
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
665

666 667 668 669 670 671
    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    """Functional checks for the avocado command line plugins."""

    def test_sysinfo_plugin(self):
        """The sysinfo command must succeed and collect at least one file."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        """`avocado list` must succeed and find tests."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        """Listing a bogus url must fail with a discovery error message."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        """`avocado plugins` must succeed and list no disabled plugins."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        # NOTE: comparing a 2-tuple against a 3-tuple made the original
        # `sys.version_info[:2] >= (2, 7, 0)` always False (tuple ordering
        # by length); compare against (2, 7) so the check works on py2.7+.
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        """`avocado config` must succeed without disabled entries."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        """`avocado config --datadir` must succeed without disabled entries."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_Namespace_object_has_no_attribute(self):
        """Regression check: plugins listing must not hit argparse errors."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    """Raised when the xUnit XML output produced by avocado cannot be parsed."""


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    """Checks on the xUnit (--xunit) result plugin output."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        """
        Run avocado with the xUnit plugin and validate the XML counters.

        :param testname: test reference passed to `avocado run`
        :param e_rc: expected avocado process exit code
        :param e_ntests: expected number of executed tests
        :param e_nerrors: expected number of test errors
        :param e_nnotfound: unused; kept so callers' signatures stay valid
        :param e_nfailures: expected number of test failures
        :param e_nskip: expected number of skipped tests
        :raises ParseXMLError: when the xUnit output is not valid XML
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skip'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    """Raised when the JSON output produced by avocado cannot be parsed."""


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    """Checks on the JSON (--json) result plugin output."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        """
        Run avocado with the JSON plugin and validate the result counters.

        :param testname: test reference passed to `avocado run`
        :param e_rc: expected avocado process exit code
        :param e_ntests: expected number of executed tests
        :param e_nerrors: expected number of test errors
        :param e_nfailures: expected number of test failures
        :param e_nskip: expected number of skipped tests
        :returns: the decoded JSON result data
        :raises ParseJSONError: when the output is not valid JSON
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected tests")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected tests")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    def test_ugly_echo_cmd(self):
        """A url with quotes, newlines and slashes must survive round-trip."""
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()


# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()