import json
import os
import shutil
import time
import sys
import tempfile
import xml.dom.minidom
import glob
import aexpect
import signal
import re

if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
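# Tests below chdir() here first; this is assumed to be the repository root
# (two levels up from this file), so that './scripts/avocado' resolves.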


PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        self.status = 'not supported'

    def test(self):
        pass
'''

INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(60)
'''

DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''
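# The two fixtures above simulate misbehaving tests: one reports a status and
# then hangs, the other gets SIGKILLed before reporting anything, so the
# runner must detect both situations by itself.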


def probe_binary(binary):
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None

CC_BINARY = probe_binary('cc')
ECHO_BINARY = probe_binary('echo')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')
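# Tests depending on one of these binaries are decorated with
# unittest.skipIf(not <BINARY>, ...), so they SKIP instead of failing on
# systems where the binary is not available.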


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_failfast(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on' %
                    self.tmpdir)
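        # With --failfast, the first failure interrupts the job: one PASS,
        # one FAIL, and the remaining test is skipped.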
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
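        # Avocado exit codes are bit flags, so a job that both failed and was
        # interrupted carries both bits set.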
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
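        # The test file is written to the same directory as mylib.py, so its
        # "from mylib import hello" must be resolved relative to the test.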
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    def test_hanged_test_with_status(self):
        """ Check that avocado handles hanged tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            self.assertLess(res.duration, 40, "Test execution took too long, "
                            "which is likely because the hung test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No urls provided nor any arguments produced'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('./scripts/avocado run --sysinfo=off --force-job-id foobar'
                    ' passtest.py')
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
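        # Poll for up to ~5 seconds: the symlink must appear while the job is
        # still running, not only after it finishes.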
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 4)
        for i in xrange(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = './scripts/avocado --show test run --sysinfo=off '\
                   '--job-results-dir %s %s' % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    def test_read(self):
        os.chdir(basedir)
        result = process.run("./scripts/avocado run %s" % READ_BINARY,
                             timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
516 517
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % self.tmpdir)
518
        result = process.run(cmd_line, ignore_status=True)
519
        expected_rc = exit_codes.AVOCADO_ALL_OK
520 521 522 523 524 525 526
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'skiponsetup.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    @unittest.skipIf(not ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        os.chdir(basedir)
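        # The argument mixes backslashes, quotes and escaped newlines to
        # exercise both command-line parsing and the escaping of the result
        # directory name (checked below).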
        cmd_line = ('./scripts/avocado run "%s -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' %
                    (ECHO_BINARY, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-%s -ne foo\\\\n\\\'\\"\\\\nbar/baz' %
                      ECHO_BINARY, result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-%s -ne foo\\\\n\\\'\\"\\\\nbar_baz' %
                         ECHO_BINARY.replace('/', '_'))

    def test_replay_skip_skipped(self):
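        # Replaying a job whose only test was skipped, while keeping just
        # PASS results, must still succeed (process.run raises on a non-zero
        # exit status here).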
        result = process.run("./scripts/avocado run skiponsetup.py --json -")
        result = json.loads(result.stdout)
        jobid = result["job_id"]
        process.run(str("./scripts/avocado run --replay %s "
                        "--replay-test-status PASS" % jobid))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and make sure they take no more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    def test_kill_stopped_sleep(self):
        sleep = "'%s 60'" % SLEEP_BINARY
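        # Start a 60s sleep test under a 3s job timeout, freeze the avocado
        # process with SIGTSTP, and check the job still ends in time.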
        proc = aexpect.Expect("./scripts/avocado run %s --job-results-dir %s "
                              "--sysinfo=off --job-timeout 3"
                              % (sleep, self.tmpdir))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need the pid of the avocado process, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 5s after job-timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "status 8 (job interrupted).")
        sleep_dir = astring.string_to_safe_path("1-" + sleep[1:-1])
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "suppose to be stopped and unable to produce it.\n"
                         "%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
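        # --external-runner passes each test URL as an argument to the given
        # binary, so this plain "exit 0" snippet is executed by /bin/sh.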
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No urls provided nor any arguments produced')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):
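    # Shared setup/teardown mixin; concrete suites combine it with
    # unittest.TestCase below.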

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_disable_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = './scripts/avocado --config %s plugins' % config
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
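        # The e_* parameters are the expected return code and the per-status
        # counts checked against the xunit output below (e_nnotfound is
        # currently accepted but not checked).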
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    @unittest.skipIf(not ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1-/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1-_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

if __name__ == '__main__':
    unittest.main()