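"""
Functional tests for the basic operation of the avocado command line
application: runner behavior, human-readable output, simple and
external-runner tests, and the xunit/JSON result plugins.
"""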
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom

import aexpect

if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

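# Test fixture whose run_avocado() reports a status string the runner does
# not recognize; the runner is expected to turn this into an ERROR.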
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        self.status = 'not supported'

    def test(self):
        pass
'''

INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


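# Test fixture that reports a status message on the runner queue and then
# hangs, so a final status never arrives; the runner should detect this.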
REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(60)
'''

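# Test fixture that kills itself with SIGKILL, so no status is ever
# reported back to the runner.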
DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


def missing_binary(binary):
    try:
        utils_path.find_command(binary)
        return False
    except utils_path.CmdNotFoundError:
        return True


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_failfast(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on' %
                    self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
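        # Avocado exit codes are bit flags, so a job that both failed tests
        # and was interrupted reports the bitwise OR of the two codes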
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(missing_binary('cc'),
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    @unittest.skipIf(missing_binary('cc'),
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/datadir.py examples/tests/datadir.py'
                    % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "examples/tests/passtest.py examples/tests/passtest.py"
                    % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    def test_hanged_test_with_status(self):
        """ Check that avocado handles hung tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            self.assertLess(res.duration, 40, "Test execution took too long, "
                            "which is likely because the hung test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No urls provided nor any arguments produced'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('./scripts/avocado run --sysinfo=off --force-job-id foobar'
                    ' passtest.py')
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # raises ValueError if it's not a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
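        # Poll for up to ~5s (50 x 0.1s) for the 'latest' symlink to appear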
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
465 466
        self.assertEqual(result['skip'], 4)
        for i in xrange(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = './scripts/avocado --show test run --sysinfo=off '\
                   '--job-results-dir %s %s' % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(missing_binary("read"),
                     "read binary not available.")
    def test_read(self):
        cmd = utils_path.find_command("read")
        os.chdir(basedir)
        result = process.run("./scripts/avocado run %s" % cmd, timeout=10,
                             ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'skiponsetup.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run "/bin/echo -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        result = process.run("./scripts/avocado run skiponsetup.py --json -")
        result = json.loads(result.stdout)
        jobid = result["job_id"]
        process.run(str("./scripts/avocado run --replay %s "
                        "--replay-test-status PASS" % jobid))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and ensure they take no more than 30 seconds to run.

        Note: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, sleep_fail_sleep))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log'
                    % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_kill_stopped_sleep(self):
        sleep = process.run("which sleep", ignore_status=True, shell=True)
        if sleep.exit_status:
            self.skipTest("Sleep binary not found in PATH")
        sleep = "'%s 60'" % sleep.stdout.strip()
        proc = aexpect.Expect("./scripts/avocado run %s --job-results-dir %s "
                              "--sysinfo=off --job-timeout 3"
                              % (sleep, self.tmpdir))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need the pid of avocado itself, not of the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 9s after job-timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "exit code 8 (job interrupted).")
        sleep_dir = astring.string_to_safe_path("1-" + sleep[1:-1])
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "suppose to be stopped and unable to produce it.\n"
                         "%s" % debug_log)
740

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --external-runner=/bin/sh %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --external-runner=/bin/sh %s')
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --external-runner=/bin/sh'
                    ' --external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = 'No urls provided nor any arguments produced'
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):
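    """
    Base class for the plugin tests: a mixin providing a temporary output
    dir, not itself derived from unittest.TestCase, so it is not collected
    on its own.
    """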

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
883
        cmd_line = './scripts/avocado config --datadir --paginator off'
884 885
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
886
        expected_rc = exit_codes.AVOCADO_ALL_OK
887 888 889 890 891
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913
    def test_disable_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = './scripts/avocado --config %s plugins' % config
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

914 915 916 917 918
    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
919
        expected_rc = exit_codes.AVOCADO_ALL_OK
920 921 922 923 924
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)

925

926 927 928 929
class ParseXMLError(Exception):
    pass


930
class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):
931

932
    def setUp(self):
933
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
934 935
        super(PluginsXunitTest, self).setUp()

936
    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
937
                      e_nnotfound, e_nfailures, e_nskip):
938
        os.chdir(basedir)
L
                    ' --xunit - %s' % (self.tmpdir, testname))
941 942 943 944 945 946 947
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
948
        except Exception as detail:
949 950 951 952 953 954 955
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
956 957
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
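        """
        Runs the given test, then parses and checks the JSON results:
        expected exit code plus the tests/errors/failures/skip counts.
        """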
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --json - --archive %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
1067
                         '1-/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
1068 1069
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
1070
                         '1-_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')
1071

1072 1073 1074 1075
    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

if __name__ == '__main__':
    unittest.main()