import aexpect
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom
import zipfile

import pkg_resources

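# Python 2.6's unittest lacks features used below (e.g. skipIf), so fall
# back to the unittest2 backport on that version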
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

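# Base directory of the avocado source tree, two levels up from this file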
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


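# Contents of the scripts and test payloads written to temporary files by
# the test cases below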
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

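# Test that tampers with its own status after run_avocado(); the runner
# should turn the unsupported status into an ERROR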
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        self.status = 'not supported'

    def test(self):
        pass
'''

INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''

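# Test that reports a status to the runner queue and then hangs, so the
# runner has to time it out instead of waiting forever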
REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(60)
'''

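# Test that SIGKILLs itself, dying before any status can be reported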
DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


def probe_binary(binary):
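    """Return the full path of the given binary, or None if not found."""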
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None

CC_BINARY = probe_binary('cc')
ECHO_BINARY = probe_binary('echo')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
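        # The version banner is printed to stderr, not stdout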
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_failfast(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on' %
                    self.tmpdir)
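        # failtest fails, the remaining passtest is skipped and the job stops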
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    def test_hanged_test_with_status(self):
        """ Check that avocado handles hanged tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            self.assertLess(res.duration, 40, "Test execution took too long, "
                            "which is likely because the hung test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)
    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)
    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)
    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)
    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)
    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No urls provided nor any arguments produced'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)
    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)
    def test_invalid_unique_id(self):
        cmd_line = ('./scripts/avocado run --sysinfo=off --force-job-id foobar'
                    ' passtest.py')
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)
    def test_automatic_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
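        # Poll for up to ~5s (50 * 0.1s) for the 'latest' link to appear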
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 4)
        for i in xrange(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)
    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = './scripts/avocado --show test run --sysinfo=off '\
                   '--job-results-dir %s %s' % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)
    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    def test_read(self):
        os.chdir(basedir)
        result = process.run("./scripts/avocado run %s" % READ_BINARY,
                             timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'skiponsetup.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    @unittest.skipIf(not ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run "%s -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' %
                    (ECHO_BINARY, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-%s -ne foo\\\\n\\\'\\"\\\\nbar/baz' %
                      ECHO_BINARY, result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-%s -ne foo\\\\n\\\'\\"\\\\nbar_baz' %
                         ECHO_BINARY.replace('/', '_'))
    def test_replay_skip_skipped(self):
        result = process.run("./scripts/avocado run skiponsetup.py --json -")
        result = json.loads(result.stdout)
        jobid = result["job_id"]
        process.run(str("./scripts/avocado run --replay %s "
                        "--replay-test-status PASS" % jobid))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and check that they don't take more than 30 seconds.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
661 662
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)
    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    def test_kill_stopped_sleep(self):
        sleep = "'%s 60'" % SLEEP_BINARY
        proc = aexpect.Expect("./scripts/avocado run %s --job-results-dir %s "
                              "--sysinfo=off --job-timeout 3"
                              % (sleep, self.tmpdir))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
716
        deadline = time.time() + 9
717 718 719
        while time.time() < deadline:
            if not proc.is_alive():
                break
720
            time.sleep(0.1)
721 722
        else:
            proc.kill(signal.SIGKILL)
723
            self.fail("Avocado process still alive 5s after job-timeout:\n%s"
724 725 726 727 728 729 730
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "1.")
        sleep_dir = astring.string_to_safe_path("1-" + sleep[1:-1])
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "suppose to be stopped and unable to produce it.\n"
                         "%s" % debug_log)
    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):
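    """Checks running tests through an external runner such as /bin/sh."""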

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No urls provided nor any arguments produced')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):
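    """Base class for the plugin tests; manages a temporary output dir."""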
    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)
    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)
    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_disable_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = './scripts/avocado --config %s plugins' % config
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks that the html, json, xunit and zip_archive plugins are
        enabled. Then it runs a test with zip_archive running first, which
        means the html, json and xunit output files do not make it into the
        archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make it into the archive.
        """
        def run_config(config_path):
            cmd = ('./scripts/avocado --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        try:
            pkg_resources.require('avocado_result_html')
            result_plugins.append("html")
            result_outputs.append("html/results.html")
        except pkg_resources.DistributionNotFound:
            pass

        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)

class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):
    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
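        # The e_* parameters carry the expectations: return code, number of
        # tests, errors, not-found tests, failures and skips, in that order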
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)
    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)
    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)
    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):
    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
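        # Hand the parsed results back so callers can make extra checks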
        return json_data
    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)
    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)
    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)
    @unittest.skipIf(not ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
1137
                         '1-/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
1138 1139
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
1140
                         '1-_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')
1141

1142 1143 1144 1145
    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

1146 1147
if __name__ == '__main__':
    unittest.main()