# This Python file uses the following encoding: utf-8
import aexpect
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom
import zipfile
import unittest

import pkg_resources

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
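# NOTE: the functional tests below invoke the ./scripts/avocado entry point
# with paths relative to the repository root, so basedir is resolved here and
# each test chdir()s into it before running commands.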


PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        # Please do NOT ever use this, it's for unittesting only.
        self._Test__status = 'not supported'

    def test(self):
        pass
'''

INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''

REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(60)
'''

DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


def probe_binary(binary):
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None


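# Optional system binaries, resolved once at import time: each constant holds
# the full path to the tool, or None, so dependent tests can be skipped with
# unittest.skipIf when the tool is unavailable.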
CC_BINARY = probe_binary('cc')
ECHO_BINARY = probe_binary('echo')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)
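        # The generated config should now look roughly like (one key per
        # mapping entry, all paths living under self.tmpdir):
        #   [datadir.paths]
        #   base_dir = /tmp/avocado_.../datadir_base
        #   data_dir = /tmp/avocado_.../datadir_base/data
        #   ...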

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_failfast(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on' %
                    self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    def test_hanged_test_with_status(self):
        """ Check that avocado handles hanged tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            self.assertLess(res.duration, 40, "Test execution took too long, "
                            "which is likely because the hanged test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    @unittest.skipIf(os.environ.get("AVOCADO_CHECK_FULL") != "1",
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s sbrubles' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to resolve reference', result.stderr)
        self.assertNotIn('Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s --force-job-id foobar passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
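        # Poll for up to ~5 seconds (50 iterations x 0.1s) for the 'latest'
        # symlink to show up while the job is still running.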
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 4)
        for i in xrange(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed.
        # The "/:bar ==> 2" is in the tree, but not in any leaf, so it is
        # inaccessible from the test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('./scripts/avocado --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    def test_read(self):
        os.chdir(basedir)
        cmd = "./scripts/avocado run --sysinfo=off --job-results-dir %s %s"
        cmd %= (self.tmpdir, READ_BINARY)
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'skiponsetup.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    @unittest.skipIf(not ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        os.chdir(basedir)
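        # The external-runner reference below deliberately mixes backslashes,
        # quotes and a "/" to check that they survive argument parsing and
        # that "/" gets escaped to "_" in the per-test log directory name.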
        cmd_line = ('./scripts/avocado run --external-runner "%s -ne" '
                    '"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' %
                    (ECHO_BINARY, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        cmd = ("./scripts/avocado run --job-results-dir %s --json - "
               "skiponsetup.py" % self.tmpdir)
        result = process.run(cmd)
        result = json.loads(result.stdout)
        jobid = str(result["job_id"])
        cmd = ("./scripts/avocado run --job-results-dir %s "
               "--replay %s --replay-test-status PASS") % (self.tmpdir, jobid)
        process.run(cmd)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'ʊʋʉʈɑ ʅʛʌ',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and ensure they don't take more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    def test_kill_stopped_sleep(self):
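        # Freezes the avocado process mid-test with SIGTSTP and checks that
        # the job-timeout handling still terminates the job instead of
        # hanging forever.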
        proc = aexpect.Expect("./scripts/avocado run 60 --job-results-dir %s "
                              "--external-runner %s --sysinfo=off --job-timeout 3"
                              % (self.tmpdir, SLEEP_BINARY))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 9s after job-timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "status 8.")

        sleep_dir = astring.string_to_safe_path("1-60")
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "suppose to be stopped and unable to produce it.\n"
                         "%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):
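    # Mixin providing a fresh output directory per test; concrete subclasses
    # combine it with unittest.TestCase.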

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to resolve reference", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925
    def test_disable_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = './scripts/avocado --config %s plugins' % config
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

926 927 928 929 930 931 932 933 934 935 936 937 938
    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks if html, json, xunit and zip_archive plugins are enabled.
        Then it runs a test with zip_archive running first, which means the html,
        json and xunit output files do not make into the archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make into the archive.
        """
        def run_config(config_path):
            cmd = ('./scripts/avocado --config %s run passtest.py --archive '
939 940
                   '--job-results-dir %s --sysinfo=off'
                   % (config_path, self.base_outputdir))
941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        try:
            pkg_resources.require('avocado_result_html')
            result_plugins.append("html")
            result_outputs.append("html/results.html")
        except pkg_resources.DistributionNotFound:
            pass

        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)

class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
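        # The e_* arguments are the expected values: exit code, then the
        # tests/errors/not-found/failures/skipped counters from the xunit
        # testsuite tag (e_nnotfound is accepted but not currently checked).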
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip, external_runner=None):
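        # The e_* arguments are the expected exit code and the
        # test/error/failure/skip counters from the JSON results.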
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        if external_runner is not None:
            cmd_line += " --external-runner '%s'" % external_runner
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    @unittest.skipIf(not ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        data = self.run_and_check('"-ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0, ECHO_BINARY)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1--ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1--ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

if __name__ == '__main__':
    unittest.main()