# This Python file uses the following encoding: utf-8
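"""Functional tests for the basic operation of the avocado test runner."""
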
import aexpect
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom
import zipfile
import unittest

import pkg_resources

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

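# Companion to HELLO_LIB_CONTENTS above: the test imports a module that only
# exists next to the test file, exercising the runner's handling of local
# imports (see test_runner_test_with_local_imports below).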
LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        self.status = 'not supported'

    def test(self):
        pass
'''

INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


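# Reports a status through the runner queue and then hangs, so the runner sees
# the test start but never finish (see test_hanged_test_with_status below).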
REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(60)
'''


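# Kills its own process with SIGKILL, so no final status is ever reported back
# to the runner (see test_no_status_reported below).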
DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


def probe_binary(binary):
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None


CC_BINARY = probe_binary('cc')
ECHO_BINARY = probe_binary('echo')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')
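# The binaries probed above gate tests via unittest.skipIf(): when a binary is
# not available the dependent test is skipped rather than reported as failed.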


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_failfast(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on' %
                    self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
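        # Avocado's exit codes appear to be OR-able bit flags, so a job that
        # both had a test failure and was interrupted reports the combination.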
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    def test_hanged_test_with_status(self):
        """ Check that avocado handles hung tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            self.assertLess(res.duration, 40, "Test execution took too long, "
                            "which is likely because the hanged test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    @unittest.skipIf(os.environ.get("AVOCADO_CHECK_FULL") != "1",
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s sbrubles' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to resolve reference', result.stderr)
        self.assertNotIn('Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s --force-job-id foobar passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
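        # --mux-inject entries are assumed to follow the [path:]key:value
        # syntax: foo:1, bar:2 and baz:3 carry no explicit path and land at
        # the root of the variant tree (hence the "/:foo ==> 1" checks below).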
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 4)
        for i in xrange(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2" is in the tree, but not in any leaf, so it is
        # inaccessible from the test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('./scripts/avocado --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    def test_read(self):
        os.chdir(basedir)
        cmd = "./scripts/avocado run --sysinfo=off --job-results-dir %s %s"
        cmd %= (self.tmpdir, READ_BINARY)
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'skiponsetup.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    @unittest.skipIf(not ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --external-runner "%s -ne" '
                    '"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' %
                    (ECHO_BINARY, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        cmd = ("./scripts/avocado run --job-results-dir %s --json - "
               "skiponsetup.py" % self.tmpdir)
        result = process.run(cmd)
        result = json.loads(result.stdout)
        jobid = str(result["job_id"])
        cmd = ("./scripts/avocado run --job-results-dir %s "
               "--replay %s --replay-test-status PASS") % (self.tmpdir, jobid)
        process.run(cmd)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'ʊʋʉʈɑ ʅʛʌ',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and ensure they take no more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    def test_kill_stopped_sleep(self):
        proc = aexpect.Expect("./scripts/avocado run 60 --job-results-dir %s "
                              "--external-runner %s --sysinfo=off --job-timeout 3"
                              % (self.tmpdir, SLEEP_BINARY))
        proc.read_until_output_matches([r"\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive after the 9s deadline "
                      "(job-timeout is 3s):\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectedly")
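        # Status 8 is assumed to be the AVOCADO_JOB_INTERRUPTED bit flag,
        # matching the --job-timeout interruption exercised by this test.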
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "status 8.")

        sleep_dir = astring.string_to_safe_path("1-60")
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "supposed to be stopped and unable to produce it.\n"
                         "%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


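# --external-runner executes an arbitrary binary as the test runner; here the
# test references are shell scripts handed to /bin/sh for execution.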
class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


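# Mixin providing the shared setUp/tearDown for the plugin tests below;
# concrete classes combine it with unittest.TestCase via multiple inheritance.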
class AbsPluginsTest(object):

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to resolve reference", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_disable_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = './scripts/avocado --config %s plugins' % config
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks if html, json, xunit and zip_archive plugins are enabled.
        Then it runs a test with zip_archive running first, which means the html,
        json and xunit output files do not make it into the archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make it into the archive.
        """
        def run_config(config_path):
            cmd = ('./scripts/avocado --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        try:
            pkg_resources.require('avocado_result_html')
            result_plugins.append("html")
            result_outputs.append("html/results.html")
        except pkg_resources.DistributionNotFound:
            pass

        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
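        # The e_* parameters are the expected counts asserted against the
        # parsed xUnit output; note that e_nnotfound is accepted but not
        # currently checked below.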
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip, external_runner=None):
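        # The e_* parameters are the expected counts asserted against the
        # parsed JSON results; external_runner, when given, is passed through
        # as the --external-runner option.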
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        if external_runner is not None:
            cmd_line += " --external-runner '%s'" % external_runner
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    @unittest.skipIf(not ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        data = self.run_and_check('"-ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0, ECHO_BINARY)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1--ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1--ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()


if __name__ == '__main__':
    unittest.main()