# This Python file uses the following encoding: utf-8
import aexpect
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom
import zipfile
import unittest
import psutil
import pkg_resources

from lxml import etree
from StringIO import StringIO

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)

AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")

PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        # Please do NOT ever use this, it's for unittesting only.
        self._Test__status = 'not supported'

    def test(self):
        pass
'''

INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


VALID_PYTHON_TEST_WITH_TAGS = '''
from avocado import Test

class MyTest(Test):
    def test(self):
         """
         :avocado: tags=BIG_TAG_NAME
         """
         pass
'''


REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(70)
'''
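# The test above pushes one status message into the runner queue and then
# sleeps well past any job timeout, simulating a test that reports progress
# but never finishes.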


DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


RAISE_CUSTOM_PATH_EXCEPTION_CONTENT = '''import os
import sys

from avocado import Test

class SharedLibTest(Test):
    def test(self):
        sys.path.append(os.path.join(os.path.dirname(__file__), "shared_lib"))
        from mylib import CancelExc
        raise CancelExc("This should not crash on unpickling in runner")
'''


def probe_binary(binary):
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None


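# probe_binary() returns the absolute path of a command, or None when it is
# missing; the constants below feed the skipIf decorators used throughout
# this module.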
TRUE_CMD = probe_binary('true')
CC_BINARY = probe_binary('cc')

# On macOS, the default GNU core-utils installation (brew)
# installs the gnu utility versions with a g prefix. It still has the
# BSD versions of the core utilities installed on their expected paths
# but their behavior and flags are in most cases different.
GNU_ECHO_BINARY = probe_binary('echo')
if GNU_ECHO_BINARY is not None:
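    # The BSD echo found on macOS does not document '-e'; when the manpage
    # lacks it, fall back to brew's g-prefixed GNU echo (which may also be
    # absent, leaving GNU_ECHO_BINARY as None).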
    if probe_binary('man') is not None:
        echo_manpage = process.run('man %s' % os.path.basename(GNU_ECHO_BINARY)).stdout
        if '-e' not in echo_manpage:
            GNU_ECHO_BINARY = probe_binary('gecho')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('%s -v' % AVOCADO, ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
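        # The generated file is plain INI, roughly:
        #   [datadir.paths]
        #   base_dir = <tmpdir>/datadir_base
        #   test_dir = <tmpdir>/datadir_base/test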
        cmd = '%s --config %s config --datadir' % (AVOCADO, config_file)
        result = process.run(cmd)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    def test_runner_failfast(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
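        # Avocado exit codes are bit flags, so a test failure plus a job
        # interruption OR together into the expected status.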
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_ignore_missing_references_one_missing(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py badtest.py --ignore-missing-references on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn("Unable to resolve reference(s) 'badtest.py'", result.stderr)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 0 | SKIP 0', result.stdout)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_ignore_missing_references_all_missing(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'badtest.py badtest2.py --ignore-missing-references on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn("Unable to resolve reference(s) 'badtest.py', 'badtest2.py'",
                      result.stderr)
        self.assertEqual('', result.stdout)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
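        # script.Script (unlike TemporaryScript) writes to an explicit path;
        # the test must sit next to mylib.py for the local import to resolve.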
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "%s" % (AVOCADO, self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s"
                              " --json -" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_hanged_test_with_status(self):
        """ Check that avocado handles hanged tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s "
                              "--json - --job-timeout 1" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            # Currently it should finish up to 1s after the job-timeout
            # but the prep and postprocess could take a bit longer on
            # some environments, so let's just check it finishes safely
            # under 60s, which is the deadline for force-finishing the test.
            self.assertLess(res.duration, 55, "Test execution took too long, "
                            "which is likely because the hung test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s "
                              "--json -" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s passtest.py '
                    'failtest.py passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir '
                    '%s bogustest' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_exception_not_in_path(self):
        os.chdir(basedir)
        os.mkdir(os.path.join(self.tmpdir, "shared_lib"))
        mylib = script.Script(os.path.join(self.tmpdir, "shared_lib",
                                           "mylib.py"),
                              "from avocado import TestCancel\n\n"
                              "class CancelExc(TestCancel):\n"
                              "    pass")
        mylib.save()
        mytest = script.Script(os.path.join(self.tmpdir, "mytest.py"),
                               RAISE_CUSTOM_PATH_EXCEPTION_CONTENT)
        mytest.save()
        result = process.run("%s --show test run --sysinfo=off "
                             "--job-results-dir %s %s"
                             % (AVOCADO, self.tmpdir, mytest))
        self.assertIn("mytest.py:SharedLibTest.test -> CancelExc: This "
                      "should not crash on unpickling in runner",
                      result.stdout)
        self.assertNotIn("Failed to read queue", result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('%s --silent run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = '%s run --sysinfo=off --job-results-dir %s' % (AVOCADO,
                                                                  self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s sbrubles'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to resolve reference', result.stderr)
        self.assertNotIn('Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s --force-job-id '
                    'foobar passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % (AVOCADO, self.tmpdir))
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
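        # Poll for up to ~5s (50 * 0.1s); the link must appear while the job
        # is still running, which is why wait() is called only once it shows up.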
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("%s run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run" % AVOCADO)
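        # --mux-inject entries take the [path:]key:value form: "foo:1" injects
        # into the tree root, "foo:bar:b" injects key "bar" under node "foo".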
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn(tempfile.gettempdir(), debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were cancelled
        self.assertEqual(result['cancel'], 4)
        for i in xrange(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test cancelled due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2" is in the tree, but not in any leaf, so it is
        # inaccessible from the test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('%s --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (AVOCADO, self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_read(self):
        os.chdir(basedir)
        cmd = "%s run --sysinfo=off --job-results-dir %%s %%s" % AVOCADO
        cmd %= (self.tmpdir, READ_BINARY)
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_cancel(self):
        os.chdir(basedir)
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'cancelonsetup.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1',
                      result.stdout)

    @unittest.skipIf(not GNU_ECHO_BINARY,
                     'GNU style echo binary not available')
    def test_ugly_echo_cmd(self):
        os.chdir(basedir)
        cmd_line = ('%s run --external-runner "%s -ne" '
                    '"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' %
                    (AVOCADO, GNU_ECHO_BINARY, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
666 667 668
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
669 670
        self.assertIn('PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
671 672 673 674 675 676 677
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        cmd = ("%s run --job-results-dir %s --json - "
               "cancelonsetup.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd)
        result = json.loads(result.stdout)
        jobid = str(result["job_id"])
        cmd = ("%s run --job-results-dir %s --replay %s "
               "--replay-test-status PASS" % (AVOCADO, self.tmpdir, jobid))
        process.run(cmd)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'ʊʋʉʈɑ ʅʛʌ',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (AVOCADO, self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and ensure they do not take more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
                    % (AVOCADO, self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
                    % (AVOCADO, self.tmpdir, sleep_fail_sleep))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
799
                    ' "%s"' % (avocado_path, self.tmpdir, test_file_name))
800 801 802 803 804 805
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_kill_stopped_sleep(self):
        proc = aexpect.Expect("%s run 60 --job-results-dir %s "
                              "--external-runner %s --sysinfo=off "
                              "--job-timeout 3"
                              % (AVOCADO, self.tmpdir, SLEEP_BINARY))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado process, not the shell executing it
        avocado_shell = psutil.Process(proc.get_pid())
        avocado_proc = avocado_shell.children()[0]
        pid = avocado_proc.pid
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 9s after job-timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "exit code 8 (job interrupted).")

        sleep_dir = astring.string_to_safe_path("1-60")
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "supposed to be stopped and unable to produce it.\n"
                         "%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):
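    # These tests exercise --external-runner, where an arbitrary binary
    # (/bin/sh here) is handed each test reference as its argument.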

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s'
                    % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s'
                    % (AVOCADO, self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh --external-runner-chdir=test %s'
                    % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=%s' % (AVOCADO, self.tmpdir, TRUE_CMD))
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):
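    # Mixin: not itself a TestCase, so unittest will not collect it directly;
    # the concrete plugin test classes below inherit this setUp/tearDown.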

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

939 940
    def test_sysinfo_plugin(self):
        os.chdir(basedir)
941
        cmd_line = '%s sysinfo %s' % (AVOCADO, self.base_outputdir)
942
        result = process.run(cmd_line, ignore_status=True)
943
        expected_rc = exit_codes.AVOCADO_ALL_OK
944 945 946 947 948 949
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = '%s list' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = '%s list sbrubles' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to resolve reference", output)

    def test_list_no_file_loader(self):
        os.chdir(basedir)
        cmd_line = ("%s list --loaders external --verbose -- "
                    "this-wont-be-matched" % AVOCADO)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
                         "Avocado did not return rc %d:\n%s"
                         % (exit_codes.AVOCADO_ALL_OK, result))
        exp = ("Type    Test                 Tag(s)\n"
               "MISSING this-wont-be-matched \n\n"
               "TEST TYPES SUMMARY\n"
               "==================\n"
               "EXTERNAL: 0\n"
               "MISSING: 1\n")
        self.assertEqual(exp, result.stdout, "Stdout mismatch:\n%s\n\n%s"
                         % (exp, result))

    def test_list_verbose_tags(self):
        """
        Runs list verbosely and check for tag related output
        """
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.base_outputdir, 'test.py'),
                                  VALID_PYTHON_TEST_WITH_TAGS)
        cmd_line = ("%s list --loaders file --verbose %s" % (AVOCADO,
                                                             test))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
                         "Avocado did not return rc %d:\n%s"
                         % (exit_codes.AVOCADO_ALL_OK, result))
        stdout_lines = result.stdout.splitlines()
        self.assertIn("Tag(s)", stdout_lines[0])
        full_test_name = "%s:MyTest.test" % test
        self.assertEquals("INSTRUMENTED %s BIG_TAG_NAME" % full_test_name,
                          stdout_lines[1])
        self.assertIn("TEST TYPES SUMMARY", stdout_lines)
        self.assertIn("INSTRUMENTED: 1", stdout_lines)
        self.assertIn("TEST TAGS SUMMARY", stdout_lines)
        self.assertEquals("BIG_TAG_NAME: 1", stdout_lines[-1])

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = '%s config --paginator off' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = '%s config --datadir --paginator off' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

1046 1047
    def test_disable_plugin(self):
        os.chdir(basedir)
1048
        cmd_line = '%s plugins' % AVOCADO
1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = '%s --config %s plugins' % (AVOCADO, config)
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks if html, json, xunit and zip_archive plugins are enabled.
        Then it runs a test with zip_archive running first, which means the html,
        json and xunit output files do not make it into the archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make it into the archive.
        """
        def run_config(config_path):
            cmd = ('%s --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (AVOCADO, config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        try:
            pkg_resources.require('avocado_result_html')
            result_plugins.append("html")
            result_outputs.append("html/results.html")
        except pkg_resources.DistributionNotFound:
            pass

        os.chdir(basedir)
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.junit = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                     os.path.pardir, ".data", 'junit-4.xsd'))
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
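        """
        Runs avocado with the xunit result plugin and checks the xunit
        output against the expectations.

        :param testname: test reference passed to "avocado run"
        :param e_rc: expected avocado exit status
        :param e_ntests: expected number of executed tests
        :param e_nerrors: expected number of test errors
        :param e_nnotfound: currently not checked in the xunit output
        :param e_nfailures: expected number of test failures
        :param e_nskip: expected number of skipped tests
        """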
        os.chdir(basedir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (AVOCADO, self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

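        # Validate the xunit output against the bundled JUnit 4 XSD schema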
        with open(self.junit, 'r') as f:
            xmlschema = etree.XMLSchema(etree.parse(f))

        self.assertTrue(xmlschema.validate(etree.parse(StringIO(xml_output))),
                        "Failed to validate against %s, message:\n%s" %
                        (self.junit,
                         xmlschema.error_log.filter_from_errors()))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1,
                         'Expected exactly one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
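        # cancelonsetup.py cancels during setUp(); the xunit format accounts
        # for a cancelled test as "skipped"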
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip, e_ncancel=0, external_runner=None):
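        """
        Runs avocado with the JSON result plugin and checks the parsed
        result data against the expectations.

        :param testname: test reference passed to "avocado run"
        :param e_rc: expected avocado exit status
        :param e_ntests: expected number of executed tests
        :param e_nerrors: expected number of test errors
        :param e_nfailures: expected number of test failures
        :param e_nskip: expected number of skipped tests
        :param e_ncancel: expected number of cancelled tests
        :param external_runner: optional binary to run the test with,
                                passed as --external-runner
        :returns: the parsed JSON result data
        """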
        os.chdir(basedir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off --json - '
                    '--archive %s' % (AVOCADO, self.tmpdir, testname))
        if external_runner is not None:
            cmd_line += " --external-runner '%s'" % external_runner
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of skipped tests")
        n_cancel = json_data['cancel']
        self.assertEqual(n_cancel, e_ncancel,
                         "Unexpected number of cancelled tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
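        # cancelonsetup.py cancels during setUp(); the JSON result counts it
        # under "cancel" rather than "skip"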
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

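    # Passes a deliberately ugly argument string (escapes, quotes, newlines
    # and a slash) to echo via --external-runner, checking that the test url
    # and the logdir name are escaped consistently.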
    @unittest.skipIf(not GNU_ECHO_BINARY, 'GNU echo binary not available')
    def test_ugly_echo_cmd(self):
        data = self.run_and_check('"-ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0, external_runner=GNU_ECHO_BINARY)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1--ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1--ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()


if __name__ == '__main__':
    unittest.main()