# This Python file uses the following encoding: utf-8

import aexpect
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom
import zipfile
import unittest
import psutil
import pkg_resources

from StringIO import StringIO

from lxml import etree
from six import iteritems
from six.moves import xrange as range

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)

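# Avocado command exercised by the functional tests below; setting
# UNITTEST_AVOCADO_CMD points them at an installed avocado instead of
# the in-tree script.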
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        # Please do NOT ever use this, it's for unittesting only.
        self._Test__status = 'not supported'

    def test(self):
        pass
'''


INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


VALID_PYTHON_TEST_WITH_TAGS = '''
from avocado import Test

class MyTest(Test):
    def test(self):
         """
         :avocado: tags=BIG_TAG_NAME
         """
         pass
'''


REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(70)
'''


DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


RAISE_CUSTOM_PATH_EXCEPTION_CONTENT = '''import os
import sys

from avocado import Test

class SharedLibTest(Test):
    def test(self):
        sys.path.append(os.path.join(os.path.dirname(__file__), "shared_lib"))
        from mylib import CancelExc
        raise CancelExc("This should not crash on unpickling in runner")
'''


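# probe_binary() returns the absolute path of the given binary, or None
# when it is not installed; used to guard tests on optional host tools.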
def probe_binary(binary):
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None


TRUE_CMD = probe_binary('true')
CC_BINARY = probe_binary('cc')

# On macOS, the default GNU core-utils installation (brew)
# installs the gnu utility versions with a g prefix. It still has the
# BSD versions of the core utilities installed on their expected paths
# but their behavior and flags are in most cases different.
GNU_ECHO_BINARY = probe_binary('echo')
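# If the echo found does not document the -e flag in its man page, fall
# back to brew's GNU "gecho" implementation.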
if GNU_ECHO_BINARY is not None:
    if probe_binary('man') is not None:
        echo_manpage = process.run('man %s' % os.path.basename(GNU_ECHO_BINARY)).stdout
        if '-e' not in echo_manpage:
            GNU_ECHO_BINARY = probe_binary('gecho')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')


def html_capable():
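    """Return True if the optional HTML result plugin is installed."""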
    try:
        pkg_resources.require('avocado-framework-plugin-result-html')
        return True
    except pkg_resources.DistributionNotFound:
        return False


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        os.chdir(basedir)

    def test_show_version(self):
        result = process.run('%s -v' % AVOCADO, ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in iteritems(mapping):
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        cmd = '%s --config %s config --datadir' % (AVOCADO, config_file)
        result = process.run(cmd)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)
        # Also check whether jobdata contains correct mux_path
        variants = open(os.path.join(self.tmpdir, "latest", "jobdata",
                        "variants.json")).read()
        self.assertIn('["/run/*"]', variants, "mux_path stored in jobdata "
                      "does not contain [\"/run/*\"]\n%s" % variants)

    def test_runner_failfast(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_ignore_missing_references_one_missing(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py badtest.py --ignore-missing-references on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn("Unable to resolve reference(s) 'badtest.py'", result.stderr)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 0 | SKIP 0', result.stdout)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_ignore_missing_references_all_missing(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'badtest.py badtest2.py --ignore-missing-references on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn("Unable to resolve reference(s) 'badtest.py', 'badtest2.py'",
                      result.stderr)
        self.assertEqual('', result.stdout)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    def test_runner_noalias(self):
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            "def hello():\n    return 'Hello world'",
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        mytest.save()
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "%s" % (AVOCADO, self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s"
                              " --json -" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_hanged_test_with_status(self):
        """ Check that avocado handles hung tests properly """
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s "
                              "--json - --job-timeout 1" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            # Currently it should finish up to 1s after the job-timeout,
            # but the prep and postprocess could take a bit longer on
            # some environments, so let's just check it stays safely
            # under 60s, which is the deadline for force-finishing the
            # test.
            self.assertLess(res.duration, 55, "Test execution took too long, "
                            "which is likely because the hanged test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s "
                              "--json -" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s passtest.py '
                    'failtest.py passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir '
                    '%s bogustest' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_exception_not_in_path(self):
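        # A custom exception defined in a module outside the runner's
        # default sys.path must still unpickle cleanly when reported back.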
        os.mkdir(os.path.join(self.tmpdir, "shared_lib"))
        mylib = script.Script(os.path.join(self.tmpdir, "shared_lib",
                                           "mylib.py"),
                              "from avocado import TestCancel\n\n"
                              "class CancelExc(TestCancel):\n"
                              "    pass")
        mylib.save()
        mytest = script.Script(os.path.join(self.tmpdir, "mytest.py"),
                               RAISE_CUSTOM_PATH_EXCEPTION_CONTENT)
        mytest.save()
        result = process.run("%s --show test run --sysinfo=off "
                             "--job-results-dir %s %s"
                             % (AVOCADO, self.tmpdir, mytest))
        self.assertIn("mytest.py:SharedLibTest.test -> CancelExc: This "
                      "should not crash on unpickling in runner",
                      result.stdout)
        self.assertNotIn("Failed to read queue", result.stdout)

    def test_runner_timeout(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_abort(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        cmd_line = ('%s --silent run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        cmd_line = AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        cmd_line = '%s run --sysinfo=off --job-results-dir %s' % (AVOCADO,
                                                                  self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s sbrubles'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to resolve reference', result.stderr)
        self.assertNotIn('Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s --force-job-id '
                    'foobar passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % (AVOCADO, self.tmpdir))
        avocado_process = process.SubProcess(cmd_line)
        try:
            avocado_process.start()
            link = os.path.join(self.tmpdir, 'latest')
            for trial in range(0, 50):
                time.sleep(0.1)
                if os.path.exists(link) and os.path.islink(link):
                    avocado_process.wait()
                    break
            self.assertTrue(os.path.exists(link))
            self.assertTrue(os.path.islink(link))
        finally:
            avocado_process.wait()

    def test_dry_run(self):
        cmd = ("%s run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run" % AVOCADO)
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn(tempfile.gettempdir(), debuglog)   # Use tmp dir, not default location
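        # A dry-run job gets a null (all-zeros) job id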
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were cancelled
        self.assertEqual(result['cancel'], 4)
        for i in range(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test cancelled due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2" is in the tree, but not in any leaf, so it is
        # inaccessible from the test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('%s --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (AVOCADO, self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_read(self):
        cmd = "%s run --sysinfo=off --job-results-dir %%s %%s" % AVOCADO
        cmd %= (self.tmpdir, READ_BINARY)
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        os.chdir(basedir)

    def test_output_pass(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_cancel(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'cancelonsetup.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1',
                      result.stdout)

    @unittest.skipIf(not GNU_ECHO_BINARY,
                     'GNU style echo binary not available')
    def test_ugly_echo_cmd(self):
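        # The test reference below contains quotes, backslashes and
        # newlines; they must survive into the output and be sanitized
        # in the result directory name.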
627
        cmd_line = ('%s run --external-runner "%s -ne" '
628
                    '"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
A
Amador Pahim 已提交
629
                    ' --sysinfo=off  --show-job-log' %
630
                    (AVOCADO, GNU_ECHO_BINARY, self.tmpdir))
631 632 633 634 635
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
636 637 638
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
639 640
        self.assertIn('PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
641 642 643 644 645 646 647
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
648
                         "1-foo__n_'____nbar_baz")
649

    def test_replay_skip_skipped(self):
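        # Run a job whose only test is cancelled, then replay it filtered
        # to PASS results; the replay command itself must succeed.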
        cmd = ("%s run --job-results-dir %s --json - "
               "cancelonsetup.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd)
        result = json.loads(result.stdout)
        jobid = str(result["job_id"])
        cmd = ("%s run --job-results-dir %s --replay %s "
               "--replay-test-status PASS" % (AVOCADO, self.tmpdir, jobid))
        process.run(cmd)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'ʊʋʉʈɑ ʅʛʌ',
            "#!/bin/sh\ntrue",
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  "#!/bin/sh\nfalse",
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()
        os.chdir(basedir)

    def test_simpletest_pass(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (AVOCADO, self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and make sure they don't take more than 30 seconds to
        run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
                    % (AVOCADO, self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
                    % (AVOCADO, self.tmpdir, sleep_fail_sleep))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        # simplewarning.sh calls "avocado" without specifying a path
        os.environ['PATH'] += ":" + os.path.join(basedir, 'scripts')
        # simplewarning.sh calls "avocado exec-path" which doesn't have
        # access to an installed location for the libexec scripts
        os.environ['PATH'] += ":" + os.path.join(basedir, 'libexec')
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    @unittest.skipIf(not GNU_ECHO_BINARY, "Uses echo as test")
    def test_fs_unfriendly_run(self):
        os.chdir(basedir)
        commands_path = os.path.join(self.tmpdir, "commands")
        script.make_script(commands_path, "echo '\"\\/|?*<>'")
        config_path = os.path.join(self.tmpdir, "config.conf")
        script.make_script(config_path,
                           "[sysinfo.collectibles]\ncommands = %s"
                           % commands_path)
        cmd_line = ("%s --show all --config %s run --job-results-dir %s "
                    "--sysinfo=on --external-runner %s -- \"'\\\"\\/|?*<>'\""
                    % (AVOCADO, config_path, self.tmpdir, GNU_ECHO_BINARY))
        result = process.run(cmd_line)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "latest",
                                                    "test-results",
                                                    "1-\'________\'/")))
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "latest",
                                                    "sysinfo", "pre",
                                                    "echo \'________\'")))

        if html_capable():
            with open(os.path.join(self.tmpdir, "latest",
                                   "results.html")) as html_res:
                html_results = html_res.read()
            # test results should replace odd chars with "_"
            self.assertIn(os.path.join("test-results", "1-'________'"),
                          html_results)
            # sysinfo replaces "_" with " "
            self.assertIn("echo '________'", html_results)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        os.chdir(test_base_dir)
        test_file_name = os.path.basename(self.pass_script.path)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_kill_stopped_sleep(self):
        proc = aexpect.Expect("%s run 60 --job-results-dir %s "
                              "--external-runner %s --sysinfo=off "
                              "--job-timeout 3"
                              % (AVOCADO, self.tmpdir, SLEEP_BINARY))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado process, not the shell executing it
        avocado_shell = psutil.Process(proc.get_pid())
        avocado_proc = avocado_shell.children()[0]
        pid = avocado_proc.pid
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
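        # The job-timeout above is 3s; allow a few extra seconds for the
        # runner to tear everything down before declaring it stuck.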
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive after the deadline:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectedly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "status 8.")

        sleep_dir = astring.string_to_safe_path("1-60")
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "suppose to be stopped and unable to produce it.\n"
                         "%s" % debug_log)
852

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            "exit 0",
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            "exit 1",
            'avocado_externalrunner_functional')
        self.fail_script.save()
        os.chdir(basedir)

    def test_externalrunner_pass(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s'
                    % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s'
                    % (AVOCADO, self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh --external-runner-chdir=test %s'
                    % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=%s' % (AVOCADO, self.tmpdir, TRUE_CMD))
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):
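    """Base mixin shared by the plugin test classes below; combined with
    unittest.TestCase in subclasses, so it is not collected on its own."""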

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        os.chdir(basedir)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

938
    def test_sysinfo_plugin(self):
939
        cmd_line = '%s sysinfo %s' % (AVOCADO, self.base_outputdir)
940
        result = process.run(cmd_line, ignore_status=True)
941
        expected_rc = exit_codes.AVOCADO_ALL_OK
942 943 944 945 946 947
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

948
    def test_list_plugin(self):
949
        cmd_line = '%s list' % AVOCADO
950 951
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
952
        expected_rc = exit_codes.AVOCADO_ALL_OK
953 954 955 956 957
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

958
    def test_list_error_output(self):
959
        cmd_line = '%s list sbrubles' % AVOCADO
960 961
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
962
        expected_rc = exit_codes.AVOCADO_FAIL
963 964 965
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
966
        self.assertIn("Unable to resolve reference", output)
967

968 969 970 971 972 973 974
    def test_list_no_file_loader(self):
        cmd_line = ("%s list --loaders external --verbose -- "
                    "this-wont-be-matched" % AVOCADO)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
                         "Avocado did not return rc %d:\n%s"
                         % (exit_codes.AVOCADO_ALL_OK, result))
975 976
        exp = ("Type    Test                 Tag(s)\n"
               "MISSING this-wont-be-matched \n\n"
977 978
               "TEST TYPES SUMMARY\n"
               "==================\n"
979
               "EXTERNAL: 0\n"
980 981 982 983
               "MISSING: 1\n")
        self.assertEqual(exp, result.stdout, "Stdout mismatch:\n%s\n\n%s"
                         % (exp, result))

984 985 986 987 988 989 990 991 992 993 994 995 996 997 998
    def test_list_verbose_tags(self):
        """
        Runs list verbosely and check for tag related output
        """
        test = script.make_script(os.path.join(self.base_outputdir, 'test.py'),
                                  VALID_PYTHON_TEST_WITH_TAGS)
        cmd_line = ("%s list --loaders file --verbose %s" % (AVOCADO,
                                                             test))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
                         "Avocado did not return rc %d:\n%s"
                         % (exit_codes.AVOCADO_ALL_OK, result))
        stdout_lines = result.stdout.splitlines()
        self.assertIn("Tag(s)", stdout_lines[0])
        full_test_name = "%s:MyTest.test" % test
        self.assertEqual("INSTRUMENTED %s BIG_TAG_NAME" % full_test_name,
                         stdout_lines[1])
        self.assertIn("TEST TYPES SUMMARY", stdout_lines)
        self.assertIn("INSTRUMENTED: 1", stdout_lines)
        self.assertIn("TEST TAGS SUMMARY", stdout_lines)
        self.assertEqual("BIG_TAG_NAME: 1", stdout_lines[-1])

    def test_plugin_list(self):
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        cmd_line = '%s config --paginator off' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        cmd_line = '%s config --datadir --paginator off' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_disable_plugin(self):
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = '%s --config %s plugins' % (AVOCADO, config)
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks if html, json, xunit and zip_archive plugins are enabled.
        Then it runs a test with zip_archive running first, which means the html,
        json and xunit output files do not make it into the archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make it into the archive.
        """
        def run_config(config_path):
            cmd = ('%s --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (AVOCADO, config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        if html_capable():
            result_plugins.append("html")
            result_outputs.append("results.html")

        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
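        # Regression check: a plugin accessing a command line option that
        # was never registered would print "'Namespace' object has no
        # attribute ..." to stderr.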
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
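        # JUnit 4 XML schema shipped with the test data, used below to
        # validate the xunit plugin output.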
        junit_xsd = os.path.join(os.path.dirname(__file__),
                                 os.path.pardir, ".data", 'junit-4.xsd')
        self.junit = os.path.abspath(junit_xsd)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
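        # The e_* parameters are the expected values: exit code and the
        # number of tests, errors, not-found tests, failures and skips.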
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (AVOCADO, self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

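        # Besides being well-formed, the xunit output must also validate
        # against the JUnit schema loaded in setUp().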
        with open(self.junit, 'r') as f:
            xmlschema = etree.XMLSchema(etree.parse(f))

        self.assertTrue(xmlschema.validate(etree.parse(StringIO(xml_output))),
                        "Failed to validate against %s, message:\n%s" %
                        (self.junit,
                         xmlschema.error_log.filter_from_errors()))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1,
                         'Expected exactly one testsuite tag')

        testsuite_tag = testsuite_list[0]
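        # The testsuite element is expected to carry exactly 7 attributes
        # (roughly: name, tests, errors, failures, skipped, time and a
        # timestamp; the exact set may vary between xunit plugin versions).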
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip, e_ncancel=0, external_runner=None):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off --json - '
                    '--archive %s' % (AVOCADO, self.tmpdir, testname))
        if external_runner is not None:
            cmd_line += " --external-runner '%s'" % external_runner
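        # "--json -" writes the JSON report to stdout, so it can be parsed
        # straight from the process output below.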
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        n_cancel = json_data['cancel']
        self.assertEqual(n_cancel, e_ncancel,
                         "Different number of cancelled tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    @unittest.skipIf(not GNU_ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
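        # Push a parameter string full of shell metacharacters through the
        # external runner to check that escaping survives end to end.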
        data = self.run_and_check('"-ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0, external_runner=GNU_ECHO_BINARY)
        # The executed test's "url" should preserve the escaped characters
        self.assertEqual(data['tests'][0]['url'],
                         '1--ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         "1--ne foo__n_'____nbar_baz")

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()


if __name__ == '__main__':
    unittest.main()