# This Python file uses the following encoding: utf-8
import aexpect
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom
import zipfile
import unittest

import psutil
import pkg_resources

from lxml import etree
from six import StringIO
from six import iteritems
from six.moves import xrange as range

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

# Base directory of the avocado source tree (two levels up from this file)
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)

# The avocado command under test; can be overridden through the environment
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
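
# Inline sources for tests that are written out to temporary scripts
# (via avocado.utils.script) and executed with the AVOCADO command above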
LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

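# Test that reports a status unknown to the test runner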
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        # Please do NOT ever use this, it's for unittesting only.
        self._Test__status = 'not supported'

    def test(self):
        pass
'''

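# Test module that fails to load because its class body references an
# undefined name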
INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


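# Passing test whose docstring carries an "avocado: tags=" directive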
VALID_PYTHON_TEST_WITH_TAGS = '''
from avocado import Test

class MyTest(Test):
    def test(self):
         """
         :avocado: tags=BIG_TAG_NAME
         """
         pass
'''


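# Test that reports a status back to the runner queue and then hangs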
REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(70)
'''


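# Test that kills itself with SIGKILL before reporting any status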
DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


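# Test that raises an exception class which is importable only through
# a sys.path entry added at run time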
RAISE_CUSTOM_PATH_EXCEPTION_CONTENT = '''import os
import sys

from avocado import Test

class SharedLibTest(Test):
    def test(self):
        sys.path.append(os.path.join(os.path.dirname(__file__), "shared_lib"))
        from mylib import CancelExc
        raise CancelExc("This should not crash on unpickling in runner")
'''


def probe_binary(binary):
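    """Return the full path of the given binary, or None if it is not found."""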
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None

TRUE_CMD = probe_binary('true')
CC_BINARY = probe_binary('cc')

# On macOS, the default GNU coreutils installation (via brew) installs
# the GNU utilities with a "g" prefix. The BSD versions of the core
# utilities remain on their expected paths, but their behavior and
# flags are in most cases different.
GNU_ECHO_BINARY = probe_binary('echo')
if GNU_ECHO_BINARY is not None:
    if probe_binary('man') is not None:
        echo_manpage = process.run('man %s'
                                   % os.path.basename(GNU_ECHO_BINARY)).stdout
        if '-e' not in echo_manpage:
            GNU_ECHO_BINARY = probe_binary('gecho')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')


def html_capable():
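    """Check whether the avocado HTML result plugin is installed."""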
    try:
        pkg_resources.require('avocado-framework-plugin-result-html')
        return True
    except pkg_resources.DistributionNotFound:
        return False


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        os.chdir(basedir)

    def test_show_version(self):
        result = process.run('%s -v' % AVOCADO, ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        # The version banner is printed on stderr
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check that custom configuration is
        applied, even on the more complex data_dir module, which adds
        extra checks to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in iteritems(mapping):
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        cmd = '%s --config %s config --datadir' % (AVOCADO, config_file)
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)
        # Also check whether jobdata contains correct mux_path
        variants = open(os.path.join(self.tmpdir, "latest", "jobdata",
                        "variants.json")).read()
        self.assertIn('["/run/*"]', variants, "mux_path stored in jobdata "
                      "does not contain [\"/run/*\"]\n%s" % variants)

    def test_runner_failfast(self):
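        """Checks that --failfast on interrupts the job on the first test failure."""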
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_ignore_missing_references_one_missing(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py badtest.py --ignore-missing-references on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn("Unable to resolve reference(s) 'badtest.py'", result.stderr)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 0 | SKIP 0', result.stdout)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_ignore_missing_references_all_missing(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'badtest.py badtest2.py --ignore-missing-references on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn("Unable to resolve reference(s) 'badtest.py', 'badtest2.py'",
                      result.stderr)
        self.assertEqual('', result.stdout)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    def test_runner_noalias(self):
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            "def hello():\n    return 'Hello world'",
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        mytest.save()
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "%s" % (AVOCADO, self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s"
                              " --json -" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_hanged_test_with_status(self):
        """Check that avocado handles hung tests properly."""
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s "
                              "--json - --job-timeout 1" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            # Currently it should finish up to 1s after the job-timeout
            # but the prep and postprocess could take a bit longer on
            # some environments, so let's just check it does not take
            # > 60s, which is the deadline for force-finishing the test.
            self.assertLess(res.duration, 55, "Test execution took too long, "
                            "which is likely because the hung test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s "
                              "--json -" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s passtest.py '
                    'failtest.py passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir '
                    '%s bogustest' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_exception_not_in_path(self):
        os.mkdir(os.path.join(self.tmpdir, "shared_lib"))
        mylib = script.Script(os.path.join(self.tmpdir, "shared_lib",
                                           "mylib.py"),
                              "from avocado import TestCancel\n\n"
                              "class CancelExc(TestCancel):\n"
                              "    pass")
        mylib.save()
        mytest = script.Script(os.path.join(self.tmpdir, "mytest.py"),
                               RAISE_CUSTOM_PATH_EXCEPTION_CONTENT)
        mytest.save()
        result = process.run("%s --show test run --sysinfo=off "
                             "--job-results-dir %s %s"
                             % (AVOCADO, self.tmpdir, mytest))
        self.assertIn("mytest.py:SharedLibTest.test -> CancelExc: This "
                      "should not crash on unpickling in runner",
                      result.stdout)
        self.assertNotIn("Failed to read queue", result.stdout)

    def test_runner_timeout(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_abort(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        cmd_line = ('%s --silent run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        cmd_line = AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        cmd_line = '%s run --sysinfo=off --job-results-dir %s' % (AVOCADO,
                                                                  self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s sbrubles'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to resolve reference', result.stderr)
        self.assertNotIn('Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s --force-job-id '
                    'foobar passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % (AVOCADO, self.tmpdir))
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        for trial in range(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
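        """Checks that --dry-run cancels every test but still logs their parameters."""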
        cmd = ("%s run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run" % AVOCADO)
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn(tempfile.gettempdir(), debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check that all tests were cancelled
        self.assertEqual(result['cancel'], 4)
        for i in range(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test cancelled due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2" is in the tree, but not in any leaf, so it is
        # inaccessible from the test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('%s --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (AVOCADO, self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_read(self):
        cmd = "%s run --sysinfo=off --job-results-dir %%s %%s" % AVOCADO
        cmd %= (self.tmpdir, READ_BINARY)
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        os.chdir(basedir)

    def test_output_pass(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_cancel(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'cancelonsetup.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1',
                      result.stdout)

    @unittest.skipIf(not GNU_ECHO_BINARY,
                     'GNU style echo binary not available')
    def test_ugly_echo_cmd(self):
        cmd_line = ('%s run --external-runner "%s -ne" '
                    '"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' %
                    (AVOCADO, GNU_ECHO_BINARY, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         "1-foo__n_'____nbar_baz")

    def test_replay_skip_skipped(self):
        cmd = ("%s run --job-results-dir %s --json - "
               "cancelonsetup.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd)
        result = json.loads(result.stdout)
        jobid = str(result["job_id"])
        cmd = ("%s run --job-results-dir %s --replay %s "
               "--replay-test-status PASS" % (AVOCADO, self.tmpdir, jobid))
        process.run(cmd)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'ʊʋʉʈɑ ʅʛʌ',
            "#!/bin/sh\ntrue",
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  "#!/bin/sh\nfalse",
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()
        os.chdir(basedir)

    def test_simpletest_pass(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (AVOCADO, self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and ensure they do not take more than 30 seconds.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
                    % (AVOCADO, self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
                    % (AVOCADO, self.tmpdir, sleep_fail_sleep))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        # simplewarning.sh calls "avocado" without specifying a path
        os.environ['PATH'] += ":" + os.path.join(basedir, 'scripts')
        # simplewarning.sh calls "avocado exec-path" which doesn't have
        # access to an installed location for the libexec scripts
        os.environ['PATH'] += ":" + os.path.join(basedir, 'libexec')
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    @unittest.skipIf(not GNU_ECHO_BINARY, "Uses echo as test")
    def test_fs_unfriendly_run(self):
        os.chdir(basedir)
        commands_path = os.path.join(self.tmpdir, "commands")
        script.make_script(commands_path, "echo '\"\\/|?*<>'")
        config_path = os.path.join(self.tmpdir, "config.conf")
        script.make_script(config_path,
                           "[sysinfo.collectibles]\ncommands = %s"
                           % commands_path)
        cmd_line = ("%s --show all --config %s run --job-results-dir %s "
                    "--sysinfo=on --external-runner %s -- \"'\\\"\\/|?*<>'\""
                    % (AVOCADO, config_path, self.tmpdir, GNU_ECHO_BINARY))
        result = process.run(cmd_line)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "latest",
                                                    "test-results",
                                                    "1-\'________\'/")))
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "latest",
                                                    "sysinfo", "pre",
                                                    "echo \'________\'")))

        if html_capable():
            with open(os.path.join(self.tmpdir, "latest",
                                   "results.html")) as html_res:
                html_results = html_res.read()
            # test results should replace odd chars with "_"
            self.assertIn(os.path.join("test-results", "1-'________'"),
                          html_results)
            # sysinfo replaces "_" with " "
            self.assertIn("echo '________'", html_results)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        os.chdir(test_base_dir)
        test_file_name = os.path.basename(self.pass_script.path)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_kill_stopped_sleep(self):
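        """Checks that an avocado job stopped with SIGTSTP still honors the job timeout."""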
        proc = aexpect.Expect("%s run 60 --job-results-dir %s "
                              "--external-runner %s --sysinfo=off "
                              "--job-timeout 3"
                              % (AVOCADO, self.tmpdir, SLEEP_BINARY))
        proc.read_until_output_matches([r"\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado process, not the shell executing it
        avocado_shell = psutil.Process(proc.get_pid())
        avocado_proc = avocado_shell.children()[0]
        pid = avocado_proc.pid
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 9s after being stopped:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectedly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "exit status 8 (job interrupted).")

        sleep_dir = astring.string_to_safe_path("1-60")
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "supposed to be stopped and unable to produce it.\n"
                         "%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            "exit 0",
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            "exit 1",
            'avocado_externalrunner_functional')
        self.fail_script.save()
        os.chdir(basedir)

    def test_externalrunner_pass(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s'
                    % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s'
                    % (AVOCADO, self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh --external-runner-chdir=test %s'
                    % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=%s' % (AVOCADO, self.tmpdir, TRUE_CMD))
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        os.chdir(basedir)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        cmd_line = '%s sysinfo %s' % (AVOCADO, self.base_outputdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        cmd_line = '%s list' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        cmd_line = '%s list sbrubles' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to resolve reference", output)

    def test_list_no_file_loader(self):
        cmd_line = ("%s list --loaders external --verbose -- "
                    "this-wont-be-matched" % AVOCADO)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
                         "Avocado did not return rc %d:\n%s"
                         % (exit_codes.AVOCADO_ALL_OK, result))
        exp = ("Type    Test                 Tag(s)\n"
               "MISSING this-wont-be-matched \n\n"
               "TEST TYPES SUMMARY\n"
               "==================\n"
               "EXTERNAL: 0\n"
               "MISSING: 1\n")
        self.assertEqual(exp, result.stdout, "Stdout mismatch:\n%s\n\n%s"
                         % (exp, result))

    def test_list_verbose_tags(self):
        """
        Runs list verbosely and check for tag related output
        """
        test = script.make_script(os.path.join(self.base_outputdir, 'test.py'),
                                  VALID_PYTHON_TEST_WITH_TAGS)
        cmd_line = ("%s list --loaders file --verbose %s" % (AVOCADO,
                                                             test))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
                         "Avocado did not return rc %d:\n%s"
                         % (exit_codes.AVOCADO_ALL_OK, result))
        stdout_lines = result.stdout.splitlines()
        self.assertIn("Tag(s)", stdout_lines[0])
        full_test_name = "%s:MyTest.test" % test
        self.assertEqual("INSTRUMENTED %s BIG_TAG_NAME" % full_test_name,
                         stdout_lines[1])
        self.assertIn("TEST TYPES SUMMARY", stdout_lines)
        self.assertIn("INSTRUMENTED: 1", stdout_lines)
        self.assertIn("TEST TAGS SUMMARY", stdout_lines)
        self.assertEqual("BIG_TAG_NAME: 1", stdout_lines[-1])

    def test_plugin_list(self):
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        cmd_line = '%s config --paginator off' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        cmd_line = '%s config --datadir --paginator off' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_disable_plugin(self):
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = '%s --config %s plugins' % (AVOCADO, config)
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks if html, json, xunit and zip_archive plugins are enabled.
        Then it runs a test with zip_archive running first, which means the html,
        json and xunit output files do not make into the archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make into the archive.
        """
        def run_config(config_path):
            cmd = ('%s --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (AVOCADO, config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        if html_capable():
            result_plugins.append("html")
            result_outputs.append("results.html")

        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            zip_file.close()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
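        """
        Checks that `avocado plugins` exits cleanly and does not print an
        argparse "'Namespace' object has no attribute" error to stderr.
        """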
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        junit_xsd = os.path.join(os.path.dirname(__file__),
                                 os.path.pardir, ".data", 'junit-4.xsd')
        self.junit = os.path.abspath(junit_xsd)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (AVOCADO, self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        with open(self.junit, 'r') as f:
            xmlschema = etree.XMLSchema(etree.parse(f))

        self.assertTrue(xmlschema.validate(etree.parse(StringIO(xml_output))),
                        "Failed to validate against %s, message:\n%s" %
                        (self.junit,
                         xmlschema.error_log.filter_from_errors()))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1,
                         'Expected exactly one testsuite tag')

        testsuite_tag = testsuite_list[0]
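        # The testsuite element is expected to carry exactly 7 attributes,
        # assumed here to be name, tests, errors, failures, skipped, time
        # and timestamp.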
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip, e_ncancel=0, external_runner=None):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off --json - '
                    '--archive %s' % (AVOCADO, self.tmpdir, testname))
        if external_runner is not None:
            cmd_line += " --external-runner '%s'" % external_runner
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
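        # Abridged sketch of the JSON structure the assertions below rely
        # on (key names taken from those checks, values illustrative only):
        #
        #   {"tests": [{"url": "...", "logdir": "...", ...}],
        #    "errors": 0, "failures": 0, "skip": 0, "cancel": 0}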
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        n_cancel = json_data['cancel']
        self.assertEqual(n_cancel, e_ncancel,
                         "Different number of canceled tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    @unittest.skipIf(not GNU_ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
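        # Deliberately ugly command line: the test name mixes quotes,
        # backslashes, newline escapes and a slash, exercising both the
        # reported test 'url' and the logdir name escaping checked below.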
        data = self.run_and_check('"-ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0, external_runner=GNU_ECHO_BINARY)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1--ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         "1--ne foo__n_'____nbar_baz")

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()


if __name__ == '__main__':
    unittest.main()