# This Python file uses the following encoding: utf-8
import aexpect
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom
import zipfile
import unittest
import psutil
import pkg_resources

from six.moves import StringIO

from lxml import etree
from six import iteritems
from six.moves import xrange as range

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import genio
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

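# Absolute path to the root of the avocado source tree, used as the
# working directory for the tests below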
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)

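# The avocado command under test: defaults to the in-tree script, but can
# be overridden through the UNITTEST_AVOCADO_CMD environment variable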
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")

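# Test that imports a helper module ("mylib") shipped alongside the test file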
LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

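# Test that forcibly reports a status the runner does not support
# (for unittesting purposes only)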
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        # Please do NOT ever use this, it's for unittesting only.
        self._Test__status = 'not supported'

    def test(self):
        pass
'''

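# Test module whose class body references an undefined name, so it crashes
# when being loaded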
INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


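# Loadable test carrying an ":avocado: tags=" entry in its docstring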
VALID_PYTHON_TEST_WITH_TAGS = '''
from avocado import Test

class MyTest(Test):
    def test(self):
         """
         :avocado: tags=BIG_TAG_NAME
         """
         pass
'''


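# Test that pushes one status message to the runner queue and then hangs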
REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(70)
'''


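# Test that SIGKILLs its own process, so no status is ever reported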
DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


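# Test raising an exception class defined outside the runner's default
# sys.path, to check that unpickling such exceptions does not crash the runner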
RAISE_CUSTOM_PATH_EXCEPTION_CONTENT = '''import os
import sys

from avocado import Test

class SharedLibTest(Test):
    def test(self):
        sys.path.append(os.path.join(os.path.dirname(__file__), "shared_lib"))
        from mylib import CancelExc
        raise CancelExc("This should not crash on unpickling in runner")
'''


def probe_binary(binary):
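    """Return the full path of the given binary, or None if not found."""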
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None


TRUE_CMD = probe_binary('true')
CC_BINARY = probe_binary('cc')

# On macOS, the default GNU core-utils installation (brew)
# installs the gnu utility versions with a g prefix. It still has the
# BSD versions of the core utilities installed on their expected paths
# but their behavior and flags are in most cases different.
GNU_ECHO_BINARY = probe_binary('echo')
if GNU_ECHO_BINARY is not None:
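    # If the local echo man page does not document the '-e' flag (BSD
    # echo), fall back to the 'g' prefixed GNU version, when available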
    if probe_binary('man') is not None:
        echo_manpage = process.run('man %s' % os.path.basename(GNU_ECHO_BINARY)).stdout
        if '-e' not in echo_manpage:
            GNU_ECHO_BINARY = probe_binary('gecho')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')


def html_capable():
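    """Check whether the HTML result plugin is available."""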
    try:
        pkg_resources.require('avocado-framework-plugin-result-html')
        return True
    except pkg_resources.DistributionNotFound:
        return False


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        os.chdir(basedir)

    def test_show_version(self):
        result = process.run('%s -v' % AVOCADO, ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check that a custom configuration is
        applied, even on the more complex data_dir module, which adds extra
        checks to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in iteritems(mapping):
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        # os.write() requires bytes on Python 3
        os.write(fd, config.encode('utf-8'))
        os.close(fd)

        cmd = '%s --config %s config --datadir' % (AVOCADO, config_file)
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)
        # Also check whether jobdata contains the correct mux_path
        variants = genio.read_file(os.path.join(self.tmpdir, "latest",
                                                "jobdata", "variants.json"))
        self.assertIn('["/run/*"]', variants, "mux_path stored in jobdata "
                      "does not contain [\"/run/*\"]\n%s" % variants)

    def test_runner_failfast(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_ignore_missing_references_one_missing(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py badtest.py --ignore-missing-references on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn("Unable to resolve reference(s) 'badtest.py'", result.stderr)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 0 | SKIP 0', result.stdout)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_ignore_missing_references_all_missing(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'badtest.py badtest2.py --ignore-missing-references on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn("Unable to resolve reference(s) 'badtest.py', 'badtest2.py'",
                      result.stderr)
        self.assertEqual('', result.stdout)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    def test_runner_noalias(self):
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % (AVOCADO, self.tmpdir))
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            "def hello():\n    return 'Hello world'",
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        mytest.save()
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "%s" % (AVOCADO, self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s"
                              " --json -" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_hanged_test_with_status(self):
        """ Check that avocado handles hung tests properly """
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s "
                              "--json - --job-timeout 1" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            # Currently it should finish up to 1s after the job-timeout,
            # but the prep and postprocess could take a bit longer on
            # some environments, so let's just check it does not take
            # > 55s, staying safely below the 60s deadline for
            # force-finishing the test.
            self.assertLess(res.duration, 55, "Test execution took too long, "
                            "which is likely because the hung test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s "
                              "--json -" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s passtest.py '
                    'failtest.py passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir '
                    '%s bogustest' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_exception_not_in_path(self):
        os.mkdir(os.path.join(self.tmpdir, "shared_lib"))
        mylib = script.Script(os.path.join(self.tmpdir, "shared_lib",
                                           "mylib.py"),
                              "from avocado import TestCancel\n\n"
                              "class CancelExc(TestCancel):\n"
                              "    pass")
        mylib.save()
        mytest = script.Script(os.path.join(self.tmpdir, "mytest.py"),
                               RAISE_CUSTOM_PATH_EXCEPTION_CONTENT)
        mytest.save()
        result = process.run("%s --show test run --sysinfo=off "
                             "--job-results-dir %s %s"
                             % (AVOCADO, self.tmpdir, mytest))
        self.assertIn("mytest.py:SharedLibTest.test -> CancelExc: This "
                      "should not crash on unpickling in runner",
                      result.stdout)
        self.assertNotIn("Failed to read queue", result.stdout)

    def test_runner_timeout(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_abort(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        cmd_line = ('%s --silent run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        cmd_line = AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        cmd_line = '%s run --sysinfo=off --job-results-dir %s' % (AVOCADO,
                                                                  self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s sbrubles'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to resolve reference', result.stderr)
        self.assertNotIn('Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s --force-job-id '
                    'foobar passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % (AVOCADO, self.tmpdir))
        avocado_process = process.SubProcess(cmd_line)
        try:
            avocado_process.start()
            link = os.path.join(self.tmpdir, 'latest')
            for trial in range(0, 50):
                time.sleep(0.1)
                if os.path.exists(link) and os.path.islink(link):
                    avocado_process.wait()
                    break
            self.assertTrue(os.path.exists(link))
            self.assertTrue(os.path.islink(link))
        finally:
            avocado_process.wait()

    def test_dry_run(self):
        cmd = ("%s run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run" % AVOCADO)
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = genio.read_file(debuglog)
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn(tempfile.gettempdir(), debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check whether all tests were cancelled
        self.assertEqual(result['cancel'], 4)
        for i in range(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test cancelled due to --dry-run')
        # Check whether all params are listed
        # The "/:bar ==> 2" is in the tree, but not in any leaf, so it is
        # inaccessible from the test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('%s --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (AVOCADO, self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_read(self):
        cmd = "%s run --sysinfo=off --job-results-dir %%s %%s" % AVOCADO
        cmd %= (self.tmpdir, READ_BINARY)
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        os.chdir(basedir)

    def test_output_pass(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_cancel(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'cancelonsetup.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1',
                      result.stdout)

    @unittest.skipIf(not GNU_ECHO_BINARY,
                     'GNU style echo binary not available')
    def test_ugly_echo_cmd(self):
        cmd_line = ('%s run --external-runner "%s -ne" '
                    '"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' %
                    (AVOCADO, GNU_ECHO_BINARY, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "Expected exactly one directory "
                         "in test-results dir, as only one test was "
                         "executed: %s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         "1-foo__n_'____nbar_baz")

    def test_replay_skip_skipped(self):
        cmd = ("%s run --job-results-dir %s --json - "
               "cancelonsetup.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd)
        result = json.loads(result.stdout)
        jobid = str(result["job_id"])
        cmd = ("%s run --job-results-dir %s --replay %s "
               "--replay-test-status PASS" % (AVOCADO, self.tmpdir, jobid))
        process.run(cmd)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'ʊʋʉʈɑ ʅʛʌ',
            "#!/bin/sh\ntrue",
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  "#!/bin/sh\nfalse",
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()
        os.chdir(basedir)

    def test_simpletest_pass(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (AVOCADO, self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and check that they do not take more than 30 seconds.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
                    % (AVOCADO, self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
                    % (AVOCADO, self.tmpdir, sleep_fail_sleep))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        # simplewarning.sh calls "avocado" without specifying a path
        os.environ['PATH'] += ":" + os.path.join(basedir, 'scripts')
        # simplewarning.sh calls "avocado exec-path", which does not have
        # access to an installed location for the libexec scripts
        os.environ['PATH'] += ":" + os.path.join(basedir, 'libexec')
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    @unittest.skipIf(not GNU_ECHO_BINARY, "Uses echo as test")
    def test_fs_unfriendly_run(self):
        os.chdir(basedir)
        commands_path = os.path.join(self.tmpdir, "commands")
        script.make_script(commands_path, "echo '\"\\/|?*<>'")
        config_path = os.path.join(self.tmpdir, "config.conf")
        script.make_script(config_path,
                           "[sysinfo.collectibles]\ncommands = %s"
                           % commands_path)
        cmd_line = ("%s --show all --config %s run --job-results-dir %s "
                    "--sysinfo=on --external-runner %s -- \"'\\\"\\/|?*<>'\""
                    % (AVOCADO, config_path, self.tmpdir, GNU_ECHO_BINARY))
        result = process.run(cmd_line)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "latest",
                                                    "test-results",
                                                    "1-\'________\'/")))
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "latest",
                                                    "sysinfo", "pre",
                                                    "echo \'________\'")))

        if html_capable():
            with open(os.path.join(self.tmpdir, "latest",
                                   "results.html")) as html_res:
                html_results = html_res.read()
            # test results should replace odd chars with "_"
            self.assertIn(os.path.join("test-results", "1-'________'"),
                          html_results)
            # sysinfo replaces "_" with " "
            self.assertIn("echo '________'", html_results)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        os.chdir(test_base_dir)
        test_file_name = os.path.basename(self.pass_script.path)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_kill_stopped_sleep(self):
        proc = aexpect.Expect("%s run 60 --job-results-dir %s "
                              "--external-runner %s --sysinfo=off "
                              "--job-timeout 3"
                              % (AVOCADO, self.tmpdir, SLEEP_BINARY))
        proc.read_until_output_matches([r"\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado process, not the shell executing it
        avocado_shell = psutil.Process(proc.get_pid())
        avocado_proc = avocado_shell.children()[0]
        pid = avocado_proc.pid
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 9s after the job "
                      "timeout:\n%s" % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectedly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "status 8 (job interrupted).")

        sleep_dir = astring.string_to_safe_path("1-60")
        debug_log_path = os.path.join(self.tmpdir, "latest", "test-results",
                                      sleep_dir, "debug.log")

        debug_log = genio.read_file(debug_log_path)
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "supposed to be stopped and unable to produce it.\n"
                         "%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            "exit 0",
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            "exit 1",
            'avocado_externalrunner_functional')
        self.fail_script.save()
        os.chdir(basedir)

    def test_externalrunner_pass(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s'
                    % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s'
                    % (AVOCADO, self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh --external-runner-chdir=test %s'
                    % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=%s' % (AVOCADO, self.tmpdir, TRUE_CMD))
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        os.chdir(basedir)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        cmd_line = '%s sysinfo %s' % (AVOCADO, self.base_outputdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        cmd_line = '%s list' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        cmd_line = '%s list sbrubles' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to resolve reference", output)

    def test_list_no_file_loader(self):
        cmd_line = ("%s list --loaders external --verbose -- "
                    "this-wont-be-matched" % AVOCADO)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
                         "Avocado did not return rc %d:\n%s"
                         % (exit_codes.AVOCADO_ALL_OK, result))
        exp = ("Type    Test                 Tag(s)\n"
               "MISSING this-wont-be-matched \n\n"
               "TEST TYPES SUMMARY\n"
               "==================\n"
               "EXTERNAL: 0\n"
               "MISSING: 1\n")
        self.assertEqual(exp, result.stdout, "Stdout mismatch:\n%s\n\n%s"
                         % (exp, result))

    def test_list_verbose_tags(self):
        """
        Runs list verbosely and checks for tag-related output
        """
        test = script.make_script(os.path.join(self.base_outputdir, 'test.py'),
                                  VALID_PYTHON_TEST_WITH_TAGS)
        cmd_line = ("%s list --loaders file --verbose %s" % (AVOCADO,
                                                             test))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
                         "Avocado did not return rc %d:\n%s"
                         % (exit_codes.AVOCADO_ALL_OK, result))
        stdout_lines = result.stdout.splitlines()
        self.assertIn("Tag(s)", stdout_lines[0])
        full_test_name = "%s:MyTest.test" % test
        self.assertEqual("INSTRUMENTED %s BIG_TAG_NAME" % full_test_name,
                         stdout_lines[1])
        self.assertIn("TEST TYPES SUMMARY", stdout_lines)
        self.assertIn("INSTRUMENTED: 1", stdout_lines)
        self.assertIn("TEST TAGS SUMMARY", stdout_lines)
        self.assertEqual("BIG_TAG_NAME: 1", stdout_lines[-1])

    def test_plugin_list(self):
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        cmd_line = '%s config --paginator off' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        cmd_line = '%s config --datadir --paginator off' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_disable_plugin(self):
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = '%s --config %s plugins' % (AVOCADO, config)
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks whether the html, json, xunit and zip_archive plugins
        are enabled. Then it runs a test with zip_archive running first, which
        means the html, json and xunit output files do not make it into the
        archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make it into the archive.
        """
        def run_config(config_path):
            cmd = ('%s --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (AVOCADO, config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        if html_capable():
            result_plugins.append("html")
            result_outputs.append("results.html")

        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        junit_xsd = os.path.join(os.path.dirname(__file__),
                                 os.path.pardir, ".data", 'junit-4.xsd')
        self.junit = os.path.abspath(junit_xsd)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
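        # e_rc is the expected Avocado exit code; the remaining e_*
        # arguments are the expected testsuite counters in the xunit
        # output (tests, errors, not found, failures, skipped).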
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (AVOCADO, self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

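        # Besides parsing, validate the output against the bundled
        # junit-4.xsd schema.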
        with open(self.junit, 'r') as f:
            xmlschema = etree.XMLSchema(etree.parse(f))

        self.assertTrue(xmlschema.validate(etree.parse(StringIO(xml_output))),
                        "Failed to validate against %s, message:\n%s" %
                        (self.junit,
                         xmlschema.error_log.filter_from_errors()))

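        # Exactly one <testsuite> element is expected; its counter
        # attributes are checked below.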
        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip, e_ncancel=0, external_runner=None):
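        # The e_* arguments are the expected counters in the JSON
        # results (tests, errors, failures, skipped, canceled);
        # external_runner, when given, is passed via --external-runner.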
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off --json - '
                    '--archive %s' % (AVOCADO, self.tmpdir, testname))
        if external_runner is not None:
            cmd_line += " --external-runner '%s'" % external_runner
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        n_cancel = json_data['cancel']
        self.assertEqual(n_cancel, e_ncancel,
                         "Different number of canceled tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    @unittest.skipIf(not GNU_ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        data = self.run_and_check('"-ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0, external_runner=GNU_ECHO_BINARY)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1--ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         "1--ne foo__n_'____nbar_baz")

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()


if __name__ == '__main__':
    unittest.main()