test_basic.py 58.8 KB
Newer Older
1
# This Python file uses the following encoding: utf-8
2 3
import aexpect
import glob
4
import json
5
import os
6
import re
7
import shutil
8
import signal
9
import sys
10
import tempfile
11
import time
12
import xml.dom.minidom
13
import zipfile
14
import unittest
15
import psutil
16
import pkg_resources
17

18 19 20 21
# io.BytesIO exists on Python 2.6+ and Python 3 alike.  The old fallback
# (`from BytesIO import BytesIO` under a bare `except:`) referenced a module
# that does not exist anywhere, so it could only ever re-raise.
from io import BytesIO
22

23 24
from lxml import etree
from six import iteritems
25
from six.moves import xrange as range
26

27
from avocado.core import exit_codes
28
from avocado.utils import astring
29
from avocado.utils import genio
30 31
from avocado.utils import process
from avocado.utils import script
32
from avocado.utils import path as utils_path
33

34
# Root of the avocado source tree: two directory levels above this file's
# own directory.
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)

# Avocado command under test; overridable (e.g. to exercise an installed
# avocado) via the UNITTEST_AVOCADO_CMD environment variable.
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
#: Test source that imports a helper module ("mylib") expected to live next
#: to the test file; used to check that test-local imports resolve.
LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

#: Test source that forges an unsupported internal status; used to check the
#: runner's handling of tests reporting a status it does not know.
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        # Please do NOT ever use this, it's for unittesting only.
        self._Test__status = 'not supported'

    def test(self):
        pass
'''

#: Test source whose class body references an undefined name, so the module
#: crashes at load time; used to check load-error reporting.
INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''

#: Loadable test source whose docstring carries an avocado tags marker.
VALID_PYTHON_TEST_WITH_TAGS = '''
from avocado import Test

class MyTest(Test):
    def test(self):
         """
         :avocado: tags=BIG_TAG_NAME
         """
         pass
'''


#: Test source that pushes a status message onto the runner queue and then
#: hangs; used to check handling of hanged-but-reporting tests.
REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(70)
'''

#: Test source that SIGKILLs its own process, so no status is ever reported
#: back to the runner.
DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


#: Test source raising an exception class that lives on a path only the test
#: itself adds to sys.path; checks the runner can unpickle such exceptions.
RAISE_CUSTOM_PATH_EXCEPTION_CONTENT = '''import os
import sys

from avocado import Test

class SharedLibTest(Test):
    def test(self):
        sys.path.append(os.path.join(os.path.dirname(__file__), "shared_lib"))
        from mylib import CancelExc
        raise CancelExc("This should not crash on unpickling in runner")
'''


def probe_binary(binary):
    """Return the full path of `binary` from $PATH, or None when not found."""
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None

TRUE_CMD = probe_binary('true')
CC_BINARY = probe_binary('cc')

# On macOS, the default GNU core-utils installation (brew)
# installs the gnu utility versions with a g prefix. It still has the
# BSD versions of the core utilities installed on their expected paths
# but their behavior and flags are in most cases different.
GNU_ECHO_BINARY = probe_binary('echo')
if GNU_ECHO_BINARY is not None:
    if probe_binary('man') is not None:
        echo_manpage = process.run('man %s' % os.path.basename(GNU_ECHO_BINARY)).stdout
        if '-e' not in echo_manpage:
            GNU_ECHO_BINARY = probe_binary('gecho')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')
142 143


144 145 146 147 148 149 150 151
def html_capable():
    """Tell whether the avocado HTML result plugin is installed."""
    try:
        pkg_resources.require('avocado-framework-plugin-result-html')
    except pkg_resources.DistributionNotFound:
        return False
    return True


class RunnerOperationTest(unittest.TestCase):

    """Functional checks of the "avocado run" command line behavior."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        os.chdir(basedir)

    def test_show_version(self):
        """`avocado -v` exits 0 and prints a sane version string to stderr."""
        result = process.run('%s -v' % AVOCADO, ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d\\.\\d:'\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        # The section header needs its own line: without the trailing
        # newline the first option would be glued to "[datadir.paths]"
        # and the generated config file would not parse.
        config = '[datadir.paths]\n'
        for key, value in iteritems(mapping):
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        cmd = '%s --config %s config --datadir' % (AVOCADO, config_file)
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        """Two passing tests exit all-ok and record correct jobdata paths."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)
        # Also check whether jobdata contains correct parameter paths
        variants = open(os.path.join(self.tmpdir, "latest", "jobdata",
                        "variants.json")).read()
        self.assertIn('["/run/*"]', variants, "paths stored in jobdata "
                      "does not contains [\"/run/*\"]\n%s" % variants)

    def test_runner_failfast(self):
        """--failfast interrupts the job right after the first failure."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_ignore_missing_references_one_missing(self):
        """One unresolvable reference is ignored; the rest still runs."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py badtest.py --ignore-missing-references on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn("Unable to resolve reference(s) 'badtest.py'", result.stderr)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 0 | SKIP 0', result.stdout)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_ignore_missing_references_all_missing(self):
        """When every reference is unresolvable the whole job fails."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'badtest.py badtest2.py --ignore-missing-references on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn("Unable to resolve reference(s) 'badtest.py', 'badtest2.py'",
                      result.stderr)
        self.assertEqual('', result.stdout)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_test_with_local_imports(self):
        """A test importing a module from its own directory runs fine."""
        mylib = script.TemporaryScript(
            'mylib.py',
            "def hello():\n    return 'Hello world'",
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        mytest.save()
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "%s" % (AVOCADO, self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        """A test reporting an unknown status ends up as a runner ERROR."""
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s"
                              " --json -" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping test that take a long time to run, are "
                     "resource intensive or time sensitve")
    def test_hanged_test_with_status(self):
        """ Check that avocado handles hanged tests properly """
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s "
                              "--json - --job-timeout 1" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            # Currently it should finish up to 1s after the job-timeout
            # but the prep and postprocess could take a bit longer on
            # some environments, so just check it finishes well under the
            # 60s deadline for force-finishing the test.
            self.assertLess(res.duration, 55, "Test execution took too long, "
                            "which is likely because the hanged test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        """A test killed before reporting any status becomes an ERROR."""
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s "
                              "--json -" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        """A failing test makes the job exit with the tests-failed rc."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s passtest.py '
                    'failtest.py passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        """An unresolvable reference fails the job without crashing."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir '
                    '%s bogustest' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        """Failures in both test and tearDown both show up in the output."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        """An uncaught exception inside a test is reported as ERROR."""
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        """An exception covered by fail_on is reported as FAIL, not ERROR."""
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_exception_not_in_path(self):
        """Exceptions defined outside the runner's path unpickle cleanly."""
        os.mkdir(os.path.join(self.tmpdir, "shared_lib"))
        mylib = script.Script(os.path.join(self.tmpdir, "shared_lib",
                                           "mylib.py"),
                              "from avocado import TestCancel\n\n"
                              "class CancelExc(TestCancel):\n"
                              "    pass")
        mylib.save()
        mytest = script.Script(os.path.join(self.tmpdir, "mytest.py"),
                               RAISE_CUSTOM_PATH_EXCEPTION_CONTENT)
        mytest.save()
        result = process.run("%s --show test run --sysinfo=off "
                             "--job-results-dir %s %s"
                             % (AVOCADO, self.tmpdir, mytest))
        self.assertIn("mytest.py:SharedLibTest.test -> CancelExc: This "
                      "should not crash on unpickling in runner",
                      result.stdout)
        self.assertNotIn("Failed to read queue", result.stdout)

    def test_runner_timeout(self):
        """A test hitting its timeout interrupts the job with a clear message."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping test that take a long time to run, are "
                     "resource intensive or time sensitve")
    def test_runner_abort(self):
        """A test aborting its process is reported as died-without-status."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, result.stdout)

    def test_silent_output(self):
        """--silent suppresses all stdout."""
        cmd_line = ('%s --silent run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertEqual(result.stdout, '')

    def test_empty_args_list(self):
        """Calling avocado with no arguments produces a usage error."""
        cmd_line = AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_FAIL)
        self.assertIn('error: too few arguments', result.stderr)

    def test_empty_test_list(self):
        """`avocado run` without references fails with a clear message."""
        cmd_line = '%s run --sysinfo=off --job-results-dir %s' % (AVOCADO,
                                                                  self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_JOB_FAIL)
        self.assertIn('No test references provided nor any other arguments '
                      'resolved into tests', result.stderr)

    def test_not_found(self):
        """Unresolvable references are reported on stderr, not stdout."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s sbrubles'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_JOB_FAIL)
        self.assertIn('Unable to resolve reference', result.stderr)
        self.assertNotIn('Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        """--force-job-id rejects values that are not 40-digit hex."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s --force-job-id '
                    'foobar passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        """--force-job-id accepts a valid 40-digit hex job id."""
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        """An auto-generated job id is a 40-digit hex string."""
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's an hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % (AVOCADO, self.tmpdir))
        avocado_process = process.SubProcess(cmd_line)
        try:
            avocado_process.start()
            link = os.path.join(self.tmpdir, 'latest')
            for trial in range(0, 50):
                time.sleep(0.1)
                if os.path.exists(link) and os.path.islink(link):
                    avocado_process.wait()
                    break
            self.assertTrue(os.path.exists(link))
            self.assertTrue(os.path.islink(link))
        finally:
            avocado_process.wait()

    def test_dry_run(self):
        """--dry-run cancels every test and records the injected params."""
        cmd = ("%s run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run" % AVOCADO)
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = genio.read_file(debuglog)
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn(tempfile.gettempdir(), debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['cancel'], 4)
        for i in range(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test cancelled due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        """A broken test module is reported as a load error (TestError)."""
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('%s --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (AVOCADO, self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping test that take a long time to run, are "
                     "resource intensive or time sensitve")
    def test_read(self):
        """A test blocked on stdin must not stall the whole job."""
        cmd = "%s run --sysinfo=off --job-results-dir %%s %%s" % AVOCADO
        cmd %= (self.tmpdir, READ_BINARY)
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
class RunnerHumanOutputTest(unittest.TestCase):

    """Checks of the human-readable (console) output of "avocado run"."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        os.chdir(basedir)

    def test_output_pass(self):
        """A passing test shows a PASS line on the console."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        """A failing test shows a FAIL line on the console."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        """An erroring test shows an ERROR line on the console."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_cancel(self):
        """A cancelled test is counted in the CANCEL column of the summary."""
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'cancelonsetup.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1',
                      result.stdout)

    @unittest.skipIf(not GNU_ECHO_BINARY,
                     'GNU style echo binary not available')
    def test_ugly_echo_cmd(self):
        """Special characters in an external-runner test name are escaped."""
        cmd_line = ('%s run --external-runner "%s -ne" '
                    '"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' %
                    (AVOCADO, GNU_ECHO_BINARY, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         "1-foo__n_'____nbar_baz")

    def test_replay_skip_skipped(self):
        """Replaying with --replay-test-status PASS handles cancelled tests."""
        cmd = ("%s run --job-results-dir %s --json - "
               "cancelonsetup.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd)
        result = json.loads(result.stdout)
        jobid = str(result["job_id"])
        cmd = ("%s run --job-results-dir %s --replay %s "
               "--replay-test-status PASS" % (AVOCADO, self.tmpdir, jobid))
        process.run(cmd)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
class RunnerSimpleTest(unittest.TestCase):

    """
    Functional checks for running SIMPLE (shell script) tests,
    including timing constraints, warning propagation, filesystem
    unfriendly names and signal handling.
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        # Deliberately non-ASCII script name, to exercise unicode handling
        self.pass_script = script.TemporaryScript(
            'ʊʋʉʈɑ ʅʛʌ',
            "#!/bin/sh\ntrue",
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  "#!/bin/sh\nfalse",
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()
        os.chdir(basedir)

    def test_simpletest_pass(self):
        """A shell script exiting 0 must produce an all-OK job."""
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        """A shell script exiting non-zero must produce a tests-failed job."""
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (AVOCADO, self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping test that take a long time to run, are "
                     "resource intensive or time sensitve")
    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and assure they not take more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
                    % (AVOCADO, self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping test that take a long time to run, are "
                     "resource intensive or time sensitve")
    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
                    % (AVOCADO, self.tmpdir, sleep_fail_sleep))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        # 2 sleeptests (~2s) + 100 fast failtests, with ample headroom
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        # simplewarning.sh calls "avocado" without specifying a path
        os.environ['PATH'] += ":" + os.path.join(basedir, 'scripts')
        # simplewarning.sh calls "avocado exec-path" which hasn't
        # access to an installed location for the libexec scripts
        os.environ['PATH'] += ":" + os.path.join(basedir, 'libexec')
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        # Each log level emitted by the bash utils must reach the job log
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    @unittest.skipIf(not GNU_ECHO_BINARY, "Uses echo as test")
    def test_fs_unfriendly_run(self):
        """
        Check that filesystem-unfriendly characters in test references and
        sysinfo commands are replaced by "_" in result directory names.
        """
        os.chdir(basedir)
        commands_path = os.path.join(self.tmpdir, "commands")
        script.make_script(commands_path, "echo '\"\\/|?*<>'")
        config_path = os.path.join(self.tmpdir, "config.conf")
        script.make_script(config_path,
                           "[sysinfo.collectibles]\ncommands = %s"
                           % commands_path)
        cmd_line = ("%s --show all --config %s run --job-results-dir %s "
                    "--sysinfo=on --external-runner %s -- \"'\\\"\\/|?*<>'\""
                    % (AVOCADO, config_path, self.tmpdir, GNU_ECHO_BINARY))
        result = process.run(cmd_line)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "latest",
                                                    "test-results",
                                                    "1-\'________\'/")))
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "latest",
                                                    "sysinfo", "pre",
                                                    "echo \'________\'")))

        if html_capable():
            with open(os.path.join(self.tmpdir, "latest",
                                   "results.html")) as html_res:
                html_results = html_res.read()
            # test results should replace odd chars with "_"
            self.assertIn(os.path.join("test-results", "1-'________'"),
                          html_results)
            # sysinfo replaces "_" with " "
            self.assertIn("echo '________'", html_results)

    def test_non_absolute_path(self):
        """Test references relative to the current dir must be resolved."""
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        os.chdir(test_base_dir)
        test_file_name = os.path.basename(self.pass_script.path)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping test that take a long time to run, are "
                     "resource intensive or time sensitve")
    def test_kill_stopped_sleep(self):
        """
        SIGTSTP-stop a running test and check the job-timeout still kills
        the job cleanly, without a Traceback in the test's debug.log.
        """
        proc = aexpect.Expect("%s run 60 --job-results-dir %s "
                              "--external-runner %s --sysinfo=off "
                              "--job-timeout 3"
                              % (AVOCADO, self.tmpdir, SLEEP_BINARY))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado process, not the shell executing it
        avocado_shell = psutil.Process(proc.get_pid())
        avocado_proc = avocado_shell.children()[0]
        pid = avocado_proc.pid
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            # NOTE(review): message says "5s" but the deadline above is
            # 9s after the 3s job-timeout -- confirm which is intended
            self.fail("Avocado process still alive 5s after job-timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
        # NOTE(review): exit status 8 is asserted but the failure message
        # says "finish with 1." -- confirm the expected status/message
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "1.")

        sleep_dir = astring.string_to_safe_path("1-60")
        debug_log_path = os.path.join(self.tmpdir, "latest", "test-results",
                                      sleep_dir, "debug.log")

        debug_log = genio.read_file(debug_log_path)
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "suppose to be stopped and unable to produce it.\n"
                         "%s" % debug_log)

    def tearDown(self):
        # Remove the temporary scripts and the job results directory
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)
828 829


A
Amador Pahim 已提交
830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873
class RunnerSimpleTestStatus(unittest.TestCase):

    """
    Checks that a simple test's stdout can be mapped to the WARN/SKIP
    statuses via the "[simpletests.status]" regex configuration.
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

        self.config_file = script.TemporaryScript('avocado.conf',
                                                  "[simpletests.status]\n"
                                                  "warn_regex = ^WARN$\n"
                                                  "skip_regex = ^SKIP$\n")
        self.config_file.save()
        os.chdir(basedir)

    def _check_simple_status(self, name, body, expected_status):
        # Creates a temporary shell test, runs it under the custom config
        # and asserts the status reported in the JSON results.
        test_script = script.TemporaryScript(name, body,
                                             'avocado_simpletest_'
                                             'functional')
        test_script.save()
        try:
            cmd_line = ('%s --config %s run --job-results-dir %s --sysinfo=off'
                        ' %s --json -' % (AVOCADO, self.config_file.path,
                                          self.tmpdir, test_script.path))
            result = process.system_output(cmd_line, ignore_status=True)
            json_results = json.loads(result)
            # assertEqual: assertEquals is a deprecated alias
            self.assertEqual(json_results['tests'][0]['status'],
                             expected_status)
        finally:
            # remove() runs even when the assertion fails, so the
            # temporary script is never leaked
            test_script.remove()

    def test_simpletest_status(self):
        self._check_simple_status('avocado_warn.sh',
                                  "#!/bin/sh\necho WARN", 'WARN')
        self._check_simple_status('avocado_skip.sh',
                                  "#!/bin/sh\necho SKIP", 'SKIP')

    def tearDown(self):
        self.config_file.remove()
        shutil.rmtree(self.tmpdir)


874
class ExternalRunnerTest(unittest.TestCase):

    """
    Functional checks for the "--external-runner" feature, using
    /bin/sh to execute trivial passing and failing scripts.
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        # One script that exits 0 and one that exits 1, both meant to be
        # executed through the external runner rather than directly
        self.pass_script = script.TemporaryScript(
            'pass', "exit 0", 'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail', "exit 1", 'avocado_externalrunner_functional')
        self.fail_script.save()
        os.chdir(basedir)

    def _run_with_sh_runner(self, test_path):
        # Runs a single reference through "/bin/sh" as the external runner
        cmd = ('%s run --job-results-dir %s --sysinfo=off '
               '--external-runner=/bin/sh %s'
               % (AVOCADO, self.tmpdir, test_path))
        return process.run(cmd, ignore_status=True)

    def test_externalrunner_pass(self):
        """A passing external test must end the job with ALL_OK."""
        result = self._run_with_sh_runner(self.pass_script.path)
        wanted = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, wanted,
                         "Avocado did not return rc %d:\n%s" %
                         (wanted, result))

    def test_externalrunner_fail(self):
        """A failing external test must end the job with TESTS_FAIL."""
        result = self._run_with_sh_runner(self.fail_script.path)
        wanted = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, wanted,
                         "Avocado did not return rc %d:\n%s" %
                         (wanted, result))

    def test_externalrunner_chdir_no_testdir(self):
        """--external-runner-chdir=test requires --external-runner-testdir."""
        cmd = ('%s run --job-results-dir %s --sysinfo=off '
               '--external-runner=/bin/sh --external-runner-chdir=test %s'
               % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        wanted = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, wanted,
                         "Avocado did not return rc %d:\n%s" %
                         (wanted, result))

    def test_externalrunner_no_url(self):
        """An external runner with no test reference must fail the job."""
        cmd = ('%s run --job-results-dir %s --sysinfo=off '
               '--external-runner=%s' % (AVOCADO, self.tmpdir, TRUE_CMD))
        result = process.run(cmd, ignore_status=True)
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertIn(expected_output, result.stderr)
        wanted = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, wanted,
                         "Avocado did not return rc %d:\n%s" %
                         (wanted, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


941
class AbsPluginsTest(object):

    """
    Mixin giving each plugin test a disposable output directory and a
    well-known working directory (the project base dir).
    """

    def setUp(self):
        prefix = 'avocado_' + __name__
        self.base_outputdir = tempfile.mkdtemp(prefix=prefix)
        os.chdir(basedir)

    def tearDown(self):
        # Drop everything written under the scratch output directory
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    """
    Functional checks for avocado's command line plugins: sysinfo,
    list, plugins, config and the result-plugin ordering machinery.
    """

    def test_sysinfo_plugin(self):
        """"avocado sysinfo <dir>" must succeed and write files into <dir>."""
        cmd_line = '%s sysinfo %s' % (AVOCADO, self.base_outputdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        """"avocado list" must succeed and find tests."""
        cmd_line = '%s list' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', result.stdout)

    def test_list_error_output(self):
        """Listing an unresolvable reference must fail with a clear error."""
        cmd_line = '%s list sbrubles' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to resolve reference", result.stderr)

    def test_list_no_file_loader(self):
        """With only the external loader, references show up as MISSING."""
        cmd_line = ("%s list --loaders external --verbose -- "
                    "this-wont-be-matched" % AVOCADO)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
                         "Avocado did not return rc %d:\n%s"
                         % (exit_codes.AVOCADO_ALL_OK, result))
        # Exact expected table output, including the trailing summary
        exp = ("Type    Test                 Tag(s)\n"
               "MISSING this-wont-be-matched \n\n"
               "TEST TYPES SUMMARY\n"
               "==================\n"
               "EXTERNAL: 0\n"
               "MISSING: 1\n")
        self.assertEqual(exp, result.stdout, "Stdout mismatch:\n%s\n\n%s"
                         % (exp, result))

    def test_list_verbose_tags(self):
        """
        Runs list verbosely and check for tag related output
        """
        test = script.make_script(os.path.join(self.base_outputdir, 'test.py'),
                                  VALID_PYTHON_TEST_WITH_TAGS)
        cmd_line = ("%s list --loaders file --verbose %s" % (AVOCADO,
                                                             test))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
                         "Avocado did not return rc %d:\n%s"
                         % (exit_codes.AVOCADO_ALL_OK, result))
        stdout_lines = result.stdout.splitlines()
        self.assertIn("Tag(s)", stdout_lines[0])
        full_test_name = "%s:MyTest.test" % test
        self.assertEqual("INSTRUMENTED %s BIG_TAG_NAME" % full_test_name,
                         stdout_lines[1])
        self.assertIn("TEST TYPES SUMMARY", stdout_lines)
        self.assertIn("INSTRUMENTED: 1", stdout_lines)
        self.assertIn("TEST TAGS SUMMARY", stdout_lines)
        self.assertEqual("BIG_TAG_NAME: 1", stdout_lines[-1])

    def test_plugin_list(self):
        """"avocado plugins" must succeed and show no disabled plugins."""
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        # NOTE(review): (2, 7) >= (2, 7, 0) is False in tuple comparison,
        # so this check is skipped on Python 2.7 itself -- confirm whether
        # ">= (2, 7)" was intended
        if sys.version_info[:2] >= (2, 7, 0):
            self.assertNotIn('Disabled', result.stdout)

    def test_config_plugin(self):
        """"avocado config" must succeed and show no disabled plugins."""
        cmd_line = '%s config --paginator off' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', result.stdout)

    def test_config_plugin_datadir(self):
        """"avocado config --datadir" must succeed as well."""
        cmd_line = '%s config --datadir --paginator off' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', result.stdout)

    def test_disable_plugin(self):
        """A plugin disabled via config must vanish from "avocado plugins"."""
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = '%s --config %s plugins' % (AVOCADO, config)
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks if html, json, xunit and zip_archive plugins are enabled.
        Then it runs a test with zip_archive running first, which means the html,
        json and xunit output files do not make into the archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make into the archive.
        """
        def run_config(config_path):
            # Runs passtest.py with --archive under the given config and
            # checks the job succeeded
            cmd = ('%s --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (AVOCADO, config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        if html_capable():
            result_plugins.append("html")
            result_outputs.append("results.html")

        # All the required result plugins must be available
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        # zip_archive first: other results are produced after the archive,
        # so they must not be inside it
        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        # zip_archive last: all other results must be inside the archive
        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        """Regression check: argparse Namespace attribute errors on stderr."""
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", result.stderr)
1139

1140

1141 1142 1143 1144
class ParseXMLError(Exception):
    """Raised when the xUnit result output cannot be parsed as XML."""
    pass


1145
class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    """
    Checks the xUnit result plugin: output parses as XML, validates
    against the bundled junit-4.xsd schema and carries the expected
    per-status counters.
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        # Schema bundled with the selftests, next to this file's parent dir
        junit_xsd = os.path.join(os.path.dirname(__file__),
                                 os.path.pardir, ".data", 'junit-4.xsd')
        self.junit = os.path.abspath(junit_xsd)
        super(PluginsXunitTest, self).setUp()

    # NOTE(review): e_nnotfound is accepted but never used below --
    # confirm whether a "not found" counter check was meant to exist
    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        """
        Run a single test with --xunit output on stdout and check the
        job exit code plus the testsuite counters in the XML.
        """
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (AVOCADO, self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        # Validate the output against the JUnit 4 schema
        with open(self.junit, 'rb') as f:
            xmlschema = etree.XMLSchema(etree.parse(f))

        self.assertTrue(xmlschema.validate(etree.parse(BytesIO(xml_output))),
                        "Failed to validate against %s, message:\n%s" %
                        (self.junit,
                         xmlschema.error_log.filter_from_errors()))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()

1225 1226 1227 1228 1229

class ParseJSONError(Exception):
    """Raised when the JSON result output cannot be parsed."""
    pass


1230
class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    """Checks the JSON result plugin output for the various test outcomes."""

    def setUp(self):
        # Per-test job results directory, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip, e_ncancel=0, external_runner=None):
        """
        Run avocado with the JSON result plugin and verify its counters.

        :param testname: test reference passed on the avocado command line
        :param e_rc: expected avocado process exit code
        :param e_ntests: expected number of executed tests
        :param e_nerrors: expected number of test errors
        :param e_nfailures: expected number of test failures
        :param e_nskip: expected number of skipped tests
        :param e_ncancel: expected number of cancelled tests
        :param external_runner: optional executable for --external-runner
        :returns: the parsed JSON result data
        :raises ParseJSONError: when the JSON output cannot be parsed
        """
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off --json - '
                    '--archive %s' % (AVOCADO, self.tmpdir, testname))
        if external_runner is not None:
            cmd_line += " --external-runner '%s'" % external_runner
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        # Messages below were fixed: they used to be copy-pasted from the
        # tests-count assertion and all read "expected tests".
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        n_cancel = json_data['cancel']
        self.assertEqual(n_cancel, e_ncancel,
                         "Different number of cancelled tests")
        return json_data

    def test_json_plugin_passtest(self):
        """JSON report for a single passing test."""
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        """JSON report for a single failing test."""
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        """JSON report for a test cancelled during setUp."""
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_json_plugin_errortest(self):
        """JSON report for a single erroring test."""
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    @unittest.skipIf(not GNU_ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        """Test references with shell-special chars survive the JSON report."""
        data = self.run_and_check('"-ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0, external_runner=GNU_ECHO_BINARY)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1--ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         "1--ne foo__n_'____nbar_baz")

    def tearDown(self):
        """Remove the per-test job results directory."""
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

# Entry point: run every test in this module through the unittest runner.
if __name__ == "__main__":
    unittest.main()