# Functional tests for Avocado's output handling (test_output.py)
# Standard library
import json
import tempfile
import os
import re
import shutil
import unittest
from xml.dom import minidom

# Third-party (setuptools)
import pkg_resources

# Avocado project modules under test / test helpers
from avocado.core import exit_codes
from avocado.core.output import TermSupport
from avocado.utils import genio
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

# Root of the avocado source tree (two levels up from this file); all test
# runs chdir here so relative paths like "passtest.py" resolve.
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)

# Avocado command line entry point; overridable via environment so the same
# suite can exercise an installed avocado instead of the in-tree script.
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")

# Perl program that runs avocado with "--tap -" and feeds the output through
# Perl's TAP::Parser, dying on any unknown line, parser error or a plan other
# than '1..3'.  The AVOCADO command is interpolated now; the job results dir
# is left as a '%s' placeholder for the caller to fill in.
PERL_TAP_PARSER_SNIPPET = """#!/bin/env perl
use TAP::Parser;

my $parser = TAP::Parser->new( { exec => ['%s', 'run', 'passtest.py', 'errortest.py', 'warntest.py', '--tap', '-', '--sysinfo', 'off', '--job-results-dir', '%%s'] } );

while ( my $result = $parser->next ) {
        $result->is_unknown && die "Unknown line \\"" . $result->as_string . "\\" in the TAP output!\n";
}
$parser->parse_errors == 0 || die "Parser errors!\n";
$parser->is_good_plan || die "Plan is not a good plan!\n";
$parser->plan eq '1..3' || die "Plan does not match what was expected!\n";
""" % AVOCADO
# Avocado test that writes to stdout/stderr at every stage — import time,
# __init__, the test method and __del__ — both directly and via child
# processes, so the runner's output capture can be verified stage by stage.
OUTPUT_TEST_CONTENT = """#!/bin/env python
import sys

from avocado import Test
from avocado.utils import process

print("top_print")
sys.stdout.write("top_stdout\\n")
sys.stderr.write("top_stderr\\n")
process.run("/bin/echo top_process")

class OutputTest(Test):
    def __init__(self, *args, **kwargs):
        super(OutputTest, self).__init__(*args, **kwargs)
        print("init_print")
        sys.stdout.write("init_stdout\\n")
        sys.stderr.write("init_stderr\\n")
        process.run("/bin/echo init_process")

    def test(self):
        print("test_print")
        sys.stdout.write("test_stdout\\n")
        sys.stderr.write("test_stderr\\n")
        process.run("/bin/echo -n test_process > /dev/stdout",
                    shell=True)
        process.run("/bin/echo -n __test_stderr__ > /dev/stderr",
                    shell=True)
        process.run("/bin/echo -n __test_stdout__ > /dev/stdout",
                    shell=True)

    def __del__(self):
        print("del_print")
        sys.stdout.write("del_stdout\\n")
        sys.stderr.write("del_stderr\\n")
        process.run("/bin/echo -n del_process")
"""

# Avocado test whose subprocess output must NOT be recorded: both process.run
# calls disable output check recording implicitly via the command-line option
# "--output-check-record none" used by the test that runs this script.
OUTPUT_MODE_NONE_CONTENT = r"""
import sys

from avocado import Test
from avocado.utils import process


class OutputCheckNone(Test):

    def test(self):
        cmd = "%s -c \"import sys; sys.%%s.write('%%s')\"" % sys.executable
        process.run(cmd % ('stdout', '__STDOUT_DONT_RECORD_CONTENT__'))
        process.run(cmd % ('stderr', '__STDERR_DONT_RECORD_CONTENT__'))
"""

# Avocado test that toggles the per-call `allow_output_check` parameter:
# default recording, then explicitly disabled, then default again.  Only the
# *_CONTENT__ and *_DO_RECORD_CONTENT__ markers should end up recorded.
OUTPUT_CHECK_ON_OFF_CONTENT = r"""
import sys

from avocado import Test
from avocado.utils import process


class OutputCheckOnOff(Test):

    def test(self):
        cmd = "%s -c \"import sys; sys.%%s.write('%%s')\"" % sys.executable
        # start with the default behavior
        process.run(cmd % ('stdout', '__STDOUT_CONTENT__'))
        process.run(cmd % ('stderr', '__STDERR_CONTENT__'))
        # now shift to no recording
        process.run(cmd % ('stdout', '__STDOUT_DONT_RECORD_CONTENT__'),
                    allow_output_check='none')
        process.run(cmd % ('stderr', '__STDERR_DONT_RECORD_CONTENT__'),
                    allow_output_check='none')
        # now check that the default behavior (recording) is effective
        process.run(cmd % ('stdout', '__STDOUT_DO_RECORD_CONTENT__'))
        process.run(cmd % ('stderr', '__STDERR_DO_RECORD_CONTENT__'))
"""

def image_output_uncapable():
    """Skip-condition helper: True when the PIL image library is missing."""
    try:
        import PIL  # noqa: F401  (importability is the whole point)
    except ImportError:
        return True
    return False


def html_uncapable():
    """Skip-condition helper: True when the Avocado HTML result plugin
    distribution is not installed."""
    try:
        pkg_resources.require('avocado-framework-plugin-result-html')
    except pkg_resources.DistributionNotFound:
        return True
    return False


def perl_tap_parser_uncapable():
    """Skip-condition helper: True when Perl's TAP::Parser module cannot
    be loaded (perl missing or module not installed)."""
    load_status = os.system("perl -e 'use TAP::Parser;'")
    return load_status != 0


def missing_binary(binary):
    """Skip-condition helper: True when *binary* is not found in PATH."""
    try:
        utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return True
    return False


class OutputTest(unittest.TestCase):
    """Checks on how avocado captures, records and filters test output."""

    def setUp(self):
        # Fresh results dir per test; run avocado from the source tree root
        # so relative test references (e.g. doublefree.py) resolve.
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        os.chdir(basedir)

    @unittest.skipIf(missing_binary('cc'),
                     "C compiler is required by the underlying doublefree.py test")
    def test_output_doublefree(self):
        """Check that glibc "double free" noise does not leak into output."""
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    'doublefree.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        output = result.stdout + result.stderr
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        bad_string = 'double free or corruption'
        self.assertNotIn(bad_string, output,
                         "Libc double free can be seen in avocado "
                         "doublefree output:\n%s" % output)

    def test_print_to_std(self):
        """Check that all writes done by OUTPUT_TEST_CONTENT end up in the
        job log (in order) and in the per-test stdout/stderr/output files."""
        def _check_output(path, exps, name):
            # Walk the file asserting the expected messages appear in order;
            # `i` tracks how many expected messages were matched so far.
            i = 0
            end = len(exps)
            with open(path) as output_file:
                output_file_content = output_file.read()
                output_file.seek(0)
                for line in output_file:
                    if exps[i] in line:
                        i += 1
                        if i == end:
                            break
                self.assertEqual(i, end, "Failed to find %sth message from\n%s\n"
                                 "\nin the %s. Either it's missing or in wrong "
                                 "order.\n%s" % (i, "\n".join(exps), name,
                                                 output_file_content))
        test = script.Script(os.path.join(self.tmpdir, "output_test.py"),
                             OUTPUT_TEST_CONTENT)
        test.save()
        result = process.run("%s run --job-results-dir %s --sysinfo=off "
                             "--json - -- %s" % (AVOCADO, self.tmpdir, test))
        res = json.loads(result.stdout)
        joblog = res["debuglog"]
        # Messages from import time, __init__ and the test method, tagged by
        # the stream they were written to, must appear in the job log in order
        exps = ["[stdout] top_print", "[stdout] top_stdout",
                "[stderr] top_stderr", "[stdout] top_process",
                "[stdout] init_print", "[stdout] init_stdout",
                "[stderr] init_stderr", "[stdout] init_process",
                "[stdout] test_print", "[stdout] test_stdout",
                "[stderr] test_stderr", "[stdout] test_process"]
        _check_output(joblog, exps, "job.log")
        testdir = res["tests"][0]["logdir"]
        # Only writes made during the test method itself are recorded in the
        # per-test stdout/stderr files
        with open(os.path.join(testdir, "stdout")) as stdout_file:
            self.assertEqual("test_print\ntest_stdout\ntest_process__test_stdout__",
                             stdout_file.read())
        with open(os.path.join(testdir, "stderr")) as stderr_file:
            self.assertEqual("test_stderr\n__test_stderr__",
                             stderr_file.read())

        # Now run the same test, but with combined output
        # combined output can not keep track of sys.stdout and sys.stderr
        # writes, as they will eventually be out of sync.  In fact,
        # the correct fix is to run the entire test process with redirected
        # stdout and stderr, and *not* play with sys.stdout and sys.stderr.
        # But this change will come later
        result = process.run("%s run --job-results-dir %s --sysinfo=off "
                             "--output-check-record=combined "
                             "--json - -- %s" % (AVOCADO, self.tmpdir, test))
        res = json.loads(result.stdout)
        testdir = res["tests"][0]["logdir"]
        with open(os.path.join(testdir, "output")) as output_file:
            self.assertEqual("test_process__test_stderr____test_stdout__",
                             output_file.read())

    def test_check_record_no_module_default(self):
        """
        Checks that the `avocado.utils.process` module won't have a output
        check record mode (`OUTPUT_CHECK_RECORD_MODE`) set by default.

        The reason is that, if this is always set from the command
        line runner, we can't distinguish from a situation where the
        module level configuration should be applied as a fallback to
        the API parameter.  By leaving it unset by default, the command line
        option parameter value `none` will slightly change its behavior,
        meaning that it will explicitly disable output check record when
        asked to do so.
        """
        with script.Script(os.path.join(self.tmpdir, "output_mode_none.py"),
                           OUTPUT_MODE_NONE_CONTENT,
                           script.READ_ONLY_MODE) as test:
            cmd = ("%s run --job-results-dir %s --sysinfo=off "
                   "--json - --output-check-record none -- %s") % (AVOCADO,
                                                                   self.tmpdir,
                                                                   test.path)
            result = process.run(cmd)
            res = json.loads(result.stdout)
            testdir = res["tests"][0]["logdir"]
            # With recording disabled, all three capture files must exist
            # but stay empty
            for output_file in ('stdout', 'stderr', 'output'):
                output_file_path = os.path.join(testdir, output_file)
                self.assertTrue(os.path.exists(output_file_path))
                with open(output_file_path, 'r') as output:
                    self.assertEqual(output.read(), '')

    def test_check_on_off(self):
        """
        Checks that output will always be kept, but it will only make into
        the *test* stdout/stderr/output files when it's not explicitly disabled

        This control is defined as an API parameter, `allow_output_check`, so
        it should be possible to enable/disable it on each call.
        """
        with script.Script(os.path.join(self.tmpdir, "test_check_on_off.py"),
                           OUTPUT_CHECK_ON_OFF_CONTENT,
                           script.READ_ONLY_MODE) as test:
            cmd = ("%s run --job-results-dir %s --sysinfo=off "
                   "--json - -- %s") % (AVOCADO, self.tmpdir, test.path)
            result = process.run(cmd)
            res = json.loads(result.stdout)
            testdir = res["tests"][0]["logdir"]
            # The *_DONT_RECORD_CONTENT__ markers must not appear: only the
            # calls that left recording enabled contribute to the files
            stdout_path = os.path.join(testdir, 'stdout')
            self.assertTrue(os.path.exists(stdout_path))
            with open(stdout_path, 'r') as stdout:
                self.assertEqual(stdout.read(),
                                 '__STDOUT_CONTENT____STDOUT_DO_RECORD_CONTENT__')
            stderr_path = os.path.join(testdir, 'stderr')
            self.assertTrue(os.path.exists(stderr_path))
            with open(stderr_path, 'r') as stderr:
                self.assertEqual(stderr.read(),
                                 '__STDERR_CONTENT____STDERR_DO_RECORD_CONTENT__')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

class OutputPluginTest(unittest.TestCase):
    """Checks on the result plugins (json, xunit, tap, html, journal) and on
    how their output interacts with the console UI."""

    def setUp(self):
        # Fresh results dir per test; run avocado from the source tree root
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        os.chdir(basedir)

    def check_output_files(self, debug_log):
        """Assert that the job dir containing *debug_log* holds valid
        results.json, results.xml and results.tap files."""
        base_dir = os.path.dirname(debug_log)
        json_output_path = os.path.join(base_dir, 'results.json')
        self.assertTrue(os.path.isfile(json_output_path))
        with open(json_output_path, 'r') as fp:
            json.load(fp)
        xunit_output_path = os.path.join(base_dir, 'results.xml')
        # Fixed copy-paste bug: this used to re-check json_output_path,
        # leaving the xunit file's existence unverified
        self.assertTrue(os.path.isfile(xunit_output_path))
        try:
            minidom.parse(xunit_output_path)
        except Exception as details:
            xunit_output_content = genio.read_file(xunit_output_path)
            raise AssertionError("Unable to parse xunit output: %s\n\n%s"
                                 % (details, xunit_output_content))
        tap_output = os.path.join(base_dir, "results.tap")
        self.assertTrue(os.path.isfile(tap_output))
        tap = genio.read_file(tap_output)
        self.assertIn("..", tap)
        self.assertIn("\n# debug.log of ", tap)

    def test_output_incompatible_setup(self):
        """Two plugins both using stdout ("-") must be rejected."""
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--xunit - --json - passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        # argparse may report either option as the offender, in either order
        error_regex = re.compile(r'avocado run: error: argument ((--json)|'
                                 '(--xunit)): Options ((--xunit --json)|'
                                 '(--json --xunit)) are trying to use stdout '
                                 'simultaneously\n')
        self.assertIsNotNone(error_regex.match(result.stderr),
                             "Missing error message from output:\n%s" %
                             result.stderr)

    @unittest.skipIf(html_uncapable(),
                     "Uncapable of Avocado Result HTML plugin")
    def test_output_incompatible_setup_2(self):
        """The HTML plugin cannot write to stdout at all."""
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--html - passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        output = result.stdout + result.stderr
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        error_excerpt = "HTML to stdout not supported"
        self.assertIn(error_excerpt, output,
                      "Missing excerpt error message from output:\n%s" % output)

    def test_output_compatible_setup(self):
        """xunit to file + json to stdout is a valid combination."""
        tmpfile = tempfile.mktemp()
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--journal --xunit %s --json - passtest.py' %
                    (AVOCADO, self.tmpdir, tmpfile))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout + result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        try:
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            # Check if we are producing valid outputs
            json.loads(output)
            minidom.parse(tmpfile)
        finally:
            try:
                os.remove(tmpfile)
            except OSError:
                pass

    def test_output_compatible_setup_2(self):
        """xunit to stdout + json to file is a valid combination."""
        tmpfile = tempfile.mktemp()
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--xunit - --json %s passtest.py' %
                    (AVOCADO, self.tmpdir, tmpfile))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout + result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        try:
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            # Check if we are producing valid outputs
            with open(tmpfile, 'r') as fp:
                json_results = json.load(fp)
                debug_log = json_results['debuglog']
                self.check_output_files(debug_log)
            minidom.parseString(output)
        finally:
            try:
                os.remove(tmpfile)
            except OSError:
                pass

    @unittest.skipIf(html_uncapable(),
                     "Uncapable of Avocado Result HTML plugin")
    def test_output_compatible_setup_3(self):
        """xunit + json + html, all to files, is a valid combination and the
        html plugin must generate exactly one entry in its target dir."""
        tmpfile = tempfile.mktemp(prefix='avocado_' + __name__)
        tmpfile2 = tempfile.mktemp(prefix='avocado_' + __name__)
        tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        tmpfile3 = os.path.join(tmpdir, "result.html")
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--xunit %s --json %s --html %s passtest.py'
                    % (AVOCADO, self.tmpdir, tmpfile, tmpfile2, tmpfile3))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout + result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        tmpdir_contents = os.listdir(tmpdir)
        self.assertEqual(len(tmpdir_contents), 1, "Html plugin generated "
                         "extra files in the result dir: %s"
                         % tmpdir_contents)
        try:
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotEqual(output, "", "Output is empty")
            # Check if we are producing valid outputs
            with open(tmpfile2, 'r') as fp:
                json_results = json.load(fp)
                debug_log = json_results['debuglog']
                self.check_output_files(debug_log)
            minidom.parse(tmpfile)
        finally:
            try:
                os.remove(tmpfile)
                os.remove(tmpfile2)
                shutil.rmtree(tmpdir)
            except OSError:
                pass

    def test_output_compatible_setup_nooutput(self):
        """--silent must suppress all console output while the file-based
        result plugins keep working."""
        tmpfile = tempfile.mktemp()
        tmpfile2 = tempfile.mktemp()
        # Verify --silent can be supplied as app argument
        cmd_line = ('%s --silent run --job-results-dir %s '
                    '--sysinfo=off --xunit %s --json %s passtest.py'
                    % (AVOCADO, self.tmpdir, tmpfile, tmpfile2))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout + result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        try:
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertEqual(output, "", "Output is not empty:\n%s" % output)
            # Check if we are producing valid outputs
            with open(tmpfile2, 'r') as fp:
                json_results = json.load(fp)
                debug_log = json_results['debuglog']
                self.check_output_files(debug_log)
            minidom.parse(tmpfile)
        finally:
            try:
                os.remove(tmpfile)
                os.remove(tmpfile2)
            except OSError:
                pass

    def test_nonprintable_chars(self):
        """Result files must still be valid when a test name carries
        non-printable characters (here: an ESC byte)."""
        cmd_line = ("%s run --external-runner /bin/ls "
                    "'NON_EXISTING_FILE_WITH_NONPRINTABLE_CHARS_IN_HERE\x1b' "
                    "--job-results-dir %s --sysinfo=off"
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout + result.stderr
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        # Locate the job log path from the human UI output
        debug_log = None
        for line in output.splitlines():
            if "JOB LOG" in line:
                debug_log = line.split(':', 1)[-1].strip()
                break
        self.assertTrue(debug_log, "Unable to get JOB LOG from output:\n%s"
                        % output)
        self.check_output_files(debug_log)

    def test_show_job_log(self):
        """--show-job-log must print the job log, including the 40-char
        (sha1) Job ID, to stdout."""
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    'passtest.py --show-job-log' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        job_id_list = re.findall('Job ID: (.*)', result.stdout,
                                 re.MULTILINE)
        self.assertTrue(job_id_list, 'No Job ID in stdout:\n%s' %
                        result.stdout)
        job_id = job_id_list[0]
        self.assertEqual(len(job_id), 40)

    def test_silent_trumps_show_job_log(self):
        """--silent must win over --show-job-log: no output at all."""
        # Also verify --silent can be supplied as run option
        cmd_line = ('%s run --silent --job-results-dir %s '
                    '--sysinfo=off passtest.py --show-job-log'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout + result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertEqual(output, "")

    def test_default_enabled_plugins(self):
        """The default human UI must produce its standard lines and point at
        a job dir whose result files are valid."""
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout + result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        output_lines = output.splitlines()
        # The current human output produces 6 lines when running a single test,
        # with an optional 7th line when the HTML report generation is enabled
        self.assertGreaterEqual(len(output_lines), 6,
                                ('Basic human interface did not produce the '
                                 'expected output. Output produced: "%s"' % output))
        # Second UI line ends with the job log path
        second_line = output_lines[1]
        debug_log = second_line.split()[-1]
        self.check_output_files(debug_log)

    def test_verify_whiteboard_save(self):
        """A test that writes to its whiteboard must get a 'whiteboard' file
        saved next to its log."""
        tmpfile = tempfile.mktemp()
        try:
            config = os.path.join(self.tmpdir, "conf.ini")
            content = ("[datadir.paths]\nlogs_dir = %s"
                       % os.path.relpath(self.tmpdir, "."))
            script.Script(config, content).save()
            cmd_line = ('%s --config %s --show all run '
                        '--sysinfo=off whiteboard.py --json %s'
                        % (AVOCADO, config, tmpfile))
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            with open(tmpfile, 'r') as fp:
                json_results = json.load(fp)
                logfile = json_results['tests'][0]['logfile']
                debug_dir = os.path.dirname(logfile)
                whiteboard_path = os.path.join(debug_dir, 'whiteboard')
                self.assertTrue(os.path.exists(whiteboard_path),
                                'Missing whiteboard file %s' % whiteboard_path)
        finally:
            try:
                os.remove(tmpfile)
            except OSError:
                pass

    @unittest.skipIf(image_output_uncapable(),
                     "Uncapable of generating images with PIL library")
    def test_gendata(self):
        """Files written by tests into their data dir must be preserved."""
        tmpfile = tempfile.mktemp()
        try:
            cmd_line = ("%s run --job-results-dir %s "
                        "--sysinfo=off gendata.py --json %s" %
                        (AVOCADO, self.tmpdir, tmpfile))
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            with open(tmpfile, 'r') as fp:
                json_results = json.load(fp)
                bsod_dir = None
                json_dir = None
                for test in json_results['tests']:
                    if "test_bsod" in test['url']:
                        bsod_dir = test['logfile']
                    elif "test_json" in test['url']:
                        json_dir = test['logfile']
                self.assertTrue(bsod_dir, "Failed to get test_bsod output "
                                "directory")
                self.assertTrue(json_dir, "Failed to get test_json output "
                                "directory")
                bsod_dir = os.path.join(os.path.dirname(bsod_dir), "data",
                                        "bsod.png")
                json_dir = os.path.join(os.path.dirname(json_dir), "data",
                                        "test.json")
                # (message previously rendered as "produced bytest" — fixed)
                self.assertTrue(os.path.exists(bsod_dir), "File %s produced by "
                                "test does not exist" % bsod_dir)
                self.assertTrue(os.path.exists(json_dir), "File %s produced by "
                                "test does not exist" % json_dir)
        finally:
            try:
                os.remove(tmpfile)
            except OSError:
                pass

    def test_redirect_output(self):
        """When stdout is redirected to a file, no terminal escape codes may
        be emitted and the console stays empty."""
        redirected_output_path = tempfile.mktemp()
        try:
            cmd_line = ('%s run --job-results-dir %s '
                        '--sysinfo=off passtest.py > %s'
                        % (AVOCADO, self.tmpdir, redirected_output_path))
            result = process.run(cmd_line, ignore_status=True, shell=True)
            output = result.stdout + result.stderr
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertEqual(output, '',
                             'After redirecting to file, output is not empty: %s' % output)
            with open(redirected_output_path, 'r') as redirected_output_file_obj:
                redirected_output = redirected_output_file_obj.read()
                for code in TermSupport.ESCAPE_CODES:
                    self.assertNotIn(code, redirected_output,
                                     'Found terminal support code %s in redirected output\n%s' %
                                     (code, redirected_output))
        finally:
            try:
                os.remove(redirected_output_path)
            except OSError:
                pass

    @unittest.skipIf(perl_tap_parser_uncapable(),
                     "Uncapable of using Perl TAP::Parser library")
    def test_tap_parser(self):
        """Avocado's TAP output must be accepted by Perl's TAP::Parser."""
        perl_script = script.TemporaryScript("tap_parser.pl",
                                             PERL_TAP_PARSER_SNIPPET
                                             % self.tmpdir)
        perl_script.save()
        process.run("perl %s" % perl_script)

    def test_tap_totaltests(self):
        """The TAP plan must count every variant (1 + 3 from the yaml)."""
        cmd_line = ("%s run passtest.py "
                    "-m examples/tests/sleeptest.py.data/sleeptest.yaml "
                    "--job-results-dir %s "
                    "--tap -" % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line)
        expr = '1..4'
        self.assertIn(expr, result.stdout, "'%s' not found in:\n%s"
                      % (expr, result.stdout))

    def test_broken_pipe(self):
        """Piping into a missing command must not crash avocado; only the
        shell's single "not found" line may appear on stderr."""
        cmd_line = "(%s run --help | whacky-unknown-command)" % AVOCADO
        result = process.run(cmd_line, shell=True, ignore_status=True,
                             env={"LC_ALL": "C"})
        expected_rc = 127
        self.assertEqual(result.exit_status, expected_rc,
                         ("avocado run to broken pipe did not return "
                          "rc %d:\n%s" % (expected_rc, result)))
        self.assertEqual(len(result.stderr.splitlines()), 1)
        self.assertIn("whacky-unknown-command", result.stderr)
        self.assertIn("not found", result.stderr)
        self.assertNotIn("Avocado crashed", result.stderr)

    def test_results_plugins_no_tests(self):
        """When no test resolves, no result files and no result UI lines
        should be produced, and plugins must not error out."""
        cmd_line = ("%s run UNEXISTING --job-results-dir %s"
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_JOB_FAIL)

        xunit_results = os.path.join(self.tmpdir, 'latest', 'results.xml')
        self.assertFalse(os.path.exists(xunit_results))

        json_results = os.path.join(self.tmpdir, 'latest', 'results.json')
        self.assertFalse(os.path.exists(json_results))

        tap_results = os.path.join(self.tmpdir, 'latest', 'results.tap')
        self.assertFalse(os.path.exists(tap_results))

        # Check that no UI output was generated
        self.assertNotIn("RESULTS    : PASS ", result.stdout)
        self.assertNotIn("JOB TIME   :", result.stdout)

        # Check that plugins do not produce errors
        self.assertNotIn("Error running method ", result.stderr)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

662

# Allow running this functional suite directly as a script
if __name__ == '__main__':
    unittest.main()