# This Python file uses the following encoding: utf-8
import aexpect
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom
import zipfile
import unittest
import psutil
import pkg_resources

from lxml import etree
from StringIO import StringIO

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

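# Root of the source tree, two directory levels above this file's location.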
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)

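# Trivial shell payloads used to create passing and failing test scripts on
# the fly in the tests below.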
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

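# A test that imports a module saved next to it; used to verify that the
# runner makes the test's own directory importable.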
LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

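# A test that forges an unsupported status value; the runner is expected to
# report it as an ERROR (see test_unsupported_status).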
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        # Please do NOT ever use this, it's for unittesting only.
        self._Test__status = 'not supported'

    def test(self):
        pass
'''

INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''

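# A test that reports a "running" status and then hangs; the runner is
# expected to error out and interrupt it (see test_hanged_test_with_status).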
REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(60)
'''

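# A test that kills itself before reporting any status; exercises the "Test
# died without reporting the status" error path (see test_no_status_reported).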
DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


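# Resolve a binary name to its full path, or None when it is not available,
# so that dependent tests can be skipped instead of failing.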
def probe_binary(binary):
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None


TRUE_CMD = probe_binary('true')
CC_BINARY = probe_binary('cc')

# On macOS, a default GNU coreutils installation (via brew) installs the
# GNU versions of the utilities with a "g" prefix. The BSD versions of the
# core utilities remain on their expected paths, but their behavior and
# flags differ in most cases.
GNU_ECHO_BINARY = probe_binary('echo')
if GNU_ECHO_BINARY is not None:
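    # A GNU-style echo documents the '-e' flag in its man page; if the system
    # echo does not, fall back to the 'g'-prefixed GNU echo from coreutils.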
    if probe_binary('man') is not None:
        echo_manpage = process.run('man %s' % os.path.basename(GNU_ECHO_BINARY)).stdout
        if '-e' not in echo_manpage:
            GNU_ECHO_BINARY = probe_binary('gecho')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

179 180 181
    def test_runner_failfast(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
A
Amador Pahim 已提交
182 183
                    'passtest.py failtest.py passtest.py --failfast on' %
                    self.tmpdir)
184 185 186 187 188 189 190
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    def test_hanged_test_with_status(self):
        """ Check that avocado handles hanged tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            self.assertLess(res.duration, 40, "Test execution took too long, "
                            "which is likely because the hung test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    @unittest.skipIf(os.environ.get("AVOCADO_CHECK_FULL") != "1",
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s sbrubles' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to resolve reference', result.stderr)
        self.assertNotIn('Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s --force-job-id foobar passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn(tempfile.gettempdir(), debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 4)
        for i in xrange(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2" is in the tree, but not in any leaf, so it is
        # inaccessible from the test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('./scripts/avocado --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    def test_read(self):
        os.chdir(basedir)
        cmd = "./scripts/avocado run --sysinfo=off --job-results-dir %s %s"
        cmd %= (self.tmpdir, READ_BINARY)
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_cancel(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'cancelonsetup.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1',
                      result.stdout)

    @unittest.skipIf(not GNU_ECHO_BINARY,
                     'GNU style echo binary not available')
    def test_ugly_echo_cmd(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --external-runner "%s -ne" '
                    '"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' %
                    (GNU_ECHO_BINARY, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        cmd = ("./scripts/avocado run --job-results-dir %s --json - "
               "cancelonsetup.py" % self.tmpdir)
        result = process.run(cmd)
        result = json.loads(result.stdout)
        jobid = str(result["job_id"])
        cmd = ("./scripts/avocado run --job-results-dir %s "
               "--replay %s --replay-test-status PASS") % (self.tmpdir, jobid)
        process.run(cmd)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'ʊʋʉʈɑ ʅʛʌ',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and check they do not take more than 30 seconds to run.

        Note: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    def test_kill_stopped_sleep(self):
        proc = aexpect.Expect("./scripts/avocado run 60 --job-results-dir %s "
                              "--external-runner %s --sysinfo=off --job-timeout 3"
                              % (self.tmpdir, SLEEP_BINARY))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado process, not the shell executing it
        avocado_shell = psutil.Process(proc.get_pid())
        avocado_proc = avocado_shell.children()[0]
        pid = avocado_proc.pid
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 9s after job-timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectedly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "exit status 8 (job interrupted).")

        sleep_dir = astring.string_to_safe_path("1-60")
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "supposed to be stopped and unable to produce it.\n"
                         "%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=%s' % (self.tmpdir, TRUE_CMD))
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


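# Base class (mixin) providing a scratch output directory for the plugin
# tests; it is combined with unittest.TestCase by the subclasses below.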
class AbsPluginsTest(object):

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to resolve reference", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_disable_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = './scripts/avocado --config %s plugins' % config
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks if html, json, xunit and zip_archive plugins are enabled.
        Then it runs a test with zip_archive running first, which means the html,
        json and xunit output files do not make it into the archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make it into the archive.
        """
        def run_config(config_path):
            cmd = ('./scripts/avocado --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        try:
            pkg_resources.require('avocado_result_html')
            result_plugins.append("html")
            result_outputs.append("html/results.html")
        except pkg_resources.DistributionNotFound:
            pass

        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.junit = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                     os.path.pardir, ".data", 'junit-4.xsd'))
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        with open(self.junit, 'r') as f:
            xmlschema = etree.XMLSchema(etree.parse(f))

        self.assertTrue(xmlschema.validate(etree.parse(StringIO(xml_output))),
                        "Failed to validate against %s, message:\n%s" %
                        (self.junit,
                         xmlschema.error_log.filter_from_errors()))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip, e_ncancel=0, external_runner=None):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        if external_runner is not None:
            cmd_line += " --external-runner '%s'" % external_runner
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        n_cancel = json_data['cancel']
        self.assertEqual(n_cancel, e_ncancel)
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    @unittest.skipIf(not GNU_ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        data = self.run_and_check('"-ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0, external_runner=GNU_ECHO_BINARY)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1--ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1--ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()


if __name__ == '__main__':
    unittest.main()