# This Python file uses the following encoding: utf-8

import aexpect
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom
import zipfile
import unittest
import psutil

import pkg_resources

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path


basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)

PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        # Please do NOT ever use this, it's for unittesting only.
        self._Test__status = 'not supported'

    def test(self):
        pass
'''
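# FakeStatusTest above pokes the name-mangled Test.__status attribute directly
# to report a status the runner does not understand; it is exercised by
# test_unsupported_status below.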

INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(60)
'''


DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


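# probe_binary() resolves a command name to an absolute path, or returns None
# when the command is not installed; the constants below feed unittest.skipIf()
# so tests that rely on optional host binaries are skipped instead of failing.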
def probe_binary(binary):
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None


TRUE_CMD = probe_binary('true')
CC_BINARY = probe_binary('cc')

# On macOS, the default GNU coreutils installation (via brew) installs the
# GNU utility versions with a "g" prefix. The BSD versions of the core
# utilities are still installed on their expected paths, but their behavior
# and flags are in most cases different.
GNU_ECHO_BINARY = probe_binary('echo')
if GNU_ECHO_BINARY is not None:
    if probe_binary('man') is not None:
        echo_manpage = process.run('man %s' % os.path.basename(GNU_ECHO_BINARY)).stdout
        if '-e' not in echo_manpage:
            GNU_ECHO_BINARY = probe_binary('gecho')
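# Note: when the system echo man page does not document '-e' (e.g. BSD echo),
# we fall back to brew's 'gecho'; GNU_ECHO_BINARY may still end up None when
# that is not installed either.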
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_failfast(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on' %
                    self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    def test_hanged_test_with_status(self):
        """ Check that avocado handles hanged tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            self.assertLess(res.duration, 40, "Test execution took too long, "
                            "which is likely because the hung test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    @unittest.skipIf(os.environ.get("AVOCADO_CHECK_FULL") != "1",
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s sbrubles' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to resolve reference', result.stderr)
        self.assertNotIn('Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s --force-job-id foobar passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
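        # Poll for up to ~5s (50 * 0.1s): the 'latest' symlink should show up
        # while the job is still running, i.e. before the SubProcess finishes.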
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn(tempfile.gettempdir(), debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 4)
        for i in xrange(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2" is in the tree, but not in any leaf, so it is
        # inaccessible from the test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('./scripts/avocado --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    def test_read(self):
        os.chdir(basedir)
        cmd = "./scripts/avocado run --sysinfo=off --job-results-dir %s %s"
        cmd %= (self.tmpdir, READ_BINARY)
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_cancel(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'cancelonsetup.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1',
                      result.stdout)

    @unittest.skipIf(not GNU_ECHO_BINARY,
                     'GNU style echo binary not available')
    def test_ugly_echo_cmd(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --external-runner "%s -ne" '
                    '"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' %
                    (GNU_ECHO_BINARY, self.tmpdir))
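        # The reference handed to the external runner mixes backslashes, quotes
        # and a slash; it must survive verbatim in the test id, while '/' gets
        # replaced in the results directory name (asserted below).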
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        cmd = ("./scripts/avocado run --job-results-dir %s --json - "
               "cancelonsetup.py" % self.tmpdir)
        result = process.run(cmd)
        result = json.loads(result.stdout)
        jobid = str(result["job_id"])
        cmd = ("./scripts/avocado run --job-results-dir %s "
               "--replay %s --replay-test-status PASS") % (self.tmpdir, jobid)
        process.run(cmd)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'ʊʋʉʈɑ ʅʛʌ',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and make sure they do not take more than 30 seconds.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    def test_kill_stopped_sleep(self):
        proc = aexpect.Expect("./scripts/avocado run 60 --job-results-dir %s "
                              "--external-runner %s --sysinfo=off --job-timeout 3"
                              % (self.tmpdir, SLEEP_BINARY))
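        # The test reference "60" is run by the external runner as "sleep 60",
        # while --job-timeout 3 should interrupt the job early.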
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado process, not the shell executing it
        avocado_shell = psutil.Process(proc.get_pid())
        avocado_proc = avocado_shell.children()[0]
        pid = avocado_proc.pid
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 5s after job-timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
740
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
741
                         "1.")
742 743

        sleep_dir = astring.string_to_safe_path("1-60")
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "supposed to be stopped and unable to produce it.\n"
                         "%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=%s' % (self.tmpdir, TRUE_CMD))
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):
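    # Mixin that only manages a temporary output directory; it intentionally
    # does not inherit from unittest.TestCase, so it is not collected and run
    # on its own.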

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to resolve reference", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_disable_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = './scripts/avocado --config %s plugins' % config
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks that the html, json, xunit and zip_archive plugins are
        enabled. Then it runs a test with zip_archive running first, which
        means the html, json and xunit output files do not make it into the
        archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make it into the archive.
        """
        def run_config(config_path):
            cmd = ('./scripts/avocado --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        try:
            pkg_resources.require('avocado_result_html')
            result_plugins.append("html")
            result_outputs.append("html/results.html")
        except pkg_resources.DistributionNotFound:
            pass

        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
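        # e_rc is the expected exit code; e_ntests/e_nerrors/e_nfailures/e_nskip
        # are the expected counters in the xunit output (e_nnotfound is accepted
        # for symmetry but not currently checked).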
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
1038 1039
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip, e_ncancel=0, external_runner=None):
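        # The e_* arguments are the expected counters in the JSON results;
        # external_runner, when set, is appended as --external-runner.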
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        if external_runner is not None:
            cmd_line += " --external-runner '%s'" % external_runner
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        n_cancel = json_data['cancel']
        self.assertEqual(n_cancel, e_ncancel)
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    @unittest.skipIf(not GNU_ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        data = self.run_and_check('"-ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0, external_runner=GNU_ECHO_BINARY)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1--ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1--ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()


if __name__ == '__main__':
    unittest.main()