# This Python file uses the following encoding: utf-8
import aexpect
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom
import zipfile

import pkg_resources

if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        self.status = 'not supported'

    def test(self):
        pass
'''

INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(60)
'''


DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


def probe_binary(binary):
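    """Return the full path of the given binary, or None when not found."""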
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None


CC_BINARY = probe_binary('cc')
ECHO_BINARY = probe_binary('echo')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_failfast(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on' %
                    self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    def test_hanged_test_with_status(self):
        """ Check that avocado handles hanged tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            self.assertLess(res.duration, 40, "Test execution took too long, "
                            "which is likely because the hung test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    @unittest.skipIf(os.environ.get("AVOCADO_CHECK_FULL") != "1",
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s sbrubles' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to resolve reference', result.stderr)
        self.assertNotIn('Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s --force-job-id foobar passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
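        # Poll (up to ~5s) for the 'latest' link while the job is still running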
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 4)
        for i in xrange(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('./scripts/avocado --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    def test_read(self):
        os.chdir(basedir)
        cmd = "./scripts/avocado run --sysinfo=off --job-results-dir %s %s"
        cmd %= (self.tmpdir, READ_BINARY)
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'skiponsetup.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    @unittest.skipIf(not ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        os.chdir(basedir)
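        # The test reference below mixes quotes, backslashes and escaped
        # newlines to exercise the escaping of shell-special characters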
        cmd_line = ('./scripts/avocado run --external-runner "%s -ne" '
                    '"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' %
                    (ECHO_BINARY, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        cmd = ("./scripts/avocado run --job-results-dir %s --json - "
               "skiponsetup.py" % self.tmpdir)
        result = process.run(cmd)
        result = json.loads(result.stdout)
        jobid = str(result["job_id"])
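        # Replay the job, re-running only the tests that ended with PASS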
        cmd = ("./scripts/avocado run --job-results-dir %s "
               "--replay %s --replay-test-status PASS") % (self.tmpdir, jobid)
        process.run(cmd)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'ʊʋʉʈɑ ʅʛʌ',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and check that they do not take more than 30 seconds.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    def test_kill_stopped_sleep(self):
        proc = aexpect.Expect("./scripts/avocado run 60 --job-results-dir %s "
                              "--external-runner %s --sysinfo=off --job-timeout 3"
                              % (self.tmpdir, SLEEP_BINARY))
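        # Wait until the runner reports that the test started ("(1/1)")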
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
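        # The job-timeout is 3s; give avocado up to 9s to finish on its own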
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive after the job timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "status 8.")

        sleep_dir = astring.string_to_safe_path("1-60")
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but the test "
                         "was supposed to be stopped and unable to produce "
                         "it.\n%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):
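    """Mixin providing a temporary base output directory for plugin tests."""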

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to resolve reference", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928
    def test_disable_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = './scripts/avocado --config %s plugins' % config
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks if html, json, xunit and zip_archive plugins are enabled.
        Then it runs a test with zip_archive running first, which means the html,
        json and xunit output files do not make it into the archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make it into the archive.
        """
        def run_config(config_path):
            cmd = ('./scripts/avocado --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        try:
            pkg_resources.require('avocado_result_html')
            result_plugins.append("html")
            result_outputs.append("html/results.html")
        except pkg_resources.DistributionNotFound:
            pass

        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
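        """
        Run avocado on `testname` and check the xunit output against the
        expected exit code and tests/errors/failures/skips counts
        (e_nnotfound is accepted for symmetry but not checked here).
        """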
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip, external_runner=None):
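        """
        Run avocado on `testname` and check the JSON output against the
        expected exit code and counts; returns the parsed JSON data.
        """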
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        if external_runner is not None:
            cmd_line += " --external-runner '%s'" % external_runner
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    @unittest.skipIf(not ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        data = self.run_and_check('"-ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0, ECHO_BINARY)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1--ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1--ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()


if __name__ == '__main__':
    unittest.main()