import aexpect
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom
import zipfile

import pkg_resources

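# unittest2 provides the Python 2.7 unittest API (e.g. skipIf) on Python 2.6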
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

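# The repository root, two levels up from this file; the tests chdir here so
# that the './scripts/avocado' entry point resolves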
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

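# A tiny library plus a test that imports it from its own directory, used to
# check that test-local imports are resolved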
HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

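# A test that overwrites its own status after run_avocado(), used to exercise
# the handling of unsupported test status values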
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        self.status = 'not supported'

    def test(self):
        pass
'''

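# Source with a NameError at class-definition time, used to check how broken
# test files are reported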
INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


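# Reports a status to the runner queue and then hangs, used to check that
# tests which report but never finish get interrupted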
REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
        self.runner_queue.put({"running": False})
        time.sleep(60)
'''

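# Kills its own process without reporting any status, used to exercise the
# "Test died without reporting the status" error path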
DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
        os.kill(os.getpid(), signal.SIGKILL)
'''


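# Probe optional system binaries up front; tests that depend on one are
# skipped when it is not available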
def probe_binary(binary):
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None

CC_BINARY = probe_binary('cc')
ECHO_BINARY = probe_binary('echo')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_failfast(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on' %
                    self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    def test_hanged_test_with_status(self):
        """ Check that avocado handles hanged tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            self.assertLess(res.duration, 40, "Test execution took too long, "
                            "which is likely because the hung test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s sbrubles' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to resolve reference', result.stderr)
        self.assertNotIn('Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s --force-job-id foobar passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
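        # Poll for up to ~5s (50 * 0.1s) for the 'latest' symlink to show up
        # while the job is still running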
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
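        # --mux-inject entries follow the [node:]key:value form: keys without
        # an explicit node ("foo:1") land in the root node ("/:foo"), the
        # others in the named node ("/foo:foo")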
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 4)
        for i in xrange(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('./scripts/avocado --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    def test_read(self):
        os.chdir(basedir)
        cmd = "./scripts/avocado run --job-results-dir %s %s" % (self.tmpdir,
                                                                 READ_BINARY)
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'skiponsetup.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    @unittest.skipIf(not ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        os.chdir(basedir)
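        # The command embeds backslashes, quotes and newlines; they must
        # survive shell quoting into the test name and the job log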
        cmd_line = ('./scripts/avocado run "%s -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' %
                    (ECHO_BINARY, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-%s -ne foo\\\\n\\\'\\"\\\\nbar/baz' %
                      ECHO_BINARY, result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "Expected a single directory in "
                         "the test-results dir as only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-%s -ne foo\\\\n\\\'\\"\\\\nbar_baz' %
                         ECHO_BINARY.replace('/', '_'))

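    # Runs a job containing a skipped test, then replays it filtering on PASS
    # status; the replay job itself is expected to succeed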
    def test_replay_skip_skipped(self):
        cmd = ("./scripts/avocado run --job-results-dir %s --json - "
               "skiponsetup.py" % self.tmpdir)
        result = process.run(cmd)
        result = json.loads(result.stdout)
        jobid = str(result["job_id"])
        replay_data_dir = os.path.dirname(str(result["debuglog"]))
        cmd = ("./scripts/avocado run --job-results-dir %s --replay-data-dir "
               "%s --replay %s --replay-test-status PASS") % (self.tmpdir,
                                                              replay_data_dir,
                                                              jobid)
        process.run(cmd)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and make sure they don't take more than 30 seconds to
        run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second. Let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    def test_kill_stopped_sleep(self):
        sleep = "'%s 60'" % SLEEP_BINARY
        proc = aexpect.Expect("./scripts/avocado run %s --job-results-dir %s "
                              "--sysinfo=off --job-timeout 3"
                              % (sleep, self.tmpdir))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 5s after job-timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "rc 8 (AVOCADO_JOB_INTERRUPTED).")
        sleep_dir = astring.string_to_safe_path("1-" + sleep[1:-1])
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "suppose to be stopped and unable to produce it.\n"
                         "%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


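# Base class providing a temporary output directory for the plugin tests below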
class AbsPluginsTest(object):

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to resolve reference", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_disable_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = './scripts/avocado --config %s plugins' % config
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks that the html, json, xunit and zip_archive plugins are
        enabled. Then it runs a test with zip_archive running first, which
        means the html, json and xunit output files do not make it into the
        archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make it into the archive.
        """
        def run_config(config_path):
            cmd = ('./scripts/avocado --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        try:
            pkg_resources.require('avocado_result_html')
            result_plugins.append("html")
            result_outputs.append("html/results.html")
        except pkg_resources.DistributionNotFound:
            pass

        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

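    # Runs one avocado job with xunit output on stdout and compares the
    # testsuite attributes against the expected counters (note: e_nnotfound
    # is accepted but currently not checked)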
    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    @unittest.skipIf(not ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1-/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1-_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

if __name__ == '__main__':
    unittest.main()