import aexpect
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom
import zipfile

import pkg_resources

if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

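# Root of the avocado source tree, two directory levels above this file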
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

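# Test payload reporting a status the runner does not recognize; avocado
# is expected to turn it into an ERROR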
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        self.status = 'not supported'

    def test(self):
        pass
'''

INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


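# Payloads simulating misbehaving tests: one reports a status and then
# hangs, the other dies without reporting any status at all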
REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(60)
'''

DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


def probe_binary(binary):
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None

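# Illustrative usage: probe_binary('true') returns a full path such as
# '/usr/bin/true' (location varies by system), or None when not found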

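# Helper binaries resolved at import time (None when absent); tests that
# depend on them are skipped via unittest.skipIf below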
CC_BINARY = probe_binary('cc')
ECHO_BINARY = probe_binary('echo')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
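        # Serialize the mapping into INI-style config content, creating
        # any missing directories along the way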
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_failfast(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on' %
                    self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    def test_hanged_test_with_status(self):
        """ Check that avocado handles hanged tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            self.assertLess(res.duration, 40, "Test execution took too long, "
                            "which is likely because the hanged test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    @unittest.skipIf(os.environ.get("AVOCADO_CHECK_FULL") != "1",
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s sbrubles' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to resolve reference', result.stderr)
        self.assertNotIn('Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s --force-job-id foobar passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's an hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
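        # Poll (up to ~5s) for the 'latest' symlink while the job still runs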
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 4)
        for i in xrange(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2" is in the tree, but not in any leaf, so it is
        # inaccessible from the test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('./scripts/avocado --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    def test_read(self):
        os.chdir(basedir)
        cmd = "./scripts/avocado run --job-results-dir %s %s" % (self.tmpdir,
                                                                 READ_BINARY)
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'skiponsetup.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    @unittest.skipIf(not ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        os.chdir(basedir)
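        # The reference embeds backslashes, quotes and escaped newlines to
        # check that they survive shell quoting and that "/" gets escaped
        # in the result directory name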
        cmd_line = ('./scripts/avocado run "%s -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' %
                    (ECHO_BINARY, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-%s -ne foo\\\\n\\\'\\"\\\\nbar/baz' %
                      ECHO_BINARY, result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-%s -ne foo\\\\n\\\'\\"\\\\nbar_baz' %
                         ECHO_BINARY.replace('/', '_'))

    def test_replay_skip_skipped(self):
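        # Run a job whose only test is skipped, then replay it keeping only
        # PASS tests; process.run() raises if either command exits non-zero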
        cmd = ("./scripts/avocado run --job-results-dir %s --json - "
               "skiponsetup.py" % self.tmpdir)
        result = process.run(cmd)
        result = json.loads(result.stdout)
        jobid = str(result["job_id"])
        cmd = ("./scripts/avocado run --job-results-dir %s "
               "--replay %s --replay-test-status PASS") % (self.tmpdir, jobid)
        process.run(cmd)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and make sure they take no more than 30 seconds.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    def test_kill_stopped_sleep(self):
        sleep = "'%s 60'" % SLEEP_BINARY
        proc = aexpect.Expect("./scripts/avocado run %s --job-results-dir %s "
                              "--sysinfo=off --job-timeout 3"
                              % (sleep, self.tmpdir))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
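        # The job was started with --job-timeout 3; allow a few extra seconds
        # for avocado to wind up before declaring it hung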
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 9s after the job-timeout:"
                      "\n%s" % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectedly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "exit code 8 (job interrupted).")
        sleep_dir = astring.string_to_safe_path("1-" + sleep[1:-1])
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but the "
                         "test was supposed to be stopped and unable to "
                         "produce it.\n%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


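# Mixin based on plain object (not unittest.TestCase) so it is not collected
# as a test case by itself; concrete classes inherit from this mixin and
# unittest.TestCase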
class AbsPluginsTest(object):

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to resolve reference", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_disable_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = './scripts/avocado --config %s plugins' % config
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks that the html, json, xunit and zip_archive plugins
        are enabled. Then it runs a test with zip_archive running first,
        which means the html, json and xunit output files do not make it
        into the archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make it into the archive.
        """
        def run_config(config_path):
            cmd = ('./scripts/avocado --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        try:
            pkg_resources.require('avocado_result_html')
            result_plugins.append("html")
            result_outputs.append("html/results.html")
        except pkg_resources.DistributionNotFound:
            pass

        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
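        """
        Runs avocado on `testname` with xunit output and checks the summary.

        The e_* arguments are the expected exit status and test, error,
        not-found, failure and skip counts (e_nnotfound is accepted for
        symmetry but currently not checked against the XML).
        """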
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
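        """
        Runs avocado on `testname` with JSON output and checks the summary.

        Returns the decoded JSON data so callers can run extra assertions.
        """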
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    @unittest.skipIf(not ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1-/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1-_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()


if __name__ == '__main__':
    unittest.main()