test_basic.py 51.0 KB
Newer Older
1
# This Python file uses the following encoding: utf-8
2 3
import aexpect
import glob
4
import json
5
import os
6
import re
7
import shutil
8
import signal
9
import sys
10
import tempfile
11
import time
12
import xml.dom.minidom
13
import zipfile
14
import unittest
15
import psutil
16 17

import pkg_resources
18

19
from avocado.core import exit_codes
20
from avocado.utils import astring
21 22
from avocado.utils import process
from avocado.utils import script
23
from avocado.utils import path as utils_path
24

25
# Root of the avocado source tree (two levels above this test file),
# resolved to an absolute path so tests can chdir() into it safely.
basedir = os.path.abspath(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))

28 29 30 31
# Minimal shell scripts used as always-passing / always-failing test payloads.
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

# Library + test pair used to verify that a test can import a module
# sitting next to it on disk.
HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

# Test that forges an internal status value the runner does not support.
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        # Please do NOT ever use this, it's for unittesting only.
        self._Test__status = 'not supported'

    def test(self):
        pass
'''

# Module-level NameError: the test file cannot even be loaded.
INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''

# Reports a status message to the runner queue and then hangs, to
# exercise the runner's hang detection.
REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(60)
'''

# Kills its own process so no final status is ever reported.
DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''

A
Amador Pahim 已提交
101
def probe_binary(binary):
    """Return the full path of ``binary``, or ``None`` if it is not found."""
    try:
        found = utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None
    return found

L
Lukáš Doktor 已提交
107

108
# Paths of system binaries the functional tests depend on (None when absent;
# the corresponding tests are skipped via unittest.skipIf).
TRUE_CMD = probe_binary('true')
CC_BINARY = probe_binary('cc')

# On macOS, the default GNU coreutils installation (brew)
# installs the gnu utility versions with a g prefix. It still has the
# BSD versions of the core utilities installed on their expected paths
# but their behavior and flags are in most cases different.
GNU_ECHO_BINARY = probe_binary('echo')
if GNU_ECHO_BINARY is not None:
    # Only consult the manpage when `man` itself is available; otherwise
    # process.run() would raise at import time and break the whole module.
    if probe_binary('man') is not None:
        echo_manpage = process.run(
            'man %s' % os.path.basename(GNU_ECHO_BINARY)).stdout
        if '-e' not in echo_manpage:
            GNU_ECHO_BINARY = probe_binary('gecho')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')
122 123


124 125
class RunnerOperationTest(unittest.TestCase):

    """
    Functional checks of the avocado runner: exit codes, output contents
    and results handling, exercised by executing ./scripts/avocado as an
    external process.
    """

    def setUp(self):
        # Per-test job-results directory, removed in tearDown().
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        # The version banner is printed on stderr, not stdout.
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d\\.\\d:'\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        # The section header needs its own line; without the newline the
        # first key would be fused into "[datadir.paths]base_dir = ...",
        # producing an unparseable config file.
        config = '[datadir.paths]\n'
        for key, value in mapping.items():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_failfast(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on' %
                    self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        # Both flags are expected: tests failed AND the job was interrupted.
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        # A library and a test importing it are placed side by side, to
        # check the runner puts the test's directory on sys.path.
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    def test_hanged_test_with_status(self):
        """ Check that avocado handles hanged tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            # The hung payload sleeps for 60s; the runner must kill it
            # well before that.
            self.assertLess(res.duration, 40, "Test execution took too long, "
                            "which is likely because the hanged test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    @unittest.skipIf(os.environ.get("AVOCADO_CHECK_FULL") != "1",
                     "Skipping test that take a long time to run, are "
                     "resource intensive or time sensitve")
    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s sbrubles' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        # The resolution error belongs on stderr only.
        self.assertIn('Unable to resolve reference', result.stderr)
        self.assertNotIn('Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s --force-job-id foobar passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's an hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        # Poll for up to ~5s; the link should appear while the job is
        # still running.
        for _ in range(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        with open(debuglog, 'r') as debugfile:
            log = debugfile.read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn(tempfile.gettempdir(), debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 4)
        for i in range(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('./scripts/avocado --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    def test_read(self):
        os.chdir(basedir)
        cmd = "./scripts/avocado run --sysinfo=off --job-results-dir %s %s"
        cmd %= (self.tmpdir, READ_BINARY)
        # `read` blocks on stdin; the runner must not hang on it.
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

526

527 528 529
class RunnerHumanOutputTest(unittest.TestCase):

    """
    Checks of the human-readable (console) output produced by the runner.
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def _run_and_check_output(self, test_name, expected_rc, expected_line):
        # Run a single test and verify both the exit code and the
        # per-test status line printed on stdout.
        os.chdir(basedir)
        cmd = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
               '%s' % (self.tmpdir, test_name))
        res = process.run(cmd, ignore_status=True)
        self.assertEqual(res.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, res))
        self.assertIn(expected_line, res.stdout)

    def test_output_pass(self):
        self._run_and_check_output('passtest.py', exit_codes.AVOCADO_ALL_OK,
                                   'passtest.py:PassTest.test:  PASS')

    def test_output_fail(self):
        self._run_and_check_output('failtest.py',
                                   exit_codes.AVOCADO_TESTS_FAIL,
                                   'failtest.py:FailTest.test:  FAIL')

    def test_output_error(self):
        self._run_and_check_output('errortest.py',
                                   exit_codes.AVOCADO_TESTS_FAIL,
                                   'errortest.py:ErrorTest.test:  ERROR')

    def test_output_skip(self):
        self._run_and_check_output(
            'skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
            'skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
            '  SKIP')

    @unittest.skipIf(not GNU_ECHO_BINARY,
                     'GNU style echo binary not available')
    def test_ugly_echo_cmd(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --external-runner "%s -ne" '
                    '"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' %
                    (GNU_ECHO_BINARY, self.tmpdir))
        res = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(res.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, res))
        # The escaped payload must come through the external runner intact.
        self.assertIn('[stdout] foo', res.stdout, res)
        self.assertIn('[stdout] \'"', res.stdout, res)
        self.assertIn('[stdout] bar/baz', res.stdout, res)
        self.assertIn('PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
                      res.stdout, res)
        # logdir name should escape special chars (/)
        result_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                             'test-results', '*'))
        self.assertEqual(len(result_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (result_dirs))
        self.assertEqual(os.path.basename(result_dirs[0]),
                         '1-foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        first_cmd = ("./scripts/avocado run --job-results-dir %s --json - "
                     "skiponsetup.py" % self.tmpdir)
        first_run = process.run(first_cmd)
        job_data = json.loads(first_run.stdout)
        jobid = str(job_data["job_id"])
        replay_cmd = ("./scripts/avocado run --job-results-dir %s "
                      "--replay %s --replay-test-status PASS") % (self.tmpdir,
                                                                  jobid)
        process.run(replay_cmd)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

617

618
class RunnerSimpleTest(unittest.TestCase):

    """Functional checks for SIMPLE (executable script) tests."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        # The non-ASCII script name deliberately exercises unicode
        # handling in test names and result paths
        self.pass_script = script.TemporaryScript(
            'ʊʋʉʈɑ ʅʛʌ',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        """A passing simple test must make the job exit AVOCADO_ALL_OK."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        """A failing simple test must make the job exit AVOCADO_TESTS_FAIL."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and assure they not take more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        # 30s budget for the failtests + ~2s for the two sleeptests + slack
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        # Each log level emitted by the bash utils must show up in the job log
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        """A test referenced by a relative path (from its own dir) must run."""
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    def test_kill_stopped_sleep(self):
        """The job timeout must terminate even a SIGTSTP-stopped test."""
        proc = aexpect.Expect("./scripts/avocado run 60 --job-results-dir %s "
                              "--external-runner %s --sysinfo=off --job-timeout 3"
                              % (self.tmpdir, SLEEP_BINARY))
        # Raw string: the pattern is a regex, "\(" is an invalid str escape
        proc.read_until_output_matches([r"\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado process, not the shell executing it
        avocado_shell = psutil.Process(proc.get_pid())
        avocado_proc = avocado_shell.children()[0]
        pid = avocado_proc.pid
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 5s after job-timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectedly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "8.")
        sleep_dir = astring.string_to_safe_path("1-60")
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "suppose to be stopped and unable to produce it.\n"
                         "%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)
769 770


771
class ExternalRunnerTest(unittest.TestCase):

    """Functional checks for the --external-runner feature."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        # Shell snippets (no shebang): they only run via the external runner
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        """A passing script run through /bin/sh must exit AVOCADO_ALL_OK."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        """A failing script run through /bin/sh must exit AVOCADO_TESTS_FAIL."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        """--external-runner-chdir=test without a testdir must fail the job."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        """An external runner without any test reference must fail the job."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=%s' % (self.tmpdir, TRUE_CMD))
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


839
class AbsPluginsTest(object):

    """Mixin giving plugin tests a fresh temporary output directory."""

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    """Functional checks for the avocado sub-command plugins."""

    def test_sysinfo_plugin(self):
        """'avocado sysinfo' must succeed and collect at least one file."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        """'avocado list' must succeed and find tests in the default dir."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        """'avocado list' with a bogus reference must fail with a clear error."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to resolve reference", output)

    def test_plugin_list(self):
        """'avocado plugins' must succeed and report no disabled plugins."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        # Fix: was (2, 7, 0); tuple comparison makes (2, 7) >= (2, 7, 0)
        # False (a shorter tuple sorts before its extension), so the check
        # was silently skipped on Python 2.7
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        """'avocado config' must succeed and show no disabled plugins."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        """'avocado config --datadir' must succeed as well."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_disable_plugin(self):
        """A plugin disabled via config file must vanish from 'plugins'."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = './scripts/avocado --config %s plugins' % config
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks if html, json, xunit and zip_archive plugins are enabled.
        Then it runs a test with zip_archive running first, which means the html,
        json and xunit output files do not make into the archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make into the archive.
        """
        def run_config(config_path):
            # Run passtest.py with --archive under the given config file
            cmd = ('./scripts/avocado --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        # The html result plugin is optional; include it only if installed
        try:
            pkg_resources.require('avocado_result_html')
            result_plugins.append("html")
            result_outputs.append("html/results.html")
        except pkg_resources.DistributionNotFound:
            pass

        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        """'avocado plugins' must not hit missing-argparse-attribute errors."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)

1017

1018 1019 1020 1021
class ParseXMLError(Exception):

    """Raised when the xUnit (XML) job output cannot be parsed."""


1022
class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    """Functional checks for the xunit result plugin."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        """
        Run a single test with '--xunit -' and validate the XML counters.

        :param testname: test reference passed to 'avocado run'
        :param e_rc: expected avocado exit code
        :param e_ntests: expected 'tests' attribute value
        :param e_nerrors: expected 'errors' attribute value
        :param e_nnotfound: unused; kept for caller signature compatibility
        :param e_nfailures: expected 'failures' attribute value
        :param e_nskip: expected 'skipped' attribute value
        :raises ParseXMLError: when stdout is not valid XML
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()

1092 1093 1094 1095 1096

class ParseJSONError(Exception):

    """Raised when the JSON job output cannot be parsed."""


1097
class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    """Functional checks for the JSON result plugin."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip, external_runner=None):
        """
        Run a single test with '--json -' and validate the JSON counters.

        :param testname: test reference passed to 'avocado run'
        :param e_rc: expected avocado exit code
        :param e_ntests: expected number of entries in 'tests'
        :param e_nerrors: expected 'errors' counter
        :param e_nfailures: expected 'failures' counter
        :param e_nskip: expected 'skip' counter
        :param external_runner: optional --external-runner executable
        :returns: the parsed JSON result
        :raises ParseJSONError: when stdout is not valid JSON
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        if external_runner is not None:
            cmd_line += " --external-runner '%s'" % external_runner
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected tests")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected tests")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    @unittest.skipIf(not GNU_ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        """Shell-hostile chars in a test reference must survive end to end."""
        data = self.run_and_check('"-ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0, GNU_ECHO_BINARY)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1--ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1--ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

L
Lukáš Doktor 已提交
1169

1170 1171
# Allow running this test module directly (outside of a test runner)
if __name__ == '__main__':
    unittest.main()