test_basic.py 51.1 KB
Newer Older
1
# This Python file uses the following encoding: utf-8
2 3
import aexpect
import glob
4
import json
5
import os
6
import re
7
import shutil
8
import signal
9
import sys
10
import tempfile
11
import time
12
import xml.dom.minidom
13
import zipfile
14
import unittest
15
import psutil
16 17

import pkg_resources
18

19
from avocado.core import exit_codes
20
from avocado.utils import astring
21 22
from avocado.utils import process
from avocado.utils import script
23
from avocado.utils import path as utils_path
24

25
# Root of the avocado source tree: two levels above this test file,
# normalized to an absolute path.
basedir = os.path.abspath(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))

28 29 30 31
# Minimal executable "simple test" that always succeeds.
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

# Shell snippet (no shebang) that exits successfully.
PASS_SHELL_CONTENTS = "exit 0"

# Minimal executable "simple test" that always fails.
FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

# Shell snippet (no shebang) that exits with failure.
FAIL_SHELL_CONTENTS = "exit 1"

# Helper library used by LOCAL_IMPORT_TEST_CONTENTS below.
HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

# Instrumented test that imports a module living next to it, to verify
# that the runner makes sibling modules importable.
LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

# Test that forges an invalid internal status after running.
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        # Please do NOT ever use this, it's for unittesting only.
        self._Test__status = 'not supported'

    def test(self):
        pass
'''

# Test module whose class body raises at import/definition time.
INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''

# Test that reports a status on the runner queue and then hangs.
REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(60)
'''

# Test that kills its own process so no final status is ever reported.
DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


A
Amador Pahim 已提交
101
def probe_binary(binary):
    """
    Locate `binary` on the system.

    :param binary: name of the executable to look for.
    :return: full path of the executable, or None when it is not found.
    """
    try:
        found = utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        found = None
    return found

L
Lukáš Doktor 已提交
107

108
TRUE_CMD = probe_binary('true')
CC_BINARY = probe_binary('cc')

# On macOS, the default GNU coreutils installation (brew)
# installs the gnu utility versions with a g prefix. It still has the
# BSD versions of the core utilities installed on their expected paths
# but their behavior and flags are in most cases different.
GNU_ECHO_BINARY = probe_binary('echo')
if GNU_ECHO_BINARY is not None and probe_binary('man') is not None:
    # Inspect the man page: BSD echo has no '-e' flag, so fall back to
    # the g-prefixed GNU version in that case.
    echo_manpage = process.run('man %s' % os.path.basename(GNU_ECHO_BINARY)).stdout
    if '-e' not in echo_manpage:
        GNU_ECHO_BINARY = probe_binary('gecho')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')
123 124


125 126
class RunnerOperationTest(unittest.TestCase):

    """
    Functional checks of the "avocado run" command line: exit codes,
    test resolution, status handling and results layout.
    """

    def setUp(self):
        # Private job results dir, removed in tearDown().
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        """The '-v' flag must print a version string on stderr and exit 0."""
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d\\.\\d:'\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        # FIX: the section header must be followed by a newline; without it
        # the first key would be glued to '[datadir.paths]' producing an
        # invalid INI file.
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        """Two passing tests must run without raising (rc 0)."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_failfast(self):
        """--failfast must interrupt the job right after the first failure."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on' %
                    self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_alias(self):
        """datadir.py must be resolvable by its short (aliased) name."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    @unittest.skipIf(not CC_BINARY,
                     "C compiler is required by the underlying datadir.py test")
    def test_datadir_noalias(self):
        """datadir.py must also be resolvable by its full relative path."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        """passtest.py must be runnable by its full relative path."""
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        """A test importing a sibling module must run successfully."""
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        """A forged, unknown test status must be reported as ERROR."""
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    def test_hanged_test_with_status(self):
        """ Check that avocado handles hanged tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            self.assertLess(res.duration, 40, "Test execution took too long, "
                            "which is likely because the hanged test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        """A test that dies silently must be reported as ERROR."""
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        """A failing test in the job must yield AVOCADO_TESTS_FAIL."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        """An unresolvable reference must fail the job, not crash avocado."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        """Both the test failure and the tearDown error must be logged."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        """An uncaught exception in a test must be reported as ERROR."""
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        """A fail_on-decorated exception must be reported as FAIL."""
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        """A test hitting its timeout must interrupt the job cleanly."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    @unittest.skipIf(os.environ.get("AVOCADO_CHECK_FULL") != "1",
                     "Skipping test that take a long time to run, are "
                     "resource intensive or time sensitve")
    def test_runner_abort(self):
        """An aborting test must be reported as dying without status."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        """--silent must suppress all stdout output."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        """Running avocado with no arguments must fail with a usage error."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        """'run' without test references must fail the job with a message."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        """The unresolved-reference message must go to stderr, not stdout."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s sbrubles' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to resolve reference', result.stderr)
        self.assertNotIn('Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        """A malformed --force-job-id must be rejected on stderr."""
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s --force-job-id foobar passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        """A well-formed 40-digit hex --force-job-id must be accepted."""
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        """Without --force-job-id a 40-digit hex job id must be generated."""
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's an hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        """Calling skip() outside setUp must turn the test into an ERROR."""
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        # Poll for up to ~5s; the link should appear while the job is
        # still running.
        for _ in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        """--dry-run must skip all tests but still expose their params."""
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        # FIX: close the log file deterministically instead of leaking
        # the handle from a bare open().read().
        with open(debuglog, 'r') as debuglog_file:
            log = debuglog_file.read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn(tempfile.gettempdir(), debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 4)
        for i in xrange(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        """A broken test module must surface as a TestError for its test."""
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('./scripts/avocado --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    def test_read(self):
        """A stdin-blocked simple test must not stall the runner."""
        os.chdir(basedir)
        cmd = "./scripts/avocado run --sysinfo=off --job-results-dir %s %s"
        cmd %= (self.tmpdir, READ_BINARY)
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def tearDown(self):
        # Remove the per-test job results directory.
        shutil.rmtree(self.tmpdir)

527

528 529 530
class RunnerHumanOutputTest(unittest.TestCase):

    """
    Checks the human-readable UI output of "avocado run" for each of
    the basic test statuses, plus a couple of special scenarios.
    """

    def setUp(self):
        """Create a private job results directory."""
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        """A passing test must be shown as PASS."""
        os.chdir(basedir)
        run_cmd = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                   'passtest.py' % self.tmpdir)
        run_result = process.run(run_cmd, ignore_status=True)
        wanted_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(run_result.exit_status, wanted_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (wanted_rc, run_result))
        self.assertIn('passtest.py:PassTest.test:  PASS', run_result.stdout)

    def test_output_fail(self):
        """A failing test must be shown as FAIL."""
        os.chdir(basedir)
        run_cmd = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                   'failtest.py' % self.tmpdir)
        run_result = process.run(run_cmd, ignore_status=True)
        wanted_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(run_result.exit_status, wanted_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (wanted_rc, run_result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', run_result.stdout)

    def test_output_error(self):
        """An erroring test must be shown as ERROR."""
        os.chdir(basedir)
        run_cmd = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                   'errortest.py' % self.tmpdir)
        run_result = process.run(run_cmd, ignore_status=True)
        wanted_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(run_result.exit_status, wanted_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (wanted_rc, run_result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', run_result.stdout)

    def test_output_skip(self):
        """A skipped test must be shown as SKIP and not fail the job."""
        os.chdir(basedir)
        run_cmd = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                   'skiponsetup.py' % self.tmpdir)
        run_result = process.run(run_cmd, ignore_status=True)
        wanted_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(run_result.exit_status, wanted_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (wanted_rc, run_result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', run_result.stdout)

    @unittest.skipIf(not GNU_ECHO_BINARY,
                     'GNU style echo binary not available')
    def test_ugly_echo_cmd(self):
        """Special characters in an external-runner test id must be handled."""
        os.chdir(basedir)
        run_cmd = ('./scripts/avocado run --external-runner "%s -ne" '
                   '"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                   ' --sysinfo=off  --show-job-log' %
                   (GNU_ECHO_BINARY, self.tmpdir))
        run_result = process.run(run_cmd, ignore_status=True)
        wanted_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(run_result.exit_status, wanted_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (wanted_rc, run_result))
        self.assertIn('[stdout] foo', run_result.stdout, run_result)
        self.assertIn('[stdout] \'"', run_result.stdout, run_result)
        self.assertIn('[stdout] bar/baz', run_result.stdout, run_result)
        self.assertIn('PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
                      run_result.stdout, run_result)
        # logdir name should escape special chars (/)
        result_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                             'test-results', '*'))
        self.assertEqual(len(result_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (result_dirs))
        self.assertEqual(os.path.basename(result_dirs[0]),
                         '1-foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        """Replaying a job with only SKIPs, filtered on PASS, must work."""
        first_cmd = ("./scripts/avocado run --job-results-dir %s --json - "
                     "skiponsetup.py" % self.tmpdir)
        first_results = json.loads(process.run(first_cmd).stdout)
        job_id = str(first_results["job_id"])
        replay_cmd = ("./scripts/avocado run --job-results-dir %s "
                      "--replay %s --replay-test-status PASS") % (self.tmpdir,
                                                                  job_id)
        process.run(replay_cmd)

    def tearDown(self):
        """Remove the per-test job results directory."""
        shutil.rmtree(self.tmpdir)

618

619
class RunnerSimpleTest(unittest.TestCase):

    """
    Functional tests for running SIMPLE (executable) tests through the
    avocado command line runner.
    """

    def setUp(self):
        # Per-test job-results directory, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        # Script name uses non-ASCII characters on purpose, to exercise
        # unicode handling in test references and result dirs
        self.pass_script = script.TemporaryScript(
            'ʊʋʉʈɑ ʅʛʌ',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        """A passing executable test must yield an all-OK exit code."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        """A failing executable test must yield the tests-failed exit code."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and assure they not take more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        # 2 sleeptests (~1s each) + 100 immediate failures + overhead
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        # Each bash-utils log level must land in the job log
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        """A test referenced by bare file name (relative path) must run."""
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    def test_kill_stopped_sleep(self):
        """
        SIGTSTP-stop a running test and check that the job-timeout still
        interrupts it and the job finishes without a test traceback.
        """
        proc = aexpect.Expect("./scripts/avocado run 60 --job-results-dir %s "
                              "--external-runner %s --sysinfo=off --job-timeout 3"
                              % (self.tmpdir, SLEEP_BINARY))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado process, not the shell executing it
        avocado_shell = psutil.Process(proc.get_pid())
        avocado_proc = avocado_shell.children()[0]
        pid = avocado_proc.pid
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 5s after job-timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
        # NOTE(review): the failure message says "1." while the expected
        # status is 8 (job interrupted) — message looks stale; confirm
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "1.")

        sleep_dir = astring.string_to_safe_path("1-60")
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "suppose to be stopped and unable to produce it.\n"
                         "%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)
770 771


772
class ExternalRunnerTest(unittest.TestCase):

    """
    Functional tests for the --external-runner feature, which lets an
    arbitrary binary (here /bin/sh) execute the test references.
    """

    def setUp(self):
        # Per-test job-results directory, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        """A passing shell snippet run via /bin/sh must report all-OK."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        """A failing shell snippet run via /bin/sh must report tests-failed."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        """--external-runner-chdir=test without a testdir must fail the job."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        """An external runner without any test reference must fail the job."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=%s' % (self.tmpdir, TRUE_CMD))
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('No test references provided nor any other '
                           'arguments resolved into tests')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


840
class AbsPluginsTest(object):

    """
    Mixin providing a temporary output directory for plugin tests.
    Not a TestCase itself; concrete classes also inherit unittest.TestCase.
    """

    def setUp(self):
        # Scratch dir for plugin output, removed in tearDown()
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    """
    Functional tests for avocado sub-commands provided by plugins
    (sysinfo, list, plugins, config) and for plugin configuration.
    """

    def test_sysinfo_plugin(self):
        """'avocado sysinfo' must succeed and produce at least one file."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        """'avocado list' must succeed and find tests."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        """'avocado list' with a bogus reference must fail with a message."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to resolve reference", output)

    def test_plugin_list(self):
        """'avocado plugins' must succeed and list no disabled plugins."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        # Original compared against the 3-tuple (2, 7, 0), which a 2-tuple
        # slice can never reach on exactly Python 2.7; compare 2-tuples
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        """'avocado config' must succeed with no disabled entries."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        """'avocado config --datadir' must succeed with no disabled entries."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_disable_plugin(self):
        """A plugin disabled via config file must vanish from 'plugins'."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = './scripts/avocado --config %s plugins' % config
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn("Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks if html, json, xunit and zip_archive plugins are enabled.
        Then it runs a test with zip_archive running first, which means the html,
        json and xunit output files do not make into the archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make into the archive.
        """
        def run_config(config_path):
            # Run passtest.py with --archive under the given config
            cmd = ('./scripts/avocado --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        # The HTML result plugin is optional; include it only if installed
        try:
            pkg_resources.require('avocado_result_html')
            result_plugins.append("html")
            result_outputs.append("html/results.html")
        except pkg_resources.DistributionNotFound:
            pass

        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            # zip ran first, so the other results must NOT be archived
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            # zip ran last, so every other result must be inside the archive
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        """'avocado plugins' must not crash with an argparse attribute error."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)

1018

1019 1020 1021 1022
class ParseXMLError(Exception):
    """Raised when the xUnit (XML) job output cannot be parsed."""
    pass


1023
class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    """
    Functional tests for the xUnit result plugin: runs a job with
    '--xunit -' and validates the XML emitted on stdout.
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        """
        Run `testname` with xUnit output on stdout and check the job exit
        code plus the testsuite counters in the XML.

        :param e_rc: expected avocado exit code
        :param e_ntests/e_nerrors/e_nfailures/e_nskip: expected counters
        :param e_nnotfound: kept for signature compatibility; currently
                            not checked against the XML
        :raises ParseXMLError: when stdout is not valid XML
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()

1093 1094 1095 1096 1097

class ParseJSONError(Exception):
    """Raised when the JSON job output cannot be parsed."""
    pass


1098
class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    """
    Functional tests for the JSON result plugin: runs a job with
    '--json -' and validates the JSON emitted on stdout.
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip, external_runner=None):
        """
        Run `testname` with JSON output on stdout and check the job exit
        code plus the counters in the parsed result.

        :param e_rc: expected avocado exit code
        :param e_ntests/e_nerrors/e_nfailures/e_nskip: expected counters
        :param external_runner: optional binary passed as --external-runner
        :returns: the parsed JSON data (dict)
        :raises ParseJSONError: when stdout is not valid JSON
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        if external_runner is not None:
            cmd_line += " --external-runner '%s'" % external_runner
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected tests")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected tests")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    @unittest.skipIf(not GNU_ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        """A test reference full of shell metacharacters must run cleanly."""
        data = self.run_and_check('"-ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0, GNU_ECHO_BINARY)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1--ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1--ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

L
Lukáš Doktor 已提交
1170

1171 1172
# Allow running this test module directly (e.g. `python test_basic.py`)
if __name__ == '__main__':
    unittest.main()