test_basic.py 32.6 KB
Newer Older
1
import json
2
import os
3
import shutil
4
import time
5
import sys
6
import tempfile
7
import xml.dom.minidom
8
import glob
9

10 11 12 13 14
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

15
from avocado.core import exit_codes
16 17 18 19
from avocado.utils import process
from avocado.utils import script


20
# Root of the avocado source tree (two levels up from this test file);
# used as the cwd for every './scripts/avocado ...' invocation below
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


24 25 26 27
# Shell payloads written to temporary scripts by the functional tests below
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

# Minimal no-op avocado plugin, used to exercise plugin loading
VOID_PLUGIN_CONTENTS = """#!/usr/bin/env python
from avocado.core.plugins.plugin import Plugin
class VoidPlugin(Plugin):
    pass
"""

# NOTE: the missing ':' after the class statement is intentional -- this
# payload checks how avocado reacts to a plugin file with a syntax error
SYNTAX_ERROR_PLUGIN_CONTENTS = """#!/usr/bin/env python
from avocado.core.plugins.plugin import Plugin
class VoidPlugin(Plugin)
"""

# Plugin that registers a 'hello' subcommand printing a greeting
HELLO_PLUGIN_CONTENTS = """#!/usr/bin/env python
from avocado.core.plugins.plugin import Plugin
class HelloWorld(Plugin):
    name = 'hello'
    enabled = True
    def configure(self, parser):
        self.parser = parser.subcommands.add_parser('hello')
        super(HelloWorld, self).configure(self.parser)
    def run(self, args):
        print('Hello World!')
"""

59 60 61

class RunnerOperationTest(unittest.TestCase):

    """
    Functional tests for basic 'avocado run' operations: the command line
    tool is spawned as a subprocess and its exit code / output is checked.
    """

    def setUp(self):
        # Private job results directory, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_runner_all_ok(self):
        """A job with only passing tests runs cleanly (rc checked by run())."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest passtest' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_alias(self):
        """The 'datadir' test alias is resolvable and runs fine."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s datadir' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_noalias(self):
        """The datadir test also runs when referenced by relative path."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        """passtest also runs when referenced by relative path (no alias)."""
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_tests_fail(self):
        """One failing test makes the job exit with AVOCADO_TESTS_FAIL."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest failtest passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        """An unresolvable test url fails the job but must not crash avocado."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s bogustest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        """Both the test failure and the tearDown error must be reported."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - doublefail' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        """An uncaught exception in a test maps to ERROR status."""
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        """A fail-type exception maps to FAIL status."""
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        """A test timeout produces TestTimeoutError, never an abort."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - timeouttest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestTimeoutError: Timeout reached waiting for", output,
                      "Test did not fail with timeout exception:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        """An aborted test process must be reported as such."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - abort' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test process aborted'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        """--silent must produce no stderr output at all."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest --silent' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stderr, expected_output)

    def test_empty_args_list(self):
        """Calling avocado with no arguments is a usage error."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        """'avocado run' without test urls fails with a clear message."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No tests found for given urls'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        """Discovery errors go to stderr only, never to stdout."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        """--force-job-id rejects values that are not 40-digit hex strings."""
        # FIX: chdir added for consistency with the other tests -- the
        # relative './scripts/avocado' path otherwise depends on the cwd
        # left behind by previously-run tests
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar passtest'
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        """--force-job-id accepts a valid 40-digit hex id."""
        os.chdir(basedir)  # consistency: all tests run relative to basedir
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 passtest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        """When not forced, a 40-digit hex job id is generated."""
        os.chdir(basedir)  # consistency: all tests run relative to basedir
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off passtest --json -' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's an hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        """Calling skip() outside setUp() results in ERROR, not SKIP."""
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        # Poll up to ~5s for the link to appear while the job is running
        for _ in xrange(50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                break
        # FIX: always reap the child process -- the original only called
        # wait() when the link showed up, leaking the subprocess whenever
        # the link never appeared
        avocado_process.wait()
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        """--dry-run must skip every test and expose the injected mux params."""
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest failtest "
               "errortest --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a "
               "foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        # FIX: close the log file after reading -- the original
        # open(debuglog, 'r').read() leaked the file handle
        with open(debuglog, 'r') as debuglog_file:
            log = debuglog_file.read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 3)
        for i in xrange(3):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 3)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

291

292 293 294
class RunnerHumanOutputTest(unittest.TestCase):

    """
    Functional checks of the human (console) output of 'avocado run':
    each test status must be rendered with its expected status tag.
    """

    def setUp(self):
        # Private job results directory, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        """A passing test shows a PASS status line."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        """A failing test shows a FAIL status line."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s failtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        """An erroring test shows an ERROR status line."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s errortest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        """A skipped test shows a SKIP status line (job still ALL_OK)."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s skiponsetup' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    def test_ugly_echo_cmd(self):
        """
        A simple test command with embedded newlines, quotes and slashes
        runs fine, its output is logged verbatim, and the result directory
        name has the special characters ('/') escaped.
        """
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run "/bin/echo -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS /bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

367

368
class RunnerSimpleTest(unittest.TestCase):

    """
    Functional tests running SIMPLE (shell script) tests through
    'avocado run', including runner timing sanity checks.
    """

    def setUp(self):
        # Private job results dir plus two temporary shell scripts (one
        # passing, one failing); everything is removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        """A passing shell script results in AVOCADO_ALL_OK."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        """A failing shell script results in AVOCADO_TESTS_FAIL."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and ensure they do not take more than 30 seconds.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = 'sleeptest ' + 'failtest ' * 100 + 'sleeptest'
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        # ~2s of sleeptests plus the 30s failtest budget from the test above
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        # Every avocado-bash-utils log level must reach the job log
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)
463 464


465
class ExternalRunnerTest(unittest.TestCase):

    """
    Checks the --external-runner feature, using /bin/sh to execute
    temporary pass/fail shell snippets.
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        suffix = 'avocado_externalrunner_functional'
        self.pass_script = script.TemporaryScript('pass', PASS_SHELL_CONTENTS,
                                                  suffix)
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('fail', FAIL_SHELL_CONTENTS,
                                                  suffix)
        self.fail_script.save()

    def test_externalrunner_pass(self):
        """A passing snippet run by /bin/sh yields AVOCADO_ALL_OK."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        result = process.run(cmd_line % (self.tmpdir, self.pass_script.path),
                             ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        """A failing snippet run by /bin/sh yields AVOCADO_TESTS_FAIL."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        result = process.run(cmd_line % (self.tmpdir, self.fail_script.path),
                             ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        """--external-runner-chdir=test without a testdir fails the job."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        result = process.run(cmd_line % (self.tmpdir, self.pass_script.path),
                             ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        for tmp_script in (self.pass_script, self.fail_script):
            tmp_script.remove()
        shutil.rmtree(self.tmpdir)


520
class AbsPluginsTest(object):

    """
    Mixin providing a scratch output directory for plugin tests.
    """

    def setUp(self):
        # Fresh directory per test; removed again in tearDown()
        prefix = 'avocado_' + __name__
        self.base_outputdir = tempfile.mkdtemp(prefix=prefix)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    """
    Functional tests for the command line plugins shipped with avocado.
    """

    def test_sysinfo_plugin(self):
        """'avocado sysinfo' collects at least one file into the output dir."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        """'avocado list' succeeds and does find tests."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        """Listing a bogus url reports a discovery error on stderr."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        """'avocado plugins' lists plugins; none should be disabled."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        # FIX: sys.version_info[:2] is a 2-tuple; comparing it to the
        # 3-tuple (2, 7, 0) evaluated False even on Python 2.7 (a tuple is
        # smaller than any strict extension of itself), so the check below
        # never ran there
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        """'avocado config' succeeds with nothing disabled."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        """'avocado config --datadir' succeeds with nothing disabled."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_Namespace_object_has_no_attribute(self):
        """Regression check: no argparse Namespace attribute errors."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)

609

610 611 612 613
class ParseXMLError(Exception):

    """Raised when the xunit (XML) job output cannot be parsed."""

    pass


614
class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    """
    Checks the xunit (XML) result plugin output for each test status.
    """

    def setUp(self):
        # Private job results dir; AbsPluginsTest.setUp() creates the
        # plugin scratch output dir
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        # Runs a single test with --xunit output on stdout and checks the
        # job exit code plus the counters in the <testsuite> tag.
        # NOTE(review): e_nnotfound is never checked against anything in the
        # body below; it is kept in the signature for caller compatibility.
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        # Any malformed XML is surfaced as a ParseXMLError carrying the
        # raw output for diagnosis (Python 2 except syntax)
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception, detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skip'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        """A pass counts as 1 test, no errors/failures/skips."""
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        """A failure counts as 1 test with 1 failure."""
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        """A skip counts as 1 test with 1 skip, job still ALL_OK."""
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        """An error counts as 1 test with 1 error."""
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()

684 685 686 687 688

class ParseJSONError(Exception):
    """Raised when the runner's JSON output cannot be parsed."""
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    """
    Functional checks for the JSON result plugin of the avocado runner.
    """

    def setUp(self):
        # Each test gets its own job-results directory, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        """
        Run avocado with the JSON plugin enabled and validate its output.

        :param testname: test reference passed to the runner
        :param e_rc: expected runner exit status
        :param e_ntests: expected number of executed tests
        :param e_nerrors: expected number of test errors
        :param e_nfailures: expected number of test failures
        :param e_nskip: expected number of skipped tests
        :raise ParseJSONError: if the runner output is not valid JSON
        :return: the decoded JSON result data
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:  # "as" syntax works on Python 2.6+ and 3.x
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        """A single passing test: one test, no errors/failures/skips."""
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        """A single failing test must be accounted as one failure."""
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        """A test skipped during setUp must be accounted as one skip."""
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        """A single erroring test must be accounted as one error."""
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    def test_ugly_echo_cmd(self):
        """A test url full of quotes, escapes and slashes must survive both
        the JSON encoding and the logdir name escaping."""
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        # Remove the per-test job-results directory created in setUp()
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

# Allow running this module directly as a test suite
if __name__ == '__main__':
    unittest.main()