test_basic.py 30.5 KB
Newer Older
1
import json
2
import os
3
import shutil
4
import time
5
import sys
6
import tempfile
7
import xml.dom.minidom
8

9 10 11 12 13
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

14
from avocado.core import exit_codes
15 16 17 18
from avocado.utils import process
from avocado.utils import script


19
# Root of the avocado source tree: two directory levels above this file.
# Tests chdir() here so the relative './scripts/avocado' path works.
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


# Executable shell scripts (with shebang) used as "simple tests" by
# RunnerSimpleTest below.
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

# Bare shell snippets (no shebang) fed to /bin/sh through the
# --external-runner option by ExternalRunnerTest below.
PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

# Minimal avocado plugin source bodies.
# NOTE(review): none of the three *_PLUGIN_CONTENTS constants are
# referenced by the tests visible in this file -- presumably consumed by
# plugin-loading tests elsewhere (or dead); confirm before removing.
VOID_PLUGIN_CONTENTS = """#!/usr/bin/env python
from avocado.core.plugins.plugin import Plugin
class VoidPlugin(Plugin):
    pass
"""

# Deliberately broken plugin (missing ':' after the class header).
SYNTAX_ERROR_PLUGIN_CONTENTS = """#!/usr/bin/env python
from avocado.core.plugins.plugin import Plugin
class VoidPlugin(Plugin)
"""

# Plugin adding a 'hello' subcommand that prints a greeting.
HELLO_PLUGIN_CONTENTS = """#!/usr/bin/env python
from avocado.core.plugins.plugin import Plugin
class HelloWorld(Plugin):
    name = 'hello'
    enabled = True
    def configure(self, parser):
        self.parser = parser.subcommands.add_parser('hello')
        super(HelloWorld, self).configure(self.parser)
    def run(self, args):
        print('Hello World!')
"""

58 59 60

class RunnerOperationTest(unittest.TestCase):

    """
    Functional checks of basic 'avocado run' operations.

    Every test spawns the avocado command line tool as a subprocess and
    inspects its exit status and/or output.
    """

    def setUp(self):
        # Private job-results dir so parallel runs don't interfere
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_runner_all_ok(self):
        """Two passing tests => avocado exits successfully."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest passtest' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_alias(self):
        """The 'datadir' test alias is resolvable."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s datadir' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_noalias(self):
        """The datadir test also runs when given as an explicit path."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        """The passtest also runs when given as an explicit path."""
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_tests_fail(self):
        """A failing test in the job makes avocado return TESTS_FAIL."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest failtest passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        """An unresolvable test url fails the job, but must not crash."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s bogustest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        """A test failing in both test() and tearDown() reports both errors."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - doublefail' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        """An uncaught exception in a test is reported as ERROR."""
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        """A fail_on_exception test is reported as FAIL (not ERROR)."""
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        """A timed-out test fails with a timeout error, not an abort."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - timeouttest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestTimeoutError: Timeout reached waiting for", output,
                      "Test did not fail with timeout exception:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        """A test that kills its own process is reported as aborted."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - abort' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test process aborted'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        """--silent suppresses all stderr output."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest --silent' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stderr, expected_output)

    def test_empty_args_list(self):
        """Running avocado with no arguments prints usage, not an error."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        unexpected_output = 'too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertNotIn(unexpected_output, result.stdout)

    def test_empty_test_list(self):
        """'run' with no test urls fails the job with a clear message."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No tests found for given urls'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        """Discovery errors go to stderr, not stdout."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        """--force-job-id rejects values that are not 40-digit hex."""
        cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar passtest'
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        """--force-job-id accepts a proper 40-digit hex id."""
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 passtest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        """Without --force-job-id a 40-digit hex job id is generated."""
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off passtest --json -' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's an hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        """Calling skip() outside setUp() is an ERROR, not a SKIP."""
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        # Poll up to ~5s for the link to show up
        for _ in range(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                break
        # FIX: always reap the child, even when the link never appears;
        # previously wait() only happened inside the if-branch, leaking
        # the subprocess on failure.
        avocado_process.wait()
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        """--dry-run skips all tests and records the injected mux params."""
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest failtest "
               "errortest --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a "
               "foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        # FIX: close the log file instead of leaking the handle
        log_file = open(debuglog, 'r')
        try:
            log = log_file.read()
        finally:
            log_file.close()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 3)
        for i in range(3):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 3)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

290

291 292 293
class RunnerHumanOutputTest(unittest.TestCase):

    """Checks the human-readable status lines printed by 'avocado run'."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def _check_output(self, url, expected_rc, excerpt):
        """Run a single test url; verify exit code and a stdout excerpt."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off '
                    '--job-results-dir %s %s' % (self.tmpdir, url))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn(excerpt, result.stdout)

    def test_output_pass(self):
        self._check_output('passtest', exit_codes.AVOCADO_ALL_OK,
                           'passtest.py:PassTest.test:  PASS')

    def test_output_fail(self):
        self._check_output('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           'failtest.py:FailTest.test:  FAIL')

    def test_output_error(self):
        self._check_output('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           'errortest.py:ErrorTest.test:  ERROR')

    def test_output_skip(self):
        self._check_output('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           'skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                           '  SKIP')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

340

341
class RunnerSimpleTest(unittest.TestCase):

    """Runs on-disk shell scripts as avocado 'simple tests'."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'avocado_fail.sh',
            FAIL_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.fail_script.save()

    def _run_avocado(self, urls):
        """Run 'avocado run' on the given urls string; return the result."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, urls))
        return process.run(cmd_line, ignore_status=True)

    def test_simpletest_pass(self):
        """A passing shell script produces an all-ok job."""
        result = self._run_avocado(self.pass_script.path)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        """A failing shell script produces a tests-fail job."""
        result = self._run_avocado(self.fail_script.path)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and assure they not take more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        started = time.time()
        result = self._run_avocado('failtest ' * 100)
        elapsed = time.time() - started
        self.assertLess(elapsed, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        started = time.time()
        result = self._run_avocado('sleeptest ' + 'failtest ' * 100 +
                                   'sleeptest')
        elapsed = time.time() - started
        self.assertLess(elapsed, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        result = self._run_avocado('examples/tests/simplewarning.sh '
                                   '--show-job-log')
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)
436 437


438
class ExternalRunnerTest(unittest.TestCase):

    """Exercises the --external-runner option with /bin/sh scripts."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def _run_external(self, test_path, extra_option=None):
        """Run a script through /bin/sh as external runner."""
        os.chdir(basedir)
        pieces = ['./scripts/avocado run --job-results-dir %s --sysinfo=off '
                  '--external-runner=/bin/sh' % self.tmpdir]
        if extra_option is not None:
            pieces.append(extra_option)
        pieces.append(test_path)
        return process.run(' '.join(pieces), ignore_status=True)

    def test_externalrunner_pass(self):
        result = self._run_external(self.pass_script.path)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        result = self._run_external(self.fail_script.path)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        # --external-runner-chdir=test without a testdir must be rejected
        result = self._run_external(self.pass_script.path,
                                    extra_option='--external-runner-chdir=test')
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


493
class AbsPluginsTest(object):

    """Mixin giving plugin test classes a private scratch output dir."""

    def setUp(self):
        prefix = 'avocado_' + __name__
        self.base_outputdir = tempfile.mkdtemp(prefix=prefix)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    """Functional checks of avocado's built-in plugins/subcommands."""

    def test_sysinfo_plugin(self):
        """'avocado sysinfo' collects at least one file."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        """'avocado list' finds tests in the default tests dir."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        """'avocado list' with a bogus url reports the error on stderr."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        """'avocado plugins' lists plugins without any disabled ones."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        # FIX: was `>= (2, 7, 0)`, which is False for the 2-tuple (2, 7)
        # (a proper prefix compares as smaller), silently skipping the
        # assertion on Python 2.7.
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        """'avocado config' shows the configuration without disabled items."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        """'avocado config --datadir' works like plain 'config'."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_Namespace_object_has_no_attribute(self):
        """Regression check for argparse Namespace attribute errors."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)

582

583 584 585 586
class ParseXMLError(Exception):
    """Raised when the xunit XML output of a job cannot be parsed."""
    pass


587
class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    """Checks the xunit result plugin ('--xunit -') output."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        """
        Run a single test with xunit output and check the testsuite tag.

        :param testname: test url to run
        :param e_rc: expected avocado exit code
        :param e_ntests: expected 'tests' attribute value
        :param e_nerrors: expected 'errors' attribute value
        :param e_nnotfound: unused; kept for caller compatibility
        :param e_nfailures: expected 'failures' attribute value
        :param e_nskip: expected 'skip' attribute value
        :raises ParseXMLError: if the output is not valid XML
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        # FIX: 'except Exception, detail' is Python-2-only syntax;
        # 'as' works on Python 2.6+ and 3.x alike.
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skip'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()

657 658 659 660 661

class ParseJSONError(Exception):
    """Raised when the JSON output of a job cannot be parsed."""
    pass


662
class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    """Checks the JSON result plugin ('--json -') output."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        """
        Run a single test with JSON output and check the result counters.

        :param testname: test url to run
        :param e_rc: expected avocado exit code
        :param e_ntests: expected number of tests in the results
        :param e_nerrors: expected 'errors' counter
        :param e_nfailures: expected 'failures' counter
        :param e_nskip: expected 'skip' counter
        :raises ParseJSONError: if the output is not valid JSON
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        # FIX: 'except Exception, detail' is Python-2-only syntax;
        # 'as' works on Python 2.6+ and 3.x alike.
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected tests")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected tests")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

719 720
# Allow running this functional test module directly.
if __name__ == '__main__':
    unittest.main()