test_basic.py
import json
import os
import shutil
import time
import sys
import tempfile
import xml.dom.minidom

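# unittest2 backports the Python 2.7 unittest features used below
# (e.g. assertIn, assertLess) to Python 2.6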
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.utils import process
from avocado.utils import script


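# The avocado source tree root (two levels up from this file); the avocado
# commands below are all run from this directory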
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


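# Shell snippets written out as temporary scripts and run by
# RunnerSimpleTest and InnerRunnerTest below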
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

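# Sources of minimal plugins loaded from an external directory by
# ExternalPluginsTest via the --plugins option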
VOID_PLUGIN_CONTENTS = """#!/usr/bin/env python
from avocado.core.plugins.plugin import Plugin
class VoidPlugin(Plugin):
    pass
"""

SYNTAX_ERROR_PLUGIN_CONTENTS = """#!/usr/bin/env python
from avocado.core.plugins.plugin import Plugin
class VoidPlugin(Plugin)
"""

HELLO_PLUGIN_CONTENTS = """#!/usr/bin/env python
from avocado.core.plugins.plugin import Plugin
class HelloWorld(Plugin):
    name = 'hello'
    enabled = True
    def configure(self, parser):
        self.parser = parser.subcommands.add_parser('hello')
        super(HelloWorld, self).configure(self.parser)
    def run(self, args):
        print('Hello World!')
"""


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest passtest' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s datadir' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest failtest passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 1
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s bogustest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 2
        unexpected_rc = 3
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - doublefail' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = 1
        unexpected_rc = 3
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 1
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 1
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - timeouttest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = 1
        unexpected_rc = 3
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestTimeoutError: Timeout reached waiting for", output,
                      "Test did not fail with timeout exception:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - abort' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test process aborted'
        expected_rc = 1
        unexpected_rc = 3
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest --silent' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 0
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stderr, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 0
        unexpected_output = 'too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertNotIn(unexpected_output, result.stdout)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 2
        expected_output = 'No tests found for given urls'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 2
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar passtest'
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(0, result.exit_status)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 passtest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(0, result.exit_status)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off passtest --json -' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(0, result.exit_status)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 1
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
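        # Poll for up to ~5 seconds (50 x 0.1s) for the 'latest' symlink to
        # show up while the job is still running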
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 0
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s failtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 1
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s errortest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 1
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s skiponsetup' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 0
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:  SKIP', result.stdout)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


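# Runs the temporary pass/fail shell scripts defined above as SIMPLE tests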
class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'avocado_fail.sh',
            FAIL_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 0
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 1
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and make sure they don't take more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest ' * 100
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (self.tmpdir, one_hundred)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = 1
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second; let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = 'sleeptest ' + 'failtest ' * 100 + 'sleeptest'
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = 1
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, 0,
                         "Avocado did not return rc 0:\n%s" %
                         (result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


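# Exercises the --inner-runner option, which delegates test execution to an
# external command (/bin/sh here) running the shell snippets defined above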
class InnerRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_innerrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_innerrunner_functional')
        self.fail_script.save()

    def test_innerrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --inner-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 0
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_innerrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --inner-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 1
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_innerrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --inner-runner=/bin/sh '
                    '--inner-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = 'Option "--inner-runner-testdir" is mandatory'
        self.assertIn(expected_output, result.stderr)
        expected_rc = 3
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


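# Checks that plugins can be loaded from an external directory passed via
# the --plugins option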
class ExternalPluginsTest(unittest.TestCase):

    def setUp(self):
        self.base_sourcedir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_void_plugin(self):
        self.void_plugin = script.make_script(
            os.path.join(self.base_sourcedir, 'avocado_void.py'),
            VOID_PLUGIN_CONTENTS)
        os.chdir(basedir)
        cmd_line = './scripts/avocado --plugins %s plugins' % self.base_sourcedir
        result = process.run(cmd_line, ignore_status=True)
        expected_output = 'noname'
        self.assertIn(expected_output, result.stdout)

    def test_syntax_error_plugin(self):
        self.syntax_err_plugin = script.make_script(
            os.path.join(self.base_sourcedir, 'avocado_syntax_err.py'),
            SYNTAX_ERROR_PLUGIN_CONTENTS)
        os.chdir(basedir)
        cmd_line = './scripts/avocado --plugins %s' % self.base_sourcedir
        result = process.run(cmd_line, ignore_status=True)
        expected_output = 'invalid syntax'
        self.assertIn(expected_output, result.stderr)

    def test_hello_plugin(self):
        self.hello_plugin = script.make_script(
            os.path.join(self.base_sourcedir, 'avocado_hello.py'),
            HELLO_PLUGIN_CONTENTS)
        os.chdir(basedir)
        cmd_line = './scripts/avocado --plugins %s hello' % self.base_sourcedir
        result = process.run(cmd_line, ignore_status=True)
        expected_output = 'Hello World!'
        self.assertIn(expected_output, result.stdout)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        if os.path.isdir(self.base_sourcedir):
            shutil.rmtree(self.base_sourcedir, ignore_errors=True)


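# Mixin providing a temporary output dir for the plugin tests; it inherits
# from object (not TestCase) so it is not collected as a test class by itself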
class AbsPluginsTest(object):

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 0
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = 0
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = 3
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = 0
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        # sys.version_info[:2] is a two-item tuple, so compare against (2, 7);
        # comparing against (2, 7, 0) would be False on Python 2.7 and skip the check
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = 0
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = 0
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = 0
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
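        # e_rc is the expected exit code; the other e_* arguments are the
        # expected <testsuite> attribute counts (e_nnotfound is currently unused)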
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --xunit - %s' % (self.tmpdir, testname)
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skip'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest', 0, 1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest', 1, 1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', 0, 1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest', 1, 1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
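        # e_rc is the expected exit code; the other e_* arguments are the
        # expected test/error/failure/skip counts in the JSON results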
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest', 0, 1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest', 1, 1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', 0, 1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest', 1, 1, 1, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

if __name__ == '__main__':
    unittest.main()