test_basic.py 32.8 KB
Newer Older
1
import json
2
import os
3
import shutil
4
import time
5
import sys
6
import tempfile
7
import xml.dom.minidom
8
import glob
9

10 11 12 13 14
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

15
from avocado.core import exit_codes
16 17 18 19
from avocado.utils import process
from avocado.utils import script


20
# Root of the avocado source tree (two levels up from this file); the tests
# os.chdir() here so the relative './scripts/avocado' path works.
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


24 25 26 27
# Payloads for the SIMPLE tests created on the fly: an executable script that
# passes and one that fails, plus bare shell snippets for the external-runner
# tests (run through '/bin/sh', hence no shebang).
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

# Helper library plus a test that imports it from the same directory, used to
# verify that a test can import a sibling module.
HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

50 51 52

class RunnerOperationTest(unittest.TestCase):

    """
    End-to-end checks of the 'avocado run' command line: exit codes, test
    statuses and output for a variety of test urls.
    """

    def setUp(self):
        # Private job-results dir, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_runner_all_ok(self):
        """The same passing test given twice runs and the job succeeds."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest passtest' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s datadir' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        """A test importing a module that sits next to it must find it."""
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest failtest passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s bogustest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        """Both the test and its tearDown fail; both messages must show up."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - doublefail' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - timeouttest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestTimeoutError: Timeout reached waiting for", output,
                      "Test did not fail with timeout exception:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - abort' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test process aborted'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest --silent' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stderr, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No tests found for given urls'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        # FIX: os.chdir(basedir) was missing here and in the two tests below;
        # the relative './scripts/avocado' path only worked because earlier
        # tests happened to have changed into basedir already.
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar passtest'
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 passtest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off passtest --json -' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's an hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        # Poll (up to ~5s) for the link while the job is still running
        try:
            for _ in xrange(50):
                time.sleep(0.1)
                if os.path.exists(link) and os.path.islink(link):
                    break
        finally:
            # FIX: always reap the child process; the original only called
            # wait() when the link appeared, leaving a zombie on failure.
            avocado_process.wait()
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest failtest "
               "errortest --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a "
               "foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        # FIX: close the log file deterministically; the original leaked the
        # file object returned by open().
        with open(debuglog, 'r') as debuglog_file:
            log = debuglog_file.read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 3)
        for i in xrange(3):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 3)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

297

298 299 300
class RunnerHumanOutputTest(unittest.TestCase):

    """Checks the human readable status lines printed by 'avocado run'."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def _assert_status_line(self, url, wanted_rc, wanted_line):
        """Run a single test url, then check exit code and status line."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '%s' % (self.tmpdir, url))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, wanted_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (wanted_rc, result))
        self.assertIn(wanted_line, result.stdout)

    def test_output_pass(self):
        self._assert_status_line('passtest', exit_codes.AVOCADO_ALL_OK,
                                 'passtest.py:PassTest.test:  PASS')

    def test_output_fail(self):
        self._assert_status_line('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                                 'failtest.py:FailTest.test:  FAIL')

    def test_output_error(self):
        self._assert_status_line('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                                 'errortest.py:ErrorTest.test:  ERROR')

    def test_output_skip(self):
        self._assert_status_line('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                                 'skiponsetup.py:SkipOnSetupTest.'
                                 'test_wont_be_executed:  SKIP')

    def test_ugly_echo_cmd(self):
        """A heavily quoted shell command survives as a test url."""
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run "/bin/echo -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        wanted_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, wanted_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (wanted_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS /bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # The log directory name must have the special chars (/) escaped
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

373

374
class RunnerSimpleTest(unittest.TestCase):

    """Runs SIMPLE tests (standalone executable scripts) through avocado."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh', PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'avocado_fail.sh', FAIL_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.fail_script.save()

    def _run_urls(self, urls):
        """Run 'avocado run' on the given url string, return the result."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, urls))
        return process.run(cmd_line, ignore_status=True)

    def test_simpletest_pass(self):
        job_result = self._run_urls(self.pass_script.path)
        wanted_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(job_result.exit_status, wanted_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (wanted_rc, job_result))

    def test_simpletest_fail(self):
        job_result = self._run_urls(self.fail_script.path)
        wanted_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(job_result.exit_status, wanted_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (wanted_rc, job_result))

    def test_runner_onehundred_fail_timing(self):
        """
        A failtest should return immediately; 100 of them must finish well
        under 30 seconds (a current machine takes about 0.12s, so this is a
        very safe bound).
        """
        started = time.time()
        job_result = self._run_urls('failtest ' * 100)
        elapsed = time.time() - started
        self.assertLess(elapsed, 30.0)
        wanted_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(job_result.exit_status, wanted_rc,
                         "Avocado did not return rc %d:\n%s" % (wanted_rc, job_result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        started = time.time()
        job_result = self._run_urls('sleeptest ' + 'failtest ' * 100 +
                                    'sleeptest')
        elapsed = time.time() - started
        self.assertLess(elapsed, 33.0)
        wanted_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(job_result.exit_status, wanted_rc,
                         "Avocado did not return rc %d:\n%s" % (wanted_rc, job_result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        job_result = self._run_urls('examples/tests/simplewarning.sh '
                                    '--show-job-log')
        wanted_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(job_result.exit_status, wanted_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (wanted_rc, job_result))
        self.assertIn('DEBUG| Debug message', job_result.stdout, job_result)
        self.assertIn('INFO | Info message', job_result.stdout, job_result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', job_result.stdout, job_result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', job_result.stdout, job_result)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)
469 470


471
class ExternalRunnerTest(unittest.TestCase):

    """Runs plain shell snippets through the --external-runner feature."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass', PASS_SHELL_CONTENTS, 'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail', FAIL_SHELL_CONTENTS, 'avocado_externalrunner_functional')
        self.fail_script.save()

    def _run_with_external_sh(self, test_path, extra_options=''):
        """Run a script with /bin/sh as the external runner."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s%s'
                    % (self.tmpdir, extra_options, test_path))
        return process.run(cmd_line, ignore_status=True)

    def test_externalrunner_pass(self):
        job_result = self._run_with_external_sh(self.pass_script.path)
        wanted_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(job_result.exit_status, wanted_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (wanted_rc, job_result))

    def test_externalrunner_fail(self):
        job_result = self._run_with_external_sh(self.fail_script.path)
        wanted_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(job_result.exit_status, wanted_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (wanted_rc, job_result))

    def test_externalrunner_chdir_no_testdir(self):
        # --external-runner-chdir=test without --external-runner-testdir
        # must be rejected with a clear message
        job_result = self._run_with_external_sh(
            self.pass_script.path, '--external-runner-chdir=test ')
        wanted_output = ('Option "--external-runner-chdir=test" requires '
                         '"--external-runner-testdir" to be set')
        self.assertIn(wanted_output, job_result.stderr)
        wanted_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(job_result.exit_status, wanted_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (wanted_rc, job_result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


526
class AbsPluginsTest(object):

    """
    Mixin used by the plugin test classes: provides a fresh scratch output
    directory for every test.
    """

    def setUp(self):
        # One private directory per test; removed again in tearDown()
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    """Checks the core command line plugins (sysinfo, list, plugins, config)."""

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        # BUG FIX: sys.version_info[:2] is a 2-tuple, and when the shared
        # items are equal a shorter tuple compares as *less* than a longer
        # one, so '(2, 7) >= (2, 7, 0)' was always False and the assertion
        # below never ran on Python 2.7.  Compare against (2, 7) instead.
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)

615

616 617 618 619
class ParseXMLError(Exception):
    """Raised when the xunit plugin output cannot be parsed as XML."""
    pass


620
class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    """Runs jobs with '--xunit -' and validates the emitted XML."""

    def setUp(self):
        # Private job-results dir; AbsPluginsTest.setUp() adds the shared
        # base output dir
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        """
        Run a job with the xunit plugin and check the testsuite counters.

        Note: e_nnotfound is kept for signature compatibility with the
        existing callers, but the xunit testsuite tag carries no such
        counter, so it is not checked.
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        # FIX: 'except ... as' replaces the deprecated 'except ..., detail'
        # spelling; both are valid on Python 2.6+, but only this one is
        # forward compatible.
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skip'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()

690 691 692 693 694

class ParseJSONError(Exception):

    """Raised when the JSON output produced by an avocado job cannot be parsed."""


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    """Checks the JSON result plugin through the avocado command line."""

    def setUp(self):
        # Per-test temporary dir for job results; removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        """
        Run avocado with the JSON plugin writing to stdout and verify counts.

        :param testname: test reference passed on the avocado command line
        :param e_rc: expected process exit status
        :param e_ntests: expected number of executed tests
        :param e_nerrors: expected number of test errors
        :param e_nfailures: expected number of test failures
        :param e_nskip: expected number of skipped tests
        :return: the decoded JSON result data
        :raises ParseJSONError: if stdout cannot be parsed as JSON
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        # 'except ... as ...' works on Python 2.6+ and is required on Python 3
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        """A passing test must yield 1 test and no error/failure/skip counts."""
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        """A failing test must be counted as a failure in the JSON output."""
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        """A test skipped during setUp must be counted as a skip; job passes."""
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        """An erroring test must be counted as an error in the JSON output."""
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    def test_ugly_echo_cmd(self):
        """A test url with shell special chars must survive into the results."""
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        """Remove the temporary job-results dir, then run the base teardown."""
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

# Allow running this selftest module directly as a script.
if __name__ == '__main__':
    unittest.main()