import glob
import json
import os
import shutil
import sys
import tempfile
import time
import xml.dom.minidom

# unittest2 backports the unittest improvements (e.g. assertIn,
# skipTest) needed by this suite to Python 2.6
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.core import exit_codes
from avocado.utils import process
from avocado.utils import script

# Root of the avocado source tree (two levels up from this file), so the
# functional tests below can invoke ./scripts/avocado regardless of the
# directory pytest/unittest was started from.
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


# Shell script bodies used as "simple tests" (exit status == test status)
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

# Helper module + test used to check that a test can import a module
# that lives next to it on disk
HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

class RunnerOperationTest(unittest.TestCase):

    """
    Functional checks for the 'avocado run' command line application:
    exit codes, human/JSON/xunit output and job results handling.
    """

    def setUp(self):
        # Private job results dir, removed in tearDown
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest passtest' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s datadir' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        # A test importing a sibling module (mylib.py) must still run
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest failtest passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s bogustest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        # doublefail fails in the test body AND raises in tearDown; both
        # messages must make it into the job log
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - doublefail' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - timeouttest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestTimeoutError: Timeout reached waiting for", output,
                      "Test did not fail with timeout exception:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - abort' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test process aborted'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest --silent' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stderr, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No tests found for given urls'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        # chdir explicitly: the relative ./scripts/avocado path must not
        # depend on a previous test having set the cwd
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar passtest'
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 passtest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off passtest --json -' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's an hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        # Poll up to ~5s for the link to appear while the job still runs
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest failtest "
               "errortest --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a "
               "foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        # Context manager so the log file descriptor is not leaked
        with open(debuglog, 'r') as debuglog_file:
            log = debuglog_file.read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 3)
        for i in xrange(3):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 3)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
class RunnerHumanOutputTest(unittest.TestCase):

    """
    Checks the human-readable (console) output of 'avocado run' for each
    test status, plus shell-quoting and replay behavior.
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s failtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s errortest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s skiponsetup' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    def test_ugly_echo_cmd(self):
        # A command full of shell special chars must survive quoting, both
        # in the console output and in the escaped results directory name
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run "/bin/echo -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS /bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        # chdir explicitly: the relative ./scripts/avocado path must not
        # depend on a previous test having set the cwd
        os.chdir(basedir)
        result = process.run("./scripts/avocado run skiponsetup --json -")
        result = json.loads(result.stdout)
        jobid = result["job_id"]
        process.run(str("./scripts/avocado run --replay %s "
                        "--replay-test-status PASS" % jobid))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
class RunnerSimpleTest(unittest.TestCase):

    """
    Runs "simple tests" (plain shell scripts whose exit status is the
    test status) through the avocado runner.
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'avocado_fail.sh',
            FAIL_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and assure they not take more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = 'sleeptest ' + 'failtest ' * 100 + 'sleeptest'
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        # Run a test referenced by a bare file name, with cwd set to the
        # directory that contains it
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)
class ExternalRunnerTest(unittest.TestCase):

    """
    Checks the --external-runner feature, using /bin/sh to execute
    plain shell snippets as tests.
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        # --external-runner-chdir=test without --external-runner-testdir
        # must be rejected with a clear error message
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)
class AbsPluginsTest(object):

    """
    Base mixin for plugin functional tests: provides a temporary output
    directory.  Not a TestCase itself; concrete classes combine it with
    unittest.TestCase (see PluginsTest below).
    """

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)

class PluginsTest(AbsPluginsTest, unittest.TestCase):

    """Functional checks for the auxiliary avocado sub-commands (plugins)."""

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        # BUGFIX: the original compared against (2, 7, 0), but
        # sys.version_info[:2] is a 2-tuple and (2, 7) < (2, 7, 0) under
        # tuple ordering, so the assertion below never ran
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)
636 637 638 639
class ParseXMLError(Exception):

    """Raised when the xunit XML output cannot be parsed."""


640
class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    """Checks the xunit result plugin output for several test outcomes."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        """
        Run a single-test job with xunit output on stdout and validate it.

        Checks the job exit code and the tests/errors/failures/skip counters
        of the (single) testsuite tag in the emitted XML.

        NOTE: e_nnotfound is accepted for interface compatibility but is not
        checked — the xunit output has no corresponding counter.

        :raises ParseXMLError: if stdout is not well-formed XML
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        # 'except E as x' is valid on Python 2.6+ (this file's floor) and
        # required on 3.x, unlike the old 'except E, x' form.
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skip'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        """A passing test yields rc 0 and clean counters."""
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        """A failing test yields the fail rc and one failure counted."""
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        """A test skipped during setUp yields rc 0 and one skip counted."""
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        """An erroring test yields the fail rc and one error counted."""
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()

710 711 712 713 714

class ParseJSONError(Exception):

    """Raised when the JSON job output cannot be parsed."""


715
class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    """Checks the JSON result plugin output for several test outcomes."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        """
        Run a single-test job with JSON output on stdout and validate it.

        Checks the job exit code and the tests/errors/failures/skip counters
        in the parsed JSON document.

        :raises ParseJSONError: if stdout is not valid JSON
        :return: the parsed JSON result data
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        # 'except E as x' is valid on Python 2.6+ (this file's floor) and
        # required on 3.x, unlike the old 'except E, x' form.
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        # Messages below name the specific counter (previously copy-pasted
        # as "expected tests" for all of them).
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        """A passing test yields rc 0 and clean counters."""
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        """A failing test yields the fail rc and one failure counted."""
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        """A test skipped during setUp yields rc 0 and one skip counted."""
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        """An erroring test yields the fail rc and one error counted."""
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    def test_ugly_echo_cmd(self):
        """Special characters in a simple-test URL survive into the results."""
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

786 787
# Allow invoking this module directly, outside of a test runner.
if __name__ == '__main__':
    unittest.main()