test_basic.py 37.8 KB
Newer Older
1
import json
2
import os
3
import shutil
4
import time
5
import sys
6
import tempfile
7
import xml.dom.minidom
8
import glob
9 10
import aexpect
import signal
11
import re
12

13 14 15 16 17
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

18
from avocado.core import exit_codes
19 20 21 22
from avocado.utils import process
from avocado.utils import script


23
# Repository root (two levels up from this file); used as the cwd for all
# './scripts/avocado' invocations below.
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


# Minimal executable shell script that always succeeds (SIMPLE test, PASS)
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

# Shell snippet fed to an external runner (/bin/sh) that exits successfully
PASS_SHELL_CONTENTS = "exit 0"

# Minimal executable shell script that always fails (SIMPLE test, FAIL)
FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

# Shell snippet fed to an external runner (/bin/sh) that exits with failure
FAIL_SHELL_CONTENTS = "exit 1"

# Helper module + test used to verify that a test can import a sibling
# module living in its own directory (local imports)
HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

53 54 55

class RunnerOperationTest(unittest.TestCase):

    """
    Functional checks of basic runner operations, driving the
    './scripts/avocado' entry point as an external process.
    """

    def setUp(self):
        # Private job-results directory for each test, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        # NOTE: the version banner is printed to stderr, not stdout
        self.assertTrue(re.match(r"^Avocado \d+\.\d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d\\.\\d\\.\\"
                        "d':\n%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        # The section header must end with a newline, otherwise the first
        # key would be appended to the header line, producing an invalid
        # config file ("[datadir.paths]base_dir = ...")
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest passtest' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s datadir' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        """
        Tests that a test can import a helper module from its own directory.
        """
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        # The test file is written next to mylib.py so the import resolves
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest failtest passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s bogustest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        """
        A test failing in both test() and tearDown() must report both errors.
        """
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - doublefail' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        # An uncaught exception maps to ERROR status, not FAIL
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - timeouttest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestTimeoutError: Timeout reached waiting for", output,
                      "Test did not fail with timeout exception:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - abort' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test process aborted'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        # --silent must suppress all stdout output
        os.chdir(basedir)
        cmd_line = './scripts/avocado --silent run --sysinfo=off --job-results-dir %s passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No tests found for given urls'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        # The discovery error must go to stderr only
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar passtest'
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 passtest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off passtest --json -' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's an hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        # Calling skip() outside setUp() is an error, not a skip
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
        # Poll for up to ~5s for the link to show up while the job runs
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                break
        # Always reap the child, even when the link never showed up,
        # so a failing assertion below does not leak a running process
        avocado_process.wait()
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest failtest "
               "errortest --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a "
               "foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        # Close the handle explicitly instead of leaking it
        with open(debuglog, 'r') as debuglog_file:
            log = debuglog_file.read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 3)
        for i in xrange(3):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 3)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

341

342 343 344
class RunnerHumanOutputTest(unittest.TestCase):

    """
    Checks the human-readable (console) output produced for each test
    status (PASS/FAIL/ERROR/SKIP) and some special command lines.
    """

    def setUp(self):
        # Private job-results directory for each test, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s failtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s errortest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        # A skipped test still yields an overall ALL_OK job status
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s skiponsetup' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    def test_ugly_echo_cmd(self):
        """
        Runs /bin/echo with shell-special characters (backslashes, quotes,
        newline escapes, a slash) as a SIMPLE test and checks both the
        captured output and the escaping of the result directory name.
        """
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run "/bin/echo -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS /bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        # Replay a job whose only test was skipped, filtering on PASS;
        # only checks that the replay invocation itself succeeds
        result = process.run("./scripts/avocado run skiponsetup --json -")
        result = json.loads(result.stdout)
        jobid = result["job_id"]
        process.run(str("./scripts/avocado run --replay %s "
                        "--replay-test-status PASS" % jobid))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

424

425
class RunnerSimpleTest(unittest.TestCase):

    """
    Runs SIMPLE tests (plain executable scripts) and checks the runner's
    behavior: statuses, timing and signal handling.
    """

    def setUp(self):
        # Private job-results directory plus one passing and one failing
        # shell script saved to disk; all removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and ensure they do not take more than 30 seconds.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = 'sleeptest ' + 'failtest ' * 100 + 'sleeptest'
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        # 2 x 1s sleeptest + overhead; 33s is a generous ceiling
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        # Run a test given by a bare filename, relative to the current dir
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_kill_stopped_sleep(self):
        """
        Freezes (SIGTSTP) a test running 'sleep 60' with a 3s job timeout
        and checks avocado still finishes and reports within 9 seconds.
        """
        sleep = process.run("which sleep", ignore_status=True, shell=True)
        if sleep.exit_status:
            self.skipTest("Sleep binary not found in PATH")
        sleep = "'%s 60'" % sleep.stdout.strip()
        proc = aexpect.Expect("./scripts/avocado run %s --job-results-dir %s "
                              "--sysinfo=off --job-timeout 3"
                              % (sleep, self.tmpdir))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            # Message matches the actual 9s deadline above
            self.fail("Avocado process still alive 9s after job-timeout:\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
        self.assertEqual(proc.get_status(), 1, "Avocado did not finish with "
                         "1.")

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)
564 565


566
class ExternalRunnerTest(unittest.TestCase):

    """
    Exercises the --external-runner feature, using /bin/sh to execute
    plain shell snippets as tests.
    """

    def setUp(self):
        # Job results dir plus the two shell snippets used as test payloads
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass', PASS_SHELL_CONTENTS, 'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail', FAIL_SHELL_CONTENTS, 'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        # A snippet exiting 0 run via /bin/sh must produce an all-ok job
        os.chdir(basedir)
        run_cmd = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
                   % (self.tmpdir, self.pass_script.path))
        result = process.run(run_cmd, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        # A snippet exiting 1 run via /bin/sh must produce a failed job
        os.chdir(basedir)
        run_cmd = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
                   % (self.tmpdir, self.fail_script.path))
        result = process.run(run_cmd, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        # --external-runner-chdir=test without --external-runner-testdir
        # must be rejected with a clear error on stderr
        os.chdir(basedir)
        run_cmd = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                   '--external-runner-chdir=test %s'
                   % (self.tmpdir, self.pass_script.path))
        result = process.run(run_cmd, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        # An external runner with no test urls at all is a job failure
        os.chdir(basedir)
        run_cmd = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                   '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(run_cmd, ignore_status=True)
        expected_output = ('No tests found for given urls')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


633
class AbsPluginsTest(object):

    """
    Mixin providing a temporary output directory for plugin tests.

    Not a TestCase itself; concrete subclasses also inherit from
    unittest.TestCase (e.g. PluginsTest below).
    """

    def setUp(self):
        # Scratch directory for plugin output, removed in tearDown()
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    """Functional checks for the builtin avocado command line plugins."""

    def test_sysinfo_plugin(self):
        """The sysinfo command must succeed and collect at least one file."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        """The list command must succeed and report existing tests."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        """Listing a nonexistent test must fail with a discovery error."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        """The plugins command must succeed and list no disabled plugins."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        # BUGFIX: the original compared sys.version_info[:2] (a 2-tuple)
        # against (2, 7, 0) (a 3-tuple).  By Python tuple ordering a
        # prefix sorts first, so (2, 7) >= (2, 7, 0) is False and the
        # check was silently skipped on Python 2.7, where it was meant
        # to run.  Compare 2-tuple against 2-tuple instead.
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        """The config command must succeed and list no disabled plugins."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        """The config --datadir command must succeed as well."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_Namespace_object_has_no_attribute(self):
        """Regression check: no argparse Namespace errors on stderr."""
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)

722

723 724 725 726
class ParseXMLError(Exception):

    """Raised when the xunit plugin output cannot be parsed as XML."""


727
class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    """Functional checks for the xunit result plugin."""

    def setUp(self):
        # Job results go to a private temporary dir, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        """Run a job with xunit output on stdout and verify its counters.

        NOTE(review): e_nnotfound is accepted but never checked against
        the XML; kept as-is for call-site compatibility — confirm intent.
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuites = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuites), 1, 'More than one testsuite tag')

        testsuite_tag = testsuites[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        # Compare each counter attribute of the testsuite tag with the
        # expected value; messages match the original per-field asserts.
        counter_checks = (('tests', e_ntests, "executed tests"),
                          ('errors', e_nerrors, "test errors"),
                          ('failures', e_nfailures, "test failures"),
                          ('skip', e_nskip, "test skips"))
        for attr_name, expected, description in counter_checks:
            found = int(testsuite_tag.attributes[attr_name].value)
            self.assertEqual(found, expected,
                             "Unexpected number of %s, "
                             "XML:\n%s" % (description, xml_output))

    def test_xunit_plugin_passtest(self):
        """A passing test must produce a clean xunit testsuite."""
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        """A failing test must be counted as a failure."""
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        """A test skipped during setUp must be counted as a skip."""
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        """An erroring test must be counted as an error."""
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()

797 798 799 800 801

class ParseJSONError(Exception):

    """Raised when the JSON plugin output cannot be parsed as JSON."""


802
class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    """Functional checks for the JSON result plugin."""

    def setUp(self):
        # Job results go to a private temporary dir, removed in tearDown()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        """Run a job with JSON output on stdout, verify and return it."""
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        self.assertEqual(len(json_data['tests']), e_ntests,
                         "Different number of expected tests")
        self.assertEqual(json_data['errors'], e_nerrors,
                         "Different number of expected tests")
        self.assertEqual(json_data['failures'], e_nfailures,
                         "Different number of expected tests")
        self.assertEqual(json_data['skip'], e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        """A passing test must yield a single passing entry."""
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        """A failing test must be counted as a failure."""
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        """A test skipped during setUp must be counted as a skip."""
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        """An erroring test must be counted as an error."""
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    def test_ugly_echo_cmd(self):
        """Special characters in the test URL must be escaped in logdir."""
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

873 874
# Allow running this test module directly via the stdlib unittest runner.
if __name__ == '__main__':
    unittest.main()