import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom

import aexpect

if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado.core import exit_codes
from avocado.utils import process
from avocado.utils import script


basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)


PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+\.\d+$", result.stderr),
                        "Version string does not match "
                        "'Avocado \\d+\\.\\d+\\.\\d+':\n%r" % (result.stderr))

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest passtest' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s datadir' % self.tmpdir
        process.run(cmd_line)

    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/datadir.py '
                    'examples/tests/datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s examples/tests/passtest.py "
                    "examples/tests/passtest.py" % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
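        # The test and the library it imports are written to the same
        # temporary directory, so "from mylib import hello" only works if
        # the runner makes the test's own directory importable.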
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest failtest passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s bogustest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - doublefail' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
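        # An exception that escapes the test method should be reported as
        # ERROR (compare with test_fail_on_exception below, which expects
        # FAIL).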
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - timeouttest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestTimeoutError: Timeout reached waiting for", output,
                      "Test did not fail with timeout exception:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s --xunit - abort' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test process aborted'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado --silent run --sysinfo=off --job-results-dir %s passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stderr, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No tests found for given urls'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar passtest'
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 passtest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off passtest --json -' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
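        # Poll for up to ~5 seconds: the 'latest' symlink should appear as
        # soon as the job starts, well before the job finishes.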
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest failtest "
               "errortest --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a "
               "foo:bar:b foo:baz:c bar:bar:bar --dry-run")
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        with open(debuglog, 'r') as debug_file:
            log = debug_file.read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 3)
        for i in xrange(3):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2 is in the tree, but not in any leave so inaccessible
        # from test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 3)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s passtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s failtest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s errortest' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off --job-results-dir %s skiponsetup' % self.tmpdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        os.chdir(basedir)
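        # The test URL below deliberately contains backslashes, quotes and
        # a slash; both the job log and the results dir name must carry
        # them through (suitably escaped).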
        cmd_line = ('./scripts/avocado run "/bin/echo -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stderr, result)
        self.assertIn('[stdout] \'"', result.stderr, result)
        self.assertIn('[stdout] bar/baz', result.stderr, result)
        self.assertIn('PASS /bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stderr, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "Expected exactly one directory "
                         "in test-results dir, as only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
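        # Run a job whose only test SKIPs, then replay it restricted to
        # PASS results: the skipped test should simply not be re-run.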
        result = process.run("./scripts/avocado run skiponsetup --json -")
        result = json.loads(result.stdout)
        jobid = result["job_id"]
        process.run(str("./scripts/avocado run --replay %s "
                        "--replay-test-status PASS" % jobid))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()

    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and assure they don't take more than 30 seconds to run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second; let's sandwich 100 failtests
        between two sleeptests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = 'sleeptest ' + 'failtest ' * 100 + 'sleeptest'
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off %s' % (
            self.tmpdir, sleep_fail_sleep)
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stderr, result)
        self.assertIn('INFO | Info message', result.stderr, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stderr, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stderr, result)

    def test_non_absolute_path(self):
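        # Run avocado from inside the script's own directory, passing the
        # test by bare file name, to check that relative paths are resolved.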
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_kill_stopped_sleep(self):
        sleep = process.run("which sleep", ignore_status=True, shell=True)
        if sleep.exit_status:
            self.skipTest("Sleep binary not found in PATH")
        sleep = "'%s 60'" % sleep.stdout.strip()
        proc = aexpect.Expect("./scripts/avocado run %s --job-results-dir %s "
                              "--sysinfo=off --job-timeout 3"
                              % (sleep, self.tmpdir))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
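        # With the test frozen it can never finish, so the 3s job-timeout
        # set above should make the runner give up and exit on its own.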
        deadline = time.time() + 5
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive after the job-timeout "
                      "deadline:\n%s" % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectadly")
        self.assertEqual(proc.get_status(), 1, "Avocado did not finish with "
                         "1.")

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --external-runner=/bin/sh '
                    '--external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):
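    """
    Mixin holding the shared setUp/tearDown for the plugin tests below;
    deliberately not a unittest.TestCase itself, so it is not collected
    as a test on its own.
    """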

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
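        # Sanity-check the <testsuite> element: 7 attributes are expected
        # (presumably name, tests, errors, failures, skip, time and
        # timestamp).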
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skip'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --json - --archive %s' %
                    (self.tmpdir, testname))
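        # --archive additionally packs the job results into a zip file;
        # here it is only exercised for its exit code, the checks below
        # run on the JSON printed to stdout.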
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

if __name__ == '__main__':
    unittest.main()