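"""
Functional tests for avocado's basic command line operation: the test
runner, the human/xunit/JSON result output and auxiliary plugins.
"""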
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom

if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

import aexpect

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import process
from avocado.utils import script

basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)

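# Payloads for the tests below: executable scripts (with shebang) are run
# directly as SIMPLE tests; the bare "exit 0"/"exit 1" snippets are fed to
# /bin/sh by the external-runner tests.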
PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

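# A helper library and a test that imports it from its own directory, used to
# check that tests can import modules placed alongside them.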
HELLO_LIB_CONTENTS = """
def hello():
    return 'Hello world'
"""

LOCAL_IMPORT_TEST_CONTENTS = '''
from avocado import Test
from mylib import hello

class LocalImportTest(Test):
    def test(self):
        self.log.info(hello())
'''

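# Overrides run_avocado() to report a status the runner does not understand;
# avocado is expected to turn this into an ERROR.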
UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        self.status = 'not supported'

    def test(self):
        pass
'''

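# The class body references an undefined name, so the module cannot even be
# loaded; avocado should report this as a TestError.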
INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''

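# Pushes a status message into the runner queue and then hangs, exercising
# the handling of tests that report status but never finish.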
REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(60)
'''

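# Kills its own process without pushing any status message to the runner
# queue, exercising the "died without reporting the status" error path.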
DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_show_version(self):
        result = process.run('./scripts/avocado -v', ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stderr))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.iteritems():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
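        # The generated file ends up looking like:
        #   [datadir.paths]
        #   base_dir = <tmpdir>/datadir_base
        #   test_dir = <tmpdir>/datadir_base/test
        #   ...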
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config)
        os.close(fd)

        os.chdir(basedir)
        cmd = './scripts/avocado --config %s config --datadir' % config_file
        result = process.run(cmd)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout)

    def test_runner_all_ok(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % self.tmpdir)
        process.run(cmd_line)

    def test_runner_failfast(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on' %
                    self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn('Interrupting job (failfast).', result.stdout)
        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_datadir_alias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'datadir.py' % self.tmpdir)
        process.run(cmd_line)

    def test_shell_alias(self):
        """ Tests that .sh files are also executable via alias """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'env_variables.sh' % self.tmpdir)
        process.run(cmd_line)

    def test_datadir_noalias(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/datadir.py examples/tests/datadir.py'
                    % self.tmpdir)
        process.run(cmd_line)

    def test_runner_noalias(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "examples/tests/passtest.py examples/tests/passtest.py"
                    % self.tmpdir)
        process.run(cmd_line)

    def test_runner_test_with_local_imports(self):
        mylib = script.TemporaryScript(
            'mylib.py',
            HELLO_LIB_CONTENTS,
            'avocado_simpletest_functional')
        mylib.save()
        mytest = script.Script(
            os.path.join(os.path.dirname(mylib.path), 'test_local_imports.py'),
            LOCAL_IMPORT_TEST_CONTENTS)
        os.chdir(basedir)
        mytest.save()
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "%s" % (self.tmpdir, mytest))
        process.run(cmd_line)

    def test_unsupported_status(self):
        os.chdir(basedir)
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    def test_hanged_test_with_status(self):
        """ Check that avocado handles hung tests properly """
        os.chdir(basedir)
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            self.assertLess(res.duration, 40, "Test execution took too long, "
                            "which is likely because the hung test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        os.chdir(basedir)
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("./scripts/avocado run --sysinfo=off "
                              "--job-results-dir %s %s --json -"
                              % (self.tmpdir, tst), ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test died without reporting the status",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir '
                    '%s bogustest' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("TestError: Failing during tearDown. Yay!", output,
                      "Cleanup exception not printed to log output")
        self.assertIn("TestFail: This test is supposed to fail",
                      output,
                      "Test did not fail with action exception:\n%s" % output)

    def test_uncaught_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "FAIL"', result.stdout)

    def test_runner_timeout(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn("Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s"
                      % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)

    def test_runner_abort(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        excerpt = 'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, output)

    def test_silent_output(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado --silent run --sysinfo=off '
                    '--job-results-dir %s passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        expected_output = ''
        self.assertEqual(result.exit_status, expected_rc)
        self.assertEqual(result.stdout, expected_output)

    def test_empty_args_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_FAIL
        expected_output = 'error: too few arguments'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_empty_test_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        expected_output = 'No urls provided nor any arguments produced'
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn(expected_output, result.stderr)

    def test_not_found(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc)
        self.assertIn('Unable to discover url', result.stderr)
        self.assertNotIn('Unable to discover url', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('./scripts/avocado run --sysinfo=off --force-job-id foobar'
                    ' passtest.py')
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('needs to be a 40 digit hex', result.stderr)
        self.assertNotIn('needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
        self.assertIn('PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)

    def test_skip_outside_setup(self):
        os.chdir(basedir)
        cmd_line = ("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                    "--json - skip_outside_setup.py" % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn('"status": "ERROR"', result.stdout)

    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % self.tmpdir)
        avocado_process = process.SubProcess(cmd_line)
        avocado_process.start()
        link = os.path.join(self.tmpdir, 'latest')
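        # Poll for up to ~5 seconds (50 * 0.1s) for the 'latest' symlink to
        # show up while the job is still running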
        for trial in xrange(0, 50):
            time.sleep(0.1)
            if os.path.exists(link) and os.path.islink(link):
                avocado_process.wait()
                break
        self.assertTrue(os.path.exists(link))
        self.assertTrue(os.path.islink(link))

    def test_dry_run(self):
        os.chdir(basedir)
        cmd = ("./scripts/avocado run --sysinfo=off passtest.py failtest.py "
               "gendata.py --json - --mux-inject foo:1 bar:2 baz:3 foo:foo:a"
               " foo:bar:b foo:baz:c bar:bar:bar --dry-run")
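        # The --mux-inject entries above use [node:]key:value syntax; values
        # without an explicit node (foo:1, bar:2, baz:3) go to the root node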
        result = json.loads(process.run(cmd).stdout)
        debuglog = result['debuglog']
        log = open(debuglog, 'r').read()
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn('/tmp', debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were skipped
        self.assertEqual(result['skip'], 4)
        for i in xrange(4):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test skipped due to --dry-run')
        # Check if all params are listed
        # The "/:bar ==> 2" entry is in the tree, but not in any leaf, so it
        # is inaccessible from the test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), 4)

    def test_invalid_python(self):
        os.chdir(basedir)
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = './scripts/avocado --show test run --sysinfo=off '\
                   '--job-results-dir %s %s' % (self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def test_output_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        os.chdir(basedir)
512 513
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_skip(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --sysinfo=off --job-results-dir %s '
                    'skiponsetup.py' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('skiponsetup.py:SkipOnSetupTest.test_wont_be_executed:'
                      '  SKIP', result.stdout)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        os.chdir(basedir)
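        # The echo argument embeds newlines, quotes and a slash; backslashes
        # are escaped twice, once for Python and once for the shell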
        cmd_line = ('./scripts/avocado run "/bin/echo -ne '
                    'foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off  --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('[stdout] foo', result.stdout, result)
        self.assertIn('[stdout] \'"', result.stdout, result)
        self.assertIn('[stdout] bar/baz', result.stdout, result)
        self.assertIn('PASS 1-/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         '1-_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def test_replay_skip_skipped(self):
        result = process.run("./scripts/avocado run skiponsetup.py --json -")
        result = json.loads(result.stdout)
        jobid = result["job_id"]
        process.run(str("./scripts/avocado run --replay %s "
                        "--replay-test-status PASS" % jobid))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'avocado_pass.sh',
            PASS_SCRIPT_CONTENTS,
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  FAIL_SCRIPT_CONTENTS,
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()
    def test_simpletest_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and ensure they don't take more than 30 seconds to
        run.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        os.chdir(basedir)
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        os.chdir(basedir)
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' %s' % (self.tmpdir, sleep_fail_sleep))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh --show-job-log' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn('DEBUG| Debug message', result.stdout, result)
        self.assertIn('INFO | Info message', result.stdout, result)
        self.assertIn('WARN | Warning message (should cause this test to '
                      'finish with warning)', result.stdout, result)
        self.assertIn('ERROR| Error message (ordinary message not changing '
                      'the results)', result.stdout, result)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(basedir, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        test_file_name = os.path.basename(self.pass_script.path)
        os.chdir(test_base_dir)
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (avocado_path, self.tmpdir, test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_kill_stopped_sleep(self):
        sleep = process.run("which sleep", ignore_status=True, shell=True)
        if sleep.exit_status:
            self.skipTest("Sleep binary not found in PATH")
        sleep = "'%s 60'" % sleep.stdout.strip()
        proc = aexpect.Expect("./scripts/avocado run %s --job-results-dir %s "
                              "--sysinfo=off --job-timeout 3"
                              % (sleep, self.tmpdir))
        proc.read_until_output_matches(["\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need pid of the avocado, not the shell executing it
        pid = int(process.get_children_pids(proc.get_pid())[0])
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
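        # The job was started with --job-timeout 3; allow a few extra seconds
        # of grace time for avocado to wind down before declaring it stuck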
        deadline = time.time() + 9
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 9s after the job was "
                      "started (job-timeout=3):\n%s"
                      % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectedly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "exit status 8 (AVOCADO_JOB_INTERRUPTED).")
        sleep_dir = astring.string_to_safe_path("1-" + sleep[1:-1])
        debug_log = os.path.join(self.tmpdir, "latest", "test-results",
                                 sleep_dir, "debug.log")
        debug_log = open(debug_log).read()
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "supposed to be stopped and unable to produce it.\n"
                         "%s" % debug_log)

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_externalrunner_functional')
        self.fail_script.save()

    def test_externalrunner_pass(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --external-runner=/bin/sh %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_fail(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --external-runner=/bin/sh %s')
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --external-runner=/bin/sh'
                    ' --external-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = ('Option "--external-runner-chdir=test" requires '
                           '"--external-runner-testdir" to be set')
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_externalrunner_no_url(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/true' % self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
777
        expected_output = ('No urls provided nor any arguments produced')
778 779
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
C
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class AbsPluginsTest(object):

    def setUp(self):
        self.base_outputdir = tempfile.mkdtemp(prefix='avocado_' + __name__)

    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

801 802 803 804
    def test_sysinfo_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('No tests were found on current tests dir', output)

    def test_list_error_output(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado list sbrubles'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn("Unable to discover url", output)

    def test_plugin_list(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        if sys.version_info[:2] >= (2, 7):
            self.assertNotIn('Disabled', output)

    def test_config_plugin(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_config_plugin_datadir(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado config --datadir --paginator off'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn('Disabled', output)

    def test_Namespace_object_has_no_attribute(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado plugins'
        result = process.run(cmd_line, ignore_status=True)
        output = result.stderr
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn("'Namespace' object has no attribute", output)

class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):
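        # e_rc is the expected exit code; the remaining e_* arguments are the
        # expected counters in the generated xunit document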
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip):
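        # e_rc is the expected exit code; the remaining e_* arguments are the
        # expected counters in the generated JSON results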
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off'
                    ' --json - --archive %s' % (self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Different number of expected tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Different number of expected tests")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Different number of expected tests")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Different number of skipped tests")
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('skiponsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

    def test_ugly_echo_cmd(self):
        if not os.path.exists("/bin/echo"):
            self.skipTest("Program /bin/echo does not exist")
        data = self.run_and_check('"/bin/echo -ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['url'],
                         '1-/bin/echo -ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         '1-_bin_echo -ne foo\\\\n\\\'\\"\\\\nbar_baz')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()

if __name__ == '__main__':
    unittest.main()