import aexpect
import glob
import json
import os
import re
import shutil
import signal
import sys
import tempfile
import time
import xml.dom.minidom
import zipfile
import unittest
import psutil

try:
    import xmlschema
    SCHEMA_CAPABLE = True
except ImportError:
    SCHEMA_CAPABLE = False
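
# Note: SCHEMA_CAPABLE only records whether the optional xmlschema
# library could be imported. Tests that validate result XML against a
# schema are expected to guard themselves with it, along the lines of
# this sketch (illustrative only; the method name is hypothetical):
#
#   @unittest.skipUnless(SCHEMA_CAPABLE,
#                        'xmlschema library not available')
#   def test_xunit_output_is_schema_valid(self):
#       ...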

from avocado.core import exit_codes
from avocado.utils import astring
from avocado.utils import genio
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path

from .. import AVOCADO, BASEDIR, python_module_available, temp_dir_prefix


UNSUPPORTED_STATUS_TEST_CONTENTS = '''
from avocado import Test

class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        # Please do NOT ever use this, it's for unittesting only.
        self._Test__status = 'not supported'

    def test(self):
        pass
'''

INVALID_PYTHON_TEST = '''
from avocado import Test

class MyTest(Test):

    non_existing_variable_causing_crash

    def test_my_name(self):
        pass
'''


VALID_PYTHON_TEST_WITH_TAGS = '''
from avocado import Test

class MyTest(Test):
    def test(self):
         """
         :avocado: tags=BIG_TAG_NAME
         """
         pass
'''


REPORTS_STATUS_AND_HANG = '''
from avocado import Test
import time

class MyTest(Test):
    def test(self):
         self.runner_queue.put({"running": False})
         time.sleep(70)
'''


DIE_WITHOUT_REPORTING_STATUS = '''
from avocado import Test
import os
import signal

class MyTest(Test):
    def test(self):
         os.kill(os.getpid(), signal.SIGKILL)
'''


RAISE_CUSTOM_PATH_EXCEPTION_CONTENT = '''import os
import sys

from avocado import Test

class SharedLibTest(Test):
    def test(self):
        sys.path.append(os.path.join(os.path.dirname(__file__), "shared_lib"))
        from mylib import CancelExc
        raise CancelExc("This should not crash on unpickling in runner")
'''


TEST_OTHER_LOGGERS_CONTENT = '''
import logging
from avocado import Test

class My(Test):
    def test(self):
        logging.getLogger("some.other.logger").info("SHOULD BE ON debug.log")
'''


def probe_binary(binary):
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None


TRUE_CMD = probe_binary('true')
CC_BINARY = probe_binary('cc')

# On macOS, the default GNU coreutils installation (via brew) installs
# the GNU versions of the utilities with a "g" prefix. The BSD versions
# of the core utilities are still on their expected paths, but their
# behavior and flags are in most cases different.
GNU_ECHO_BINARY = probe_binary('echo')
if GNU_ECHO_BINARY is not None:
    if probe_binary('man') is not None:
        echo_cmd = 'man %s' % os.path.basename(GNU_ECHO_BINARY)
        echo_manpage = process.run(echo_cmd, env={'LANG': 'C'},
                                   encoding='ascii').stdout
        if b'-e' not in echo_manpage:
            GNU_ECHO_BINARY = probe_binary('gecho')
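
# (If the man page does not advertise "-e" and "gecho" is not installed
# either, GNU_ECHO_BINARY ends up None, and the echo-based tests below
# skip themselves through their skipIf guards.)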
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')
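
# Each probe above yields either an absolute path or None, so the tests
# below can skip themselves (via unittest.skipIf) on systems that lack
# one of these binaries.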


class RunnerOperationTest(unittest.TestCase):

    def setUp(self):
        prefix = temp_dir_prefix(__name__, self, 'setUp')
        self.tmpdir = tempfile.mkdtemp(prefix=prefix)
        os.chdir(BASEDIR)

    def test_show_version(self):
        result = process.run('%s -v' % AVOCADO, ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stdout_text),
                        "Version string does not match 'Avocado \\d+\\.\\d+':\n"
                        "%r" % (result.stdout_text))

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check that custom configuration is applied

        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir, 'datadir_base')
        os.mkdir(base_dir)
        mapping = {'base_dir': base_dir,
                   'test_dir': os.path.join(base_dir, 'test'),
                   'data_dir': os.path.join(base_dir, 'data'),
                   'logs_dir': os.path.join(base_dir, 'logs')}
        config = '[datadir.paths]\n'
        for key, value in mapping.items():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += "%s = %s\n" % (key, value)
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir)
        os.write(fd, config.encode())
        os.close(fd)

        cmd = '%s --config %s config --datadir' % (AVOCADO, config_file)
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('    base     ' + mapping['base_dir'], result.stdout_text)
        self.assertIn('    data     ' + mapping['data_dir'], result.stdout_text)
        self.assertIn('    logs     ' + mapping['logs_dir'], result.stdout_text)

    def test_runner_phases(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'phases.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_all_ok(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py passtest.py' % (AVOCADO, self.tmpdir))
        process.run(cmd_line)
        # Also check whether jobdata contains correct parameter paths
        variants = open(os.path.join(self.tmpdir, "latest", "jobdata",
                        "variants.json")).read()
        self.assertIn('["/run/*"]', variants, "paths stored in jobdata "
                      "do not contain [\"/run/*\"]\n%s" % variants)

    def test_runner_failfast(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py failtest.py passtest.py --failfast on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn(b'Interrupting job (failfast).', result.stdout)
        self.assertIn(b'PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_ignore_missing_references_one_missing(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py badtest.py --ignore-missing-references on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn(b"Unable to resolve reference(s) 'badtest.py'", result.stderr)
        self.assertIn(b'PASS 1 | ERROR 0 | FAIL 0 | SKIP 0', result.stdout)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_ignore_missing_references_all_missing(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'badtest.py badtest2.py --ignore-missing-references on'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn(b"Unable to resolve reference(s) 'badtest.py', 'badtest2.py'",
                      result.stderr)
        self.assertEqual(b'', result.stdout)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_test_with_local_imports(self):
        prefix = temp_dir_prefix(__name__, self,
                                 'test_runner_test_with_local_imports')
        libdir = tempfile.mkdtemp(prefix=prefix)
        with script.Script(os.path.join(libdir, 'mylib.py'),
                           "def hello():\n    return 'Hello world'"):
            with script.Script(
                os.path.join(libdir, 'test_local_imports.py'),
                ('from avocado import Test\n'
                 'from mylib import hello\n'
                 'class LocalImportTest(Test):\n'
                 '    def test(self):\n'
                 '        self.log.info(hello())\n')) as mytest:
                cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                            "%s" % (AVOCADO, self.tmpdir, mytest))
                process.run(cmd_line)
        shutil.rmtree(libdir)

    def test_unsupported_status(self):
        with script.TemporaryScript("fake_status.py",
                                    UNSUPPORTED_STATUS_TEST_CONTENTS,
                                    "avocado_unsupported_status") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s"
                              " --json -" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout_text)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Runner error occurred: Test reports unsupported",
                          results["tests"][0]["fail_reason"])

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_hanged_test_with_status(self):
        """ Check that avocado handles hung tests properly """
        with script.TemporaryScript("report_status_and_hang.py",
                                    REPORTS_STATUS_AND_HANG,
                                    "hanged_test_with_status") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s "
                              "--json - --job-timeout 1" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout_text)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reported status but did not finish",
                          results["tests"][0]["fail_reason"])
            # Currently it should finish up to 1s after the job-timeout,
            # but the prep and postprocess could take a bit longer on
            # some environments, so let's just check it stays safely
            # below the 60s deadline for force-finishing the test.
            self.assertLess(res.duration, 55, "Test execution took too long, "
                            "which is likely because the hung test was not "
                            "interrupted. Results:\n%s" % res)

    def test_no_status_reported(self):
        with script.TemporaryScript("die_without_reporting_status.py",
                                    DIE_WITHOUT_REPORTING_STATUS,
                                    "no_status_reported") as tst:
            res = process.run("%s run --sysinfo=off --job-results-dir %s %s "
                              "--json -" % (AVOCADO, self.tmpdir, tst),
                              ignore_status=True)
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout_text)
            self.assertEqual(results["tests"][0]["status"], "ERROR",
                             "%s != %s\n%s" % (results["tests"][0]["status"],
                                               "ERROR", res))
            self.assertIn("Test reports unsupported test status",
                          results["tests"][0]["fail_reason"])
            self.assertIn("status: None",
                          results["tests"][0]["fail_reason"])

    def test_runner_tests_fail(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s passtest.py '
                    'failtest.py passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_nonexistent_test(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir '
                    '%s bogustest' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_runner_doublefail(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - doublefail.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(b"TestError: Failing during tearDown. Yay!", result.stdout,
                      "Cleanup exception not printed to log output")
        self.assertIn(b"TestFail: This test is supposed to fail", result.stdout,
                      "Test did not fail with action exception:\n%s" % result.stdout)

    def test_uncaught_exception(self):
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "--json - uncaught_exception.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn(b'"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "--json - fail_on_exception.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn(b'"status": "FAIL"', result.stdout)

    def test_assert_raises(self):
        cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
                    "-- assert.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                result))
        self.assertIn(b'Assert.test_assert_raises:  PASS', result.stdout)
        self.assertIn(b'Assert.test_fails_to_raise:  FAIL', result.stdout)
        self.assertIn(b'PASS 1 | ERROR 0 | FAIL 1 ', result.stdout)

    def test_exception_not_in_path(self):
        os.mkdir(os.path.join(self.tmpdir, "shared_lib"))
        mylib = script.Script(os.path.join(self.tmpdir, "shared_lib",
                                           "mylib.py"),
                              "from avocado import TestCancel\n\n"
                              "class CancelExc(TestCancel):\n"
                              "    pass")
        mylib.save()
        mytest = script.Script(os.path.join(self.tmpdir, "mytest.py"),
                               RAISE_CUSTOM_PATH_EXCEPTION_CONTENT)
        mytest.save()
        result = process.run("%s --show test run --sysinfo=off "
                             "--job-results-dir %s %s"
                             % (AVOCADO, self.tmpdir, mytest))
        self.assertIn(b"mytest.py:SharedLibTest.test -> CancelExc: This "
                      b"should not crash on unpickling in runner",
                      result.stdout)
        self.assertNotIn(b"Failed to read queue", result.stdout)

    def test_runner_timeout(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - timeouttest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(b"Runner error occurred: Timeout reached", output,
                      "Timeout reached message not found in the output:\n%s" % output)
        # Ensure no test aborted error messages show up
        self.assertNotIn(b"TestAbortError: Test aborted unexpectedly", output)

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_abort(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    '--xunit - abort.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        excerpt = b'Test died without reporting the status.'
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(result.exit_status, unexpected_rc,
                            "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(excerpt, result.stdout)

    def test_silent_output(self):
        cmd_line = ('%s --show=none run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertEqual(result.stdout, b'')

    def test_empty_args_list(self):
        cmd_line = AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_FAIL)
        self.assertIn(b'avocado: error: the following arguments are required',
                      result.stderr)

    def test_empty_test_list(self):
        cmd_line = '%s run --sysinfo=off --job-results-dir %s' % (AVOCADO,
                                                                  self.tmpdir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_JOB_FAIL)
        self.assertIn(b'No test references provided nor any other arguments '
                      b'resolved into tests', result.stderr)

    def test_not_found(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s sbrubles'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_JOB_FAIL)
        self.assertIn(b'Unable to resolve reference', result.stderr)
        self.assertNotIn(b'Unable to resolve reference', result.stdout)

    def test_invalid_unique_id(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s --force-job-id '
                    'foobar passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn(b'needs to be a 40 digit hex', result.stderr)
        self.assertNotIn(b'needs to be a 40 digit hex', result.stdout)

    def test_valid_unique_id(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn(b'needs to be a 40 digit hex', result.stderr)
        self.assertIn(b'PASS', result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    'passtest.py --json -' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout_text)
        int(r['job_id'], 16)  # it's a hex number
        self.assertEqual(len(r['job_id']), 40)
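        # Together, the two checks above pin down the automatic job id
        # format: a 40-character hexadecimal string (the size of a SHA-1
        # digest), matching the "40 digit hex" wording asserted by the
        # unique-id tests above.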

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        """
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'examples/tests/passtest.py' % (AVOCADO, self.tmpdir))
        avocado_process = process.SubProcess(cmd_line)
        try:
            avocado_process.start()
            link = os.path.join(self.tmpdir, 'latest')
            for _ in range(0, 50):
                time.sleep(0.1)
                if os.path.exists(link) and os.path.islink(link):
                    avocado_process.wait()
                    break
            self.assertTrue(os.path.exists(link))
            self.assertTrue(os.path.islink(link))
        finally:
            avocado_process.wait()

    def test_dry_run(self):
        cmd = ("%s run --sysinfo=off --dry-run --dry-run-no-cleanup --json - "
               "--mux-inject foo:1 bar:2 baz:3 foo:foo:a "
               "foo:bar:b foo:baz:c bar:bar:bar "
               "-- passtest.py failtest.py gendata.py " % AVOCADO)
        number_of_tests = 3
        result = json.loads(process.run(cmd).stdout_text)
        debuglog = result['debuglog']
        log = genio.read_file(debuglog)
        # Remove the result dir
        shutil.rmtree(os.path.dirname(os.path.dirname(debuglog)))
        self.assertIn(tempfile.gettempdir(), debuglog)   # Use tmp dir, not default location
        self.assertEqual(result['job_id'], u'0' * 40)
        # Check if all tests were cancelled
        self.assertEqual(result['cancel'], number_of_tests)
        for i in range(number_of_tests):
            test = result['tests'][i]
            self.assertEqual(test['fail_reason'],
                             u'Test cancelled due to --dry-run')
        # Check if all params are listed.
        # The "/:bar ==> 2" entry is in the tree, but not in any leaf,
        # so it is inaccessible from the test.
        for line in ("/:foo ==> 1", "/:baz ==> 3", "/foo:foo ==> a",
                     "/foo:bar ==> b", "/foo:baz ==> c", "/bar:bar ==> bar"):
            self.assertEqual(log.count(line), number_of_tests,
                             "Avocado log count for param '%s' not as expected:\n%s" % (line, log))

    def test_invalid_python(self):
        test = script.make_script(os.path.join(self.tmpdir, 'test.py'),
                                  INVALID_PYTHON_TEST)
        cmd_line = ('%s --show test run --sysinfo=off '
                    '--job-results-dir %s %s') % (AVOCADO, self.tmpdir, test)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
                      result.stdout_text)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_read(self):
        cmd = "%s run --sysinfo=off --job-results-dir %%s %%s" % AVOCADO
        cmd %= (self.tmpdir, READ_BINARY)
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(result.duration, 8, "Duration longer than expected."
                        "\n%s" % result)
        self.assertEqual(result.exit_status, 1, "Expected exit status is 1\n%s"
                         % result)

    def test_runner_test_parameters(self):
        cmd_line = ('%s --show=test run --sysinfo=off --job-results-dir %s '
                    '-p "sleep_length=0.01" -- sleeptest.py ' % (AVOCADO,
                                                                 self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))
        self.assertIn(b"PARAMS (key=sleep_length, path=*, default=1) => '0.01'",
                      result.stdout)
        self.assertIn(b"Sleeping for 0.01 seconds", result.stdout)

    def test_other_loggers(self):
        with script.TemporaryScript(
                'mytest.py',
                TEST_OTHER_LOGGERS_CONTENT,
                'avocado_functional_test_other_loggers') as mytest:

            cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                        '-- %s' % (AVOCADO, self.tmpdir, mytest))
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

            test_log_dir = glob.glob(os.path.join(self.tmpdir, 'job-*',
                                                  'test-results', '1-*'))[0]
            test_log_path = os.path.join(test_log_dir, 'debug.log')
            with open(test_log_path, 'rb') as test_log:
                self.assertIn(b'SHOULD BE ON debug.log', test_log.read())

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerHumanOutputTest(unittest.TestCase):

    def setUp(self):
        prefix = temp_dir_prefix(__name__, self, 'setUp')
        self.tmpdir = tempfile.mkdtemp(prefix=prefix)
        os.chdir(BASEDIR)

    def test_output_pass(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'passtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn(b'passtest.py:PassTest.test:  PASS', result.stdout)

    def test_output_fail(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'failtest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn(b'failtest.py:FailTest.test:  FAIL', result.stdout)

    def test_output_error(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'errortest.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn(b'errortest.py:ErrorTest.test:  ERROR', result.stdout)

    def test_output_cancel(self):
        cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                    'cancelonsetup.py' % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn(b'PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | '
                      b'INTERRUPT 0 | CANCEL 1',
                      result.stdout)

    @unittest.skipIf(not GNU_ECHO_BINARY,
                     'GNU style echo binary not available')
    def test_ugly_echo_cmd(self):
        cmd_line = ('%s --show=test run --external-runner "%s -ne" '
                    '"foo\\\\\\n\\\'\\\\\\"\\\\\\nbar/baz" --job-results-dir %s'
                    ' --sysinfo=off' %
                    (AVOCADO, GNU_ECHO_BINARY, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn(b'[stdout] foo', result.stdout, result)
        self.assertIn(b'[stdout] \'"', result.stdout, result)
        self.assertIn(b'[stdout] bar/baz', result.stdout, result)
        self.assertIn(b'PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
                      result.stdout, result)
        # logdir name should escape special chars (/)
        test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
                                           'test-results', '*'))
        self.assertEqual(len(test_dirs), 1, "There are multiple directories in"
                         " test-results dir, but only one test was executed: "
                         "%s" % (test_dirs))
        self.assertEqual(os.path.basename(test_dirs[0]),
                         "1-foo__n_'____nbar_baz")

    def test_replay_skip_skipped(self):
        cmd = ("%s run --job-results-dir %s --json - "
               "cancelonsetup.py" % (AVOCADO, self.tmpdir))
        result = process.run(cmd)
        result = json.loads(result.stdout_text)
        jobid = str(result["job_id"])
        cmd = ("%s run --job-results-dir %s --replay %s "
               "--replay-test-status PASS" % (AVOCADO, self.tmpdir, jobid))
        process.run(cmd)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)


class RunnerSimpleTest(unittest.TestCase):

    def setUp(self):
        prefix = temp_dir_prefix(__name__, self, 'setUp')
        self.tmpdir = tempfile.mkdtemp(prefix=prefix)
        self.pass_script = script.TemporaryScript(
            u'\u00e1 \u00e9 \u00ed \u00f3 \u00fa',
            "#!/bin/sh\ntrue",
            'avocado_simpletest_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript('avocado_fail.sh',
                                                  "#!/bin/sh\nfalse",
                                                  'avocado_simpletest_'
                                                  'functional')
        self.fail_script.save()
        os.chdir(BASEDIR)

    def test_simpletest_pass(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (AVOCADO, self.tmpdir, self.pass_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_simpletest_fail(self):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' %s' % (AVOCADO, self.tmpdir, self.fail_script.path))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and make sure they do not take more than 30 seconds.

        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        """
        one_hundred = 'failtest.py ' * 100
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
                    % (AVOCADO, self.tmpdir, one_hundred))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 30.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        """
        sleep_fail_sleep = ('sleeptest.py ' + 'failtest.py ' * 100 +
                            'sleeptest.py')
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s'
                    % (AVOCADO, self.tmpdir, sleep_fail_sleep))
        initial_time = time.time()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.time() - initial_time
        self.assertLess(actual_time, 33.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" % (expected_rc, result))

    def test_simplewarning(self):
        """
        simplewarning.sh uses the avocado-bash-utils
        """
        # simplewarning.sh calls "avocado" without specifying a path;
        # let's add the path that was defined at the global module
        # scope here
        os.environ['PATH'] += ":" + os.path.dirname(AVOCADO)
        # simplewarning.sh calls "avocado exec-path", which doesn't have
        # access to an installed location for the libexec scripts
        os.environ['PATH'] += ":" + os.path.join(BASEDIR, 'libexec')
        cmd_line = ('%s --show=test run --job-results-dir %s --sysinfo=off '
                    'examples/tests/simplewarning.sh'
                    % (AVOCADO, self.tmpdir))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %s:\n%s" %
                         (expected_rc, result))
        self.assertIn(b'DEBUG| Debug message', result.stdout, result)
        self.assertIn(b'INFO | Info message', result.stdout, result)
        self.assertIn(b'WARN | Warning message (should cause this test to '
                      b'finish with warning)', result.stdout, result)
        self.assertIn(b'ERROR| Error message (ordinary message not changing '
                      b'the results)', result.stdout, result)
        self.assertIn(b'Test passed but there were warnings', result.stdout,
                      result)

    @unittest.skipIf(not GNU_ECHO_BINARY, "Uses echo as test")
    def test_fs_unfriendly_run(self):
        os.chdir(BASEDIR)
        commands_path = os.path.join(self.tmpdir, "commands")
        script.make_script(commands_path, "echo '\"\\/|?*<>'")
        config_path = os.path.join(self.tmpdir, "config.conf")
        script.make_script(config_path,
                           "[sysinfo.collectibles]\ncommands = %s"
                           % commands_path)
        cmd_line = ("%s --show all --config %s run --job-results-dir %s "
                    "--sysinfo=on --external-runner %s -- \"'\\\"\\/|?*<>'\""
                    % (AVOCADO, config_path, self.tmpdir, GNU_ECHO_BINARY))
        process.run(cmd_line)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "latest",
                                                    "test-results",
                                                    "1-\'________\'/")))
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "latest",
                                                    "sysinfo", "pre",
                                                    "echo \'________\'")))

        if python_module_available('avocado-framework-plugin-result-html'):
            with open(os.path.join(self.tmpdir, "latest",
                                   "results.html")) as html_res:
                html_results = html_res.read()
            # test results should replace odd chars with "_"
            # HTML could contain either the literal char, or an entity reference
            test1_href = (os.path.join("test-results",
                                       "1-'________'") in html_results or
                          os.path.join("test-results",
                                       "1-&#39;________&#39;") in html_results)
            self.assertTrue(test1_href)
            # sysinfo replaces "_" with " "
            sysinfo = ("echo '________'" in html_results or
                       "echo &#39;________&#39;" in html_results)
            self.assertTrue(sysinfo)

    def test_non_absolute_path(self):
        avocado_path = os.path.join(BASEDIR, 'scripts', 'avocado')
        test_base_dir = os.path.dirname(self.pass_script.path)
        os.chdir(test_base_dir)
        test_file_name = os.path.basename(self.pass_script.path)
        cmd_line = ('%s %s run --job-results-dir %s --sysinfo=off'
                    ' "%s"' % (sys.executable, avocado_path, self.tmpdir,
                               test_file_name))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    @unittest.skipIf(not SLEEP_BINARY, 'sleep binary not available')
    @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                     "Skipping tests that take a long time to run, are "
                     "resource intensive or time sensitive")
    def test_kill_stopped_sleep(self):
        proc = aexpect.Expect("%s run 60 --job-results-dir %s "
                              "--external-runner %s --sysinfo=off "
                              "--job-timeout 3"
                              % (AVOCADO, self.tmpdir, SLEEP_BINARY))
        proc.read_until_output_matches([r"\(1/1\)"], timeout=3,
                                       internal_timeout=0.01)
        # We need the pid of the avocado process, not the shell executing it
        avocado_shell = psutil.Process(proc.get_pid())
        avocado_proc = avocado_shell.children()[0]
        pid = avocado_proc.pid
        os.kill(pid, signal.SIGTSTP)   # This freezes the process
        # The deadline is 3s timeout + 10s test postprocess before kill +
        # 10s reserve for additional steps (still below 60s)
        deadline = time.time() + 20
        while time.time() < deadline:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            proc.kill(signal.SIGKILL)
            self.fail("Avocado process still alive 17s after "
                      "job-timeout:\n%s" % proc.get_output())
        output = proc.get_output()
        self.assertIn("ctrl+z pressed, stopping test", output, "SIGTSTP "
                      "message not in the output, test was probably not "
                      "stopped.")
        self.assertIn("TIME", output, "TIME not in the output, avocado "
                      "probably died unexpectedly")
        self.assertEqual(proc.get_status(), 8, "Avocado did not finish with "
                         "status 8.")

        sleep_dir = astring.string_to_safe_path("1-60")
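        # ("1-60" is the test id: test #1 whose reference is "60", the
        #  argument handed to the sleep external runner above;
        #  string_to_safe_path maps it to the on-disk results dir name.)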
880 881 882 883
        debug_log_path = os.path.join(self.tmpdir, "latest", "test-results",
                                      sleep_dir, "debug.log")

        debug_log = genio.read_file(debug_log_path)
884 885 886 887 888 889 890
        self.assertIn("Runner error occurred: Timeout reached", debug_log,
                      "Runner error occurred: Timeout reached message not "
                      "in the test's debug.log:\n%s" % debug_log)
        self.assertNotIn("Traceback (most recent", debug_log, "Traceback "
                         "present in the test's debug.log file, but it was "
                         "suppose to be stopped and unable to produce it.\n"
                         "%s" % debug_log)
891

892
    def tearDown(self):
893 894
        self.pass_script.remove()
        self.fail_script.remove()
895
        shutil.rmtree(self.tmpdir)
896 897


A
Amador Pahim 已提交
898 899 900
class RunnerSimpleTestStatus(unittest.TestCase):

    def setUp(self):
901 902
        prefix = temp_dir_prefix(__name__, self, 'setUp')
        self.tmpdir = tempfile.mkdtemp(prefix=prefix)
A
Amador Pahim 已提交
903 904 905 906

        self.config_file = script.TemporaryScript('avocado.conf',
                                                  "[simpletests.status]\n"
                                                  "warn_regex = ^WARN$\n"
907 908
                                                  "skip_regex = ^SKIP$\n"
                                                  "skip_location = stdout\n")
A
Amador Pahim 已提交
909
        self.config_file.save()
910
        os.chdir(BASEDIR)
A
Amador Pahim 已提交
911 912

    def test_simpletest_status(self):
913
        # Multi-line warning in STDERR should by default be handled
A
Amador Pahim 已提交
914
        warn_script = script.TemporaryScript('avocado_warn.sh',
915
                                             '#!/bin/sh\n'
916
                                             '>&2 echo -e "\\n\\nWARN\\n"',
A
Amador Pahim 已提交
917 918 919 920 921 922
                                             'avocado_simpletest_'
                                             'functional')
        warn_script.save()
        cmd_line = ('%s --config %s run --job-results-dir %s --sysinfo=off'
                    ' %s --json -' % (AVOCADO, self.config_file.path,
                                      self.tmpdir, warn_script.path))
923 924
        result = process.run(cmd_line, ignore_status=True)
        json_results = json.loads(result.stdout_text)
925
        self.assertEqual(json_results['tests'][0]['status'], 'WARN')
A
Amador Pahim 已提交
926
        warn_script.remove()
927
        # Skip in STDOUT should be handled because of config
A
Amador Pahim 已提交
928 929 930 931 932 933 934 935
        skip_script = script.TemporaryScript('avocado_skip.sh',
                                             "#!/bin/sh\necho SKIP",
                                             'avocado_simpletest_'
                                             'functional')
        skip_script.save()
        cmd_line = ('%s --config %s run --job-results-dir %s --sysinfo=off'
                    ' %s --json -' % (AVOCADO, self.config_file.path,
                                      self.tmpdir, skip_script.path))
936 937
        result = process.run(cmd_line, ignore_status=True)
        json_results = json.loads(result.stdout_text)
938
        self.assertEqual(json_results['tests'][0]['status'], 'SKIP')
A
Amador Pahim 已提交
939
        skip_script.remove()
940 941 942 943 944 945 946 947 948 949 950
        # STDERR skip should not be handled
        skip2_script = script.TemporaryScript('avocado_skip.sh',
                                              "#!/bin/sh\n>&2 echo SKIP",
                                              'avocado_simpletest_'
                                              'functional')
        skip2_script.save()
        cmd_line = ('%s --config %s run --job-results-dir %s --sysinfo=off'
                    ' %s --json -' % (AVOCADO, self.config_file.path,
                                      self.tmpdir, skip2_script.path))
        result = process.run(cmd_line, ignore_status=True)
        json_results = json.loads(result.stdout_text)
951
        self.assertEqual(json_results['tests'][0]['status'], 'PASS')
952
        skip2_script.remove()
A
Amador Pahim 已提交
953 954 955 956 957 958

    def tearDown(self):
        self.config_file.remove()
        shutil.rmtree(self.tmpdir)


959
class ExternalRunnerTest(unittest.TestCase):
C
Cleber Rosa 已提交
960 961

    def setUp(self):
962 963
        prefix = temp_dir_prefix(__name__, self, 'setUp')
        self.tmpdir = tempfile.mkdtemp(prefix=prefix)
C
Cleber Rosa 已提交
964 965
        self.pass_script = script.TemporaryScript(
            'pass',
966
            "exit 0",
967
            'avocado_externalrunner_functional')
C
Cleber Rosa 已提交
968 969 970
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
971
            "exit 1",
972
            'avocado_externalrunner_functional')
C
Cleber Rosa 已提交
973
        self.fail_script.save()
974
        os.chdir(BASEDIR)
C
Cleber Rosa 已提交
975

976
    def test_externalrunner_pass(self):
977 978 979
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s'
                    % (AVOCADO, self.tmpdir, self.pass_script.path))
C
Cleber Rosa 已提交
980
        result = process.run(cmd_line, ignore_status=True)
981
        expected_rc = exit_codes.AVOCADO_ALL_OK
C
Cleber Rosa 已提交
982 983 984 985
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

986
    def test_externalrunner_fail(self):
987 988 989
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh %s'
                    % (AVOCADO, self.tmpdir, self.fail_script.path))
C
Cleber Rosa 已提交
990
        result = process.run(cmd_line, ignore_status=True)
991
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
C
Cleber Rosa 已提交
992 993 994 995
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

996
    def test_externalrunner_chdir_no_testdir(self):
997 998 999
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=/bin/sh --external-runner-chdir=test %s'
                    % (AVOCADO, self.tmpdir, self.pass_script.path))
C
Cleber Rosa 已提交
1000
        result = process.run(cmd_line, ignore_status=True)
1001 1002
        expected_output = (b'Option "--external-runner-chdir=test" requires '
                           b'"--external-runner-testdir" to be set')
C
Cleber Rosa 已提交
1003
        self.assertIn(expected_output, result.stderr)
1004
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
1005 1006 1007 1008
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021
    def test_externalrunner_chdir_runner_relative(self):
        avocado_abs = " ".join([os.path.abspath(_) for _ in AVOCADO.split(" ")])
        pass_abs = os.path.abspath(self.pass_script.path)
        os.chdir('/')
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=bin/sh --external-runner-chdir=runner -- %s'
                    % (avocado_abs, self.tmpdir, pass_abs))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

1022
    def test_externalrunner_no_url(self):
1023 1024
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                    '--external-runner=%s' % (AVOCADO, self.tmpdir, TRUE_CMD))
1025
        result = process.run(cmd_line, ignore_status=True)
1026 1027
        expected_output = (b'No test references provided nor any other '
                           b'arguments resolved into tests')
1028 1029
        self.assertIn(expected_output, result.stderr)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
C
Cleber Rosa 已提交
1030 1031 1032 1033 1034 1035 1036 1037 1038 1039
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


1040
class AbsPluginsTest:
1041

1042
    def setUp(self):
1043 1044
        prefix = temp_dir_prefix(__name__, self, 'setUp')
        self.base_outputdir = tempfile.mkdtemp(prefix=prefix)
1045
        os.chdir(BASEDIR)
1046

1047 1048 1049 1050 1051 1052
    def tearDown(self):
        shutil.rmtree(self.base_outputdir)


class PluginsTest(AbsPluginsTest, unittest.TestCase):

1053
    def test_sysinfo_plugin(self):
1054
        cmd_line = '%s sysinfo %s' % (AVOCADO, self.base_outputdir)
1055
        result = process.run(cmd_line, ignore_status=True)
1056
        expected_rc = exit_codes.AVOCADO_ALL_OK
1057 1058 1059 1060 1061 1062
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        sysinfo_files = os.listdir(self.base_outputdir)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

1063
    def test_list_plugin(self):
1064
        cmd_line = '%s list' % AVOCADO
1065
        result = process.run(cmd_line, ignore_status=True)
1066
        expected_rc = exit_codes.AVOCADO_ALL_OK
1067 1068 1069
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
1070 1071
        self.assertNotIn(b'No tests were found on current tests dir',
                         result.stdout)
1072

1073
    def test_list_error_output(self):
1074
        cmd_line = '%s list sbrubles' % AVOCADO
1075
        result = process.run(cmd_line, ignore_status=True)
1076
        expected_rc = exit_codes.AVOCADO_FAIL
1077 1078 1079
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
1080
        self.assertIn(b"Unable to resolve reference", result.stderr)
1081

    def test_list_no_file_loader(self):
        cmd_line = ("%s list --loaders external --verbose -- "
                    "this-wont-be-matched" % AVOCADO)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
                         "Avocado did not return rc %d:\n%s"
                         % (exit_codes.AVOCADO_ALL_OK, result))
        exp = (b"Type    Test                 Tag(s)\n"
               b"MISSING this-wont-be-matched\n\n"
               b"TEST TYPES SUMMARY\n"
               b"==================\n"
               b"EXTERNAL: 0\n"
               b"MISSING: 1\n")
        self.assertEqual(exp, result.stdout, "Stdout mismatch:\n%s\n\n%s"
                         % (exp, result))

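    # Roughly the verbose listing the next test expects (path shortened;
    # a sketch reconstructed from the assertions below, not captured
    # output -- column alignment is approximate):
    #
    #   Type         Test                          Tag(s)
    #   INSTRUMENTED /tmp/.../test.py:MyTest.test  BIG_TAG_NAME
    #   ...
    #   TEST TAGS SUMMARY
    #   BIG_TAG_NAME: 1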
    def test_list_verbose_tags(self):
        """
        Runs list verbosely and checks for tag-related output
        """
        test = script.make_script(os.path.join(self.base_outputdir, 'test.py'),
                                  VALID_PYTHON_TEST_WITH_TAGS)
        cmd_line = ("%s list --loaders file --verbose %s" % (AVOCADO,
                                                             test))
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
                         "Avocado did not return rc %d:\n%s"
                         % (exit_codes.AVOCADO_ALL_OK, result))
        stdout_lines = result.stdout_text.splitlines()
        self.assertIn("Tag(s)", stdout_lines[0])
        full_test_name = "%s:MyTest.test" % test
        self.assertEqual("INSTRUMENTED %s BIG_TAG_NAME" % full_test_name,
                         stdout_lines[1])
        self.assertIn("TEST TYPES SUMMARY", stdout_lines)
        self.assertIn("INSTRUMENTED: 1", stdout_lines)
        self.assertIn("TEST TAGS SUMMARY", stdout_lines)
        self.assertEqual("BIG_TAG_NAME: 1", stdout_lines[-1])

    def test_plugin_list(self):
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn(b'Disabled', result.stdout)

    def test_config_plugin(self):
        cmd_line = '%s config --paginator off' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn(b'Disabled', result.stdout)

    def test_config_plugin_datadir(self):
        cmd_line = '%s config --datadir --paginator off' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn(b'Disabled', result.stdout)

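    # Plugin disabling is configuration-driven; the next test writes a
    # config file whose rendered content is:
    #
    #   [plugins]
    #   disable=['cli.cmd.sysinfo',]
    #
    # and then expects the "avocado plugins" listing to no longer show
    # the sysinfo command ("Collect system information").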
    def test_disable_plugin(self):
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertIn(b"Collect system information", result.stdout)

        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf",
                                        config_content)
        with config:
            cmd_line = '%s --config %s plugins' % (AVOCADO, config)
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))
            self.assertNotIn(b"Collect system information", result.stdout)

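    # Result-plugin ordering is also configuration-driven, e.g. (a sketch
    # mirroring the configs written by the next test):
    #
    #   [plugins.result]
    #   order=['html', 'json', 'xunit', 'zip_archive']
    #
    # Result plugins run in the listed order and unknown names are simply
    # ignored, so placing zip_archive last lets the archive pick up the
    # other result files.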
    def test_plugin_order(self):
        """
        Tests plugin order by configuration file

        First it checks that the html, json, xunit and zip_archive plugins
        are enabled. Then it runs a test with zip_archive running first,
        which means the html, json and xunit output files do not make it
        into the archive.

        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make it into the archive.
        """
        def run_config(config_path):
            cmd = ('%s --config %s run passtest.py --archive '
                   '--job-results-dir %s --sysinfo=off'
                   % (AVOCADO, config_path, self.base_outputdir))
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(result.exit_status, expected_rc,
                             "Avocado did not return rc %d:\n%s" %
                             (expected_rc, result))

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        if python_module_available('avocado-framework-plugin-result-html'):
            result_plugins.append("html")
            result_outputs.append("results.html")

        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout_text)

        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript("zip_first.conf",
                                                  config_content_zip_first)
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])

        config_content_zip_last = ("[plugins.result]\norder=['html', 'json',"
                                   "'xunit', 'non_existing_plugin_is_ignored'"
                                   ",'zip_archive']")
        config_zip_last = script.TemporaryScript("zip_last.conf",
                                                 config_content_zip_last)
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.base_outputdir, '*.zip'))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], 'r')
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        cmd_line = '%s plugins' % AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))
        self.assertNotIn(b"'Namespace' object has no attribute", result.stderr)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(AbsPluginsTest, unittest.TestCase):

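    # Note: decorating setUp() with skipUnless() skips every test in this
    # class when xmlschema is unavailable -- unittest treats the SkipTest
    # raised while entering setUp() as a skip of the test being run.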
    @unittest.skipUnless(SCHEMA_CAPABLE,
                         'Unable to validate schema due to missing xmlschema library')
    def setUp(self):
        prefix = temp_dir_prefix(__name__, self, 'setUp')
        self.tmpdir = tempfile.mkdtemp(prefix=prefix)
        junit_xsd = os.path.join(os.path.dirname(__file__),
                                 os.path.pardir, ".data", 'jenkins-junit.xsd')
        self.xml_schema = xmlschema.XMLSchema(junit_xsd)
        super(PluginsXunitTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nnotfound, e_nfailures, e_nskip):  # pylint: disable=W0613
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off'
                    ' --xunit - %s' % (AVOCADO, self.tmpdir, testname))
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError("Failed to parse content: %s\n%s" %
                                (detail, xml_output))

        # pylint: disable=I1101
        xunit_file_output = os.path.join(self.tmpdir, 'latest', 'results.xml')
        self.assertTrue(self.xml_schema.is_valid(xunit_file_output))

        testsuite_list = xunit_doc.getElementsByTagName('testsuite')
        self.assertEqual(len(testsuite_list), 1, 'More than one testsuite tag')

        testsuite_tag = testsuite_list[0]
        self.assertEqual(len(testsuite_tag.attributes), 7,
                         'The testsuite tag does not have 7 attributes. '
                         'XML:\n%s' % xml_output)

        n_tests = int(testsuite_tag.attributes['tests'].value)
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests, "
                         "XML:\n%s" % xml_output)

        n_errors = int(testsuite_tag.attributes['errors'].value)
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors, "
                         "XML:\n%s" % xml_output)

        n_failures = int(testsuite_tag.attributes['failures'].value)
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures, "
                         "XML:\n%s" % xml_output)

        n_skip = int(testsuite_tag.attributes['skipped'].value)
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of test skips, "
                         "XML:\n%s" % xml_output)

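    # Expectation arguments below map to run_and_check() as: (testname,
    # rc, tests, errors, notfound, failures, skips); e_nnotfound is
    # currently unused, hence the pylint W0613 waiver above.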
    def test_xunit_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 0)

    def test_xunit_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 0, 1, 0)

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_xunit_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0, 0)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsXunitTest, self).tearDown()


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(AbsPluginsTest, unittest.TestCase):

    def setUp(self):
        prefix = temp_dir_prefix(__name__, self, 'setUp')
        self.tmpdir = tempfile.mkdtemp(prefix=prefix)
        super(PluginsJSONTest, self).setUp()

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
                      e_nfailures, e_nskip, e_ncancel=0, external_runner=None):
        cmd_line = ('%s run --job-results-dir %s --sysinfo=off --json - '
                    '--archive %s' % (AVOCADO, self.tmpdir, testname))
        if external_runner is not None:
            cmd_line += " --external-runner '%s'" % external_runner
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout_text
        self.assertEqual(result.exit_status, e_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (e_rc, result))
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError("Failed to parse content: %s\n%s" %
                                 (detail, json_output))
        self.assertTrue(json_data, "Empty JSON result:\n%s" % json_output)
        self.assertIsInstance(json_data['tests'], list,
                              "JSON result lacks 'tests' list")
        n_tests = len(json_data['tests'])
        self.assertEqual(n_tests, e_ntests,
                         "Unexpected number of executed tests")
        n_errors = json_data['errors']
        self.assertEqual(n_errors, e_nerrors,
                         "Unexpected number of test errors")
        n_failures = json_data['failures']
        self.assertEqual(n_failures, e_nfailures,
                         "Unexpected number of test failures")
        n_skip = json_data['skip']
        self.assertEqual(n_skip, e_nskip,
                         "Unexpected number of skipped tests")
        n_cancel = json_data['cancel']
        self.assertEqual(n_cancel, e_ncancel,
                         "Unexpected number of canceled tests")
        return json_data

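    # Expectation arguments below map to run_and_check() as: (testname,
    # rc, tests, errors, failures, skips[, cancels]); e.g. the
    # cancelonsetup.py run ends ALL_OK with one test counted as canceled.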
    def test_json_plugin_passtest(self):
        self.run_and_check('passtest.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0)

    def test_json_plugin_failtest(self):
        self.run_and_check('failtest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 0, 1, 0)

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check('cancelonsetup.py', exit_codes.AVOCADO_ALL_OK,
                           1, 0, 0, 0, 1)

    def test_json_plugin_errortest(self):
        self.run_and_check('errortest.py', exit_codes.AVOCADO_TESTS_FAIL,
                           1, 1, 0, 0)

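    # The reference below deliberately stuffs the test ID with shell-escaped
    # quotes, backslashes and slashes. The assertions pin down both the
    # resulting test ID and the logdir basename, where characters that are
    # illegal in file names (e.g. "/") get replaced by "_".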
    @unittest.skipIf(not GNU_ECHO_BINARY, 'echo binary not available')
    def test_ugly_echo_cmd(self):
        data = self.run_and_check('"-ne foo\\\\\\n\\\'\\\\\\"\\\\\\'
                                  'nbar/baz"', exit_codes.AVOCADO_ALL_OK, 1, 0,
                                  0, 0, external_runner=GNU_ECHO_BINARY)
        # The executed test should be this
        self.assertEqual(data['tests'][0]['id'],
                         '1--ne foo\\\\n\\\'\\"\\\\nbar/baz')
        # logdir name should escape special chars (/)
        self.assertEqual(os.path.basename(data['tests'][0]['logdir']),
                         "1--ne foo__n_'____nbar_baz")

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        super(PluginsJSONTest, self).tearDown()


if __name__ == '__main__':
    unittest.main()