# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; specifically version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# This code was inspired by the autotest project,
# client/shared/test.py
# Authors: Martin J Bligh <mbligh@google.com>, Andy Whitcroft <apw@shadowen.org>

"""
Contains the base test implementation, used as a base for the actual
framework tests.
"""

import inspect
import logging
import os
import pipes
import re
import shutil
import sys
import time

if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest
else:
    import unittest

from avocado import multiplexer
from avocado.core import data_dir
from avocado.core import exceptions
from avocado.core import sysinfo
from avocado.utils import genio
from avocado.utils import path as utils_path
from avocado.utils import process
from avocado.utils import stacktrace
from avocado.version import VERSION


class Test(unittest.TestCase):

    """
L
Lucas Meneghel Rodrigues 已提交
48 49
    Base implementation for the test class.

L
Lucas Meneghel Rodrigues 已提交
50
    You'll inherit from this to write your own tests. Typically you'll want
51
    to implement setUp(), runTest() and tearDown() methods on your own tests.
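
    For example, a minimal test could look like this (an illustrative
    sketch; the class and parameter names are not part of this module)::

        import time

        class SleepTest(Test):

            def runTest(self):
                sleep_length = self.params.get('sleep_length', default=1)
                time.sleep(sleep_length)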
    """
    default_params = {}

    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, tag=None, job=None, runner_queue=None):
        """
        Initializes the test.

60 61 62
        :param methodName: Name of the main method to run. For the sake of
                           compatibility with the original unittest class,
                           you should not set this.
63 64 65
        :param name: Pretty name of the test name. For normal tests, written
                     with the avocado API, this should not be set, this is
                     reserved for running random executables as tests.
66
        :param base_logdir: Directory where test logs should go. If None
67
                            provided, it'll use
68
                            :func:`avocado.core.data_dir.create_job_logs_dir`.
L
Lucas Meneghel Rodrigues 已提交
69
        :param tag: Tag that differentiates 2 executions of the same test name.
70 71
                    Example: 'long', 'short', so we can differentiate
                    'sleeptest.long' and 'sleeptest.short'.
72
        :param job: The job that this test is part of.
L
Lucas Meneghel Rodrigues 已提交
73
        """
        def record_and_warn(*args, **kwargs):
            """ Record call to this function and log warning """
            if not self.__log_warn_used:
                self.__log_warn_used = True
            return original_log_warn(*args, **kwargs)

        if name is not None:
            self.name = name
        else:
            self.name = self.__class__.__name__

        self.tag = tag or None

        self.job = job

        basename = os.path.basename(self.name)

        tmpdir = data_dir.get_tmp_dir()

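        # inspect.getfile() may point at the compiled '.pyc'/'.pyo' file;
        # rstrip('co') trims the trailing 'c'/'o' to get the '.py' source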
        self.filename = inspect.getfile(self.__class__).rstrip('co')
        self.basedir = os.path.dirname(self.filename)
        self.datadir = self.filename + '.data'

        self.expected_stdout_file = os.path.join(self.datadir,
                                                 'stdout.expected')
        self.expected_stderr_file = os.path.join(self.datadir,
                                                 'stderr.expected')

        self.workdir = utils_path.init_dir(tmpdir, basename)
        self.srcdir = utils_path.init_dir(self.workdir, 'src')
        if base_logdir is None:
            base_logdir = data_dir.create_job_logs_dir()
        base_logdir = os.path.join(base_logdir, 'test-results')
        self.tagged_name = self.get_tagged_name(base_logdir)

        # Let's avoid trouble at logdir init time, since we're interested
        # in a relative directory here
        tagged_name = self.tagged_name
        if tagged_name.startswith('/'):
            tagged_name = tagged_name[1:]

        self.logdir = utils_path.init_dir(base_logdir, tagged_name)
        genio.set_log_file_dir(self.logdir)
        self.logfile = os.path.join(self.logdir, 'debug.log')

        self.stdout_file = os.path.join(self.logdir, 'stdout')
        self.stderr_file = os.path.join(self.logdir, 'stderr')

        self.outputdir = utils_path.init_dir(self.logdir, 'data')
        self.sysinfodir = utils_path.init_dir(self.logdir, 'sysinfo')
        self.sysinfo_logger = sysinfo.SysInfo(basedir=self.sysinfodir)

        self.log = logging.getLogger("avocado.test")
        original_log_warn = self.log.warning
        self.__log_warn_used = False
        self.log.warn = self.log.warning = record_and_warn

        self.stdout_log = logging.getLogger("avocado.test.stdout")
        self.stderr_log = logging.getLogger("avocado.test.stderr")

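        # 'params' may be a dict (merged over default_params), None, or a
        # tuple of (params, mux_entry) coming from the multiplexer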
        mux_entry = ['/test/*']
        if isinstance(params, dict):
            self.default_params = self.default_params.copy()
            self.default_params.update(params)
            params = []
        elif params is None:
            params = []
        elif isinstance(params, tuple):
            params, mux_entry = params[0], params[1]
        self.params = multiplexer.AvocadoParams(params, self.name, self.tag,
                                                mux_entry,
                                                self.default_params)

        self.log.info('START %s', self.tagged_name)
        self.log.debug('')

        self.debugdir = None
        self.resultsdir = None
        self.status = None
        self.fail_reason = None
        self.fail_class = None
        self.traceback = None
        self.text_output = None

        self.whiteboard = ''

        self.running = False
        self.time_start = None
        self.time_end = None
        self.paused = False
        self.paused_msg = ''

        self.runner_queue = runner_queue

        self.time_elapsed = None
        unittest.TestCase.__init__(self, methodName=methodName)

    def __str__(self):
        return str(self.name)

    def __repr__(self):
        return "Test(%r)" % self.tagged_name

    def tag_start(self):
        self.running = True
        self.time_start = time.time()

    def tag_end(self):
        self.running = False
        self.time_end = time.time()
        # for consistency's sake, always compute elapsed time the same way
        self.update_time_elapsed(self.time_end)

    def update_time_elapsed(self, current_time=None):
        if current_time is None:
            current_time = time.time()
        self.time_elapsed = current_time - self.time_start

    def report_state(self):
        """
        Send the current test state to the test runner process
        """
        if self.runner_queue is not None:
            self.runner_queue.put(self.get_state())

    def get_state(self):
        """
        Serialize selected attributes representing the test state.

        :returns: a dictionary containing relevant test state data
        :rtype: dict
        """
        if self.running and self.time_start:
            self.update_time_elapsed()
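        # copy only attributes that can be safely serialized and shipped
        # back to the runner process (see report_state)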
        preserve_attr = ['basedir', 'debugdir', 'depsdir',
                         'fail_reason', 'logdir', 'logfile', 'name',
                         'resultsdir', 'srcdir', 'status', 'sysinfodir',
                         'tag', 'tagged_name', 'text_output', 'time_elapsed',
                         'traceback', 'workdir', 'whiteboard', 'time_start',
                         'time_end', 'running', 'paused', 'paused_msg',
                         'fail_class', 'params']
        state = dict([(key, self.__dict__.get(key)) for key in preserve_attr])
        state['class_name'] = self.__class__.__name__
        state['job_logdir'] = self.job.logdir
        state['job_unique_id'] = self.job.unique_id
        return state

    def get_data_path(self, basename):
        """
        Find a test dependency path inside the test data dir.

        This is shorthand for an operation that will be commonly used
        on avocado tests, so we feel it deserves its own API.

        :param basename: Basename of the dep file. Ex: ``testsuite.tar.bz2``.

        :return: Path where dependency is supposed to be found.
        """
        return os.path.join(self.datadir, basename)

    def _register_log_file_handler(self, logger, formatter, filename,
                                   log_level=logging.DEBUG):
        file_handler = logging.FileHandler(filename=filename)
        file_handler.setLevel(log_level)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        return file_handler

    def start_logging(self):
        """
        Simple helper for adding a file logger to the root logger.
        """
        self.file_handler = logging.FileHandler(filename=self.logfile)
        self.file_handler.setLevel(logging.DEBUG)

        fmt = '%(asctime)s %(levelname)-5.5s| %(message)s'
        formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')

        self.file_handler.setFormatter(formatter)
        self.log.addHandler(self.file_handler)

        stream_fmt = '%(message)s'
        stream_formatter = logging.Formatter(fmt=stream_fmt)

        self.stdout_file_handler = self._register_log_file_handler(
            self.stdout_log, stream_formatter, self.stdout_file)
        self.stderr_file_handler = self._register_log_file_handler(
            self.stderr_log, stream_formatter, self.stderr_file)

    def stop_logging(self):
        """
        Stop the logging activity of the test by cleaning the logger handlers.
        """
        self.log.removeHandler(self.file_handler)

    def get_tagged_name(self, logdir):
        """
        Get a test tagged name.

        Combines name and tag (if present) to obtain a unique name. When the
        associated directory already exists, appends ".$number" until an
        unused name is found, to avoid clashes (e.g. 'sleeptest' becomes
        'sleeptest.1', then 'sleeptest.2', and so on).

        :param logdir: Log directory being in use for result storage.

        :return: Unique test name
        """
        name = self.name
        if self.tag is not None:
            name += ".%s" % self.tag
        tag = 0
        tagged_name = name
        while os.path.isdir(os.path.join(logdir, tagged_name)):
            tag += 1
            tagged_name = "%s.%s" % (name, tag)
        self.tag = "%s.%s" % (self.tag, tag) if self.tag else str(tag)

        return tagged_name

    def setUp(self):
        """
        Setup stage that the test needs before passing to the actual runTest.

        Must be implemented by tests if they want such a stage. Commonly we'll
        download/compile test suites, create files needed for a test, among
        other possibilities.
        """
        pass

    def runTest(self):
        """
        Actual test payload. Must be implemented by tests.

        In case of an existing test suite wrapper, it'll execute the suite,
        or perform a series of operations, and based on the results of the
        operations decide if the test passes (let the test complete) or
        fails (raise a test related exception).
        """
        pass

    def tearDown(self):
        """
        Cleanup stage after the runTest is done.

        Examples of cleanup actions are deleting temporary files, restoring
        firewall configurations or other system settings that were changed
        in setup.
        """
        pass

    def record_reference_stdout(self):
        utils_path.init_dir(self.datadir)
        shutil.copyfile(self.stdout_file, self.expected_stdout_file)

    def record_reference_stderr(self):
        utils_path.init_dir(self.datadir)
        shutil.copyfile(self.stderr_file, self.expected_stderr_file)

    def check_reference_stdout(self):
        if os.path.isfile(self.expected_stdout_file):
            expected = genio.read_file(self.expected_stdout_file)
            actual = genio.read_file(self.stdout_file)
            msg = ('Actual test stdout differs from expected one:\n'
                   'Actual:\n%s\nExpected:\n%s' % (actual, expected))
            self.assertEqual(expected, actual, msg)

    def check_reference_stderr(self):
        if os.path.isfile(self.expected_stderr_file):
            expected = genio.read_file(self.expected_stderr_file)
            actual = genio.read_file(self.stderr_file)
            msg = ('Actual test stderr differs from expected one:\n'
                   'Actual:\n%s\nExpected:\n%s' % (actual, expected))
            self.assertEqual(expected, actual, msg)

    def run(self, result=None):
        """
        Run test method, for compatibility with unittest.TestCase.

        :param result: Unused param, for compatibility with
                       :class:`unittest.TestCase`.
        """
        testMethod = getattr(self, self._testMethodName)
        self.start_logging()
        self.sysinfo_logger.start_test_hook()
        runTest_exception = None
        cleanup_exception = None
        stdout_check_exception = None
        stderr_check_exception = None
        try:
            self.setUp()
        except Exception, details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSetupFail(details)
        try:
            testMethod()
        except Exception, details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            runTest_exception = details
        finally:
            try:
                self.tearDown()
            except Exception, details:
                stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                cleanup_exception = details

        whiteboard_file = os.path.join(self.logdir, 'whiteboard')
        genio.write_file(whiteboard_file, self.whiteboard)

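        # Decide whether to compare the test's stdout/stderr against the
        # recorded reference files, or to (re)record them, based on the
        # job's output-check settings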
        if self.job is not None:
            job_standalone = getattr(self.job.args, 'standalone', False)
            output_check_record = getattr(self.job.args,
                                          'output_check_record', 'none')
            no_record_mode = (not job_standalone and
                              output_check_record == 'none')
            disable_output_check = (not job_standalone and
                                    getattr(self.job.args,
                                            'output_check', 'on') == 'off')

            if job_standalone or no_record_mode:
                if not disable_output_check:
                    try:
                        self.check_reference_stdout()
                    except Exception, details:
                        stacktrace.log_exc_info(sys.exc_info(),
                                                logger='avocado.test')
                        stdout_check_exception = details
                    try:
                        self.check_reference_stderr()
                    except Exception, details:
                        stacktrace.log_exc_info(sys.exc_info(),
                                                logger='avocado.test')
                        stderr_check_exception = details
            elif not job_standalone:
                if output_check_record in ['all', 'stdout']:
                    self.record_reference_stdout()
                if output_check_record in ['all', 'stderr']:
                    self.record_reference_stderr()

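        # Exception precedence: a runTest failure wins over cleanup and
        # output-check failures, which in turn win over logged warnings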
        # pylint: disable=E0702
        if runTest_exception is not None:
            raise runTest_exception
        elif cleanup_exception is not None:
            raise exceptions.TestSetupFail(cleanup_exception)
        elif stdout_check_exception is not None:
            raise stdout_check_exception
        elif stderr_check_exception is not None:
            raise stderr_check_exception
        elif self.__log_warn_used:
            raise exceptions.TestWarn("Test passed but there were warnings "
                                      "during execution. Check the log for "
                                      "details.")

        self.status = 'PASS'
        self.sysinfo_logger.end_test_hook()

    def _setup_environment_variables(self):
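        """
        Export test information to the environment as AVOCADO_* variables,
        so that helper processes and wrappers can locate the test's
        directories and log files.
        """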
        os.environ['AVOCADO_VERSION'] = VERSION
        os.environ['AVOCADO_TEST_BASEDIR'] = self.basedir
        os.environ['AVOCADO_TEST_DATADIR'] = self.datadir
        os.environ['AVOCADO_TEST_WORKDIR'] = self.workdir
        os.environ['AVOCADO_TEST_SRCDIR'] = self.srcdir
        os.environ['AVOCADO_TEST_LOGDIR'] = self.logdir
        os.environ['AVOCADO_TEST_LOGFILE'] = self.logfile
        os.environ['AVOCADO_TEST_OUTPUTDIR'] = self.outputdir
        os.environ['AVOCADO_TEST_SYSINFODIR'] = self.sysinfodir

    def run_avocado(self, result=None):
        """
        Wraps the run method, for execution inside the avocado runner.

        :param result: Unused param, for compatibility with
                       :class:`unittest.TestCase`.
        """
        self._setup_environment_variables()
        try:
            self.tag_start()
            self.run(result)
        except exceptions.TestBaseException, detail:
            self.status = detail.status
            self.fail_class = detail.__class__.__name__
            self.fail_reason = detail
            self.traceback = stacktrace.prepare_exc_info(sys.exc_info())
        except AssertionError, detail:
            self.status = 'FAIL'
            self.fail_class = detail.__class__.__name__
            self.fail_reason = detail
            self.traceback = stacktrace.prepare_exc_info(sys.exc_info())
        except Exception, detail:
            self.status = 'FAIL'
            tb_info = stacktrace.tb_info(sys.exc_info())
            self.traceback = stacktrace.prepare_exc_info(sys.exc_info())
            try:
                self.fail_class = str(detail.__class__.__name__)
                self.fail_reason = str(detail)
            except TypeError:
                self.fail_class = "Exception"
                self.fail_reason = ("Unable to get exception, check the "
                                    "traceback for details.")
            for e_line in tb_info:
                self.log.error(e_line)
        finally:
            self.tag_end()
            self.report()
            self.log.info("")
            with open(self.logfile, 'r') as log_file_obj:
                self.text_output = log_file_obj.read()
            self.stop_logging()

    def report(self):
        """
        Report result to the logging system.
        """
        if self.fail_reason is not None:
            self.log.error("%s %s -> %s: %s", self.status,
                           self.tagged_name,
                           self.fail_class,
                           self.fail_reason)
        else:
            if self.status is None:
                self.status = 'INTERRUPTED'
            self.log.info("%s %s", self.status,
                          self.tagged_name)


class SimpleTest(Test):

    """
    Run an arbitrary command that returns either 0 (PASS) or !=0 (FAIL).
    """

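    # Matches avocado-formatted WARN lines wrapped in the executed
    # process' stdout, e.g. (illustrative):
    # 12:34:56 DEBUG| [stdout] 12:34:56 WARN |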
    re_avocado_log = re.compile(r'^\d\d:\d\d:\d\d DEBUG\| \[stdout\]'
                                r' \d\d:\d\d:\d\d WARN \|')

    def __init__(self, name, params=None, base_logdir=None, tag=None, job=None):
        self.path = os.path.abspath(name)
        super(SimpleTest, self).__init__(name=name, base_logdir=base_logdir,
                                         params=params, tag=tag, job=job)
        basedir = os.path.dirname(self.path)
        basename = os.path.basename(self.path)
        datadirname = basename + '.data'
        self.datadir = os.path.join(basedir, datadirname)
        self.expected_stdout_file = os.path.join(self.datadir,
                                                 'stdout.expected')
        self.expected_stderr_file = os.path.join(self.datadir,
                                                 'stderr.expected')

    def _log_detailed_cmd_info(self, result):
        """
        Log detailed command information.

        :param result: :class:`avocado.utils.process.CmdResult` instance.
        """
        self.log.info("Exit status: %s", result.exit_status)
        self.log.info("Duration: %s", result.duration)

    def runTest(self):
        """
        Run the executable, and log its detailed execution.
        """
        try:
            test_params = dict([(str(key), str(val)) for key, val in
                                self.params.iteritems()])
            # process.run uses shlex.split(), so self.path needs to be escaped
            result = process.run(pipes.quote(self.path), verbose=True,
                                 env=test_params)
            self._log_detailed_cmd_info(result)
        except exceptions.CmdError, details:
            self._log_detailed_cmd_info(details.result)
            raise exceptions.TestFail(details)

    def run(self, result=None):
        super(SimpleTest, self).run(result)
        for line in open(self.logfile):
            if self.re_avocado_log.match(line):
                raise exceptions.TestWarn("Test passed but there were warnings"
                                          " on stdout during execution. Check "
                                          "the log for details.")


class MissingTest(Test):

    """
    Handle when there is no such test module in the test directory.
    """

    def runTest(self):
        e_msg = ('Test %s could not be found in the test dir %s '
                 '(or test path does not exist)' %
                 (self.name, data_dir.get_test_dir()))
        raise exceptions.TestNotFoundError(e_msg)


class BuggyTest(Test):

    """
    Used when the python module could not be imported.

    That means it is possibly a buggy test, but it could also be a random
    buggy python module.
    """

    def runTest(self):
        # pylint: disable=E0702
        raise self.params.get('exception')


class NotATest(Test):

    """
    The file is not a test.

    Either a non executable python module with no avocado test class in it,
    or a regular, non executable file.
    """

    def runTest(self):
        e_msg = ('File %s is not executable and does not contain an avocado '
                 'test class in it' % self.name)
        raise exceptions.NotATestError(e_msg)


class TimeOutSkipTest(Test):

    """
    Skip test due job timeout.

    This test is skipped due a job timeout.
    It will never have a chance to execute.
    """

    def runTest(self):
        e_msg = 'Test skipped due to a job timeout!'
        raise exceptions.TestNAError(e_msg)