# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; specifically version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# This code was inspired by the autotest project,
# client/shared/test.py
# Authors: Martin J Bligh <mbligh@google.com>, Andy Whitcroft <apw@shadowen.org>

"""
Contains the base test implementation, used as a base for the actual
framework tests.
"""

import inspect
import logging
import os
import shutil
import sys
import time
import unittest

from avocado.core import data_dir
from avocado.core import exceptions
from avocado.utils import io
from avocado.utils import path as utils_path
from avocado.utils import process
from avocado.utils import stacktrace
from avocado.utils.params import Params
from avocado import sysinfo
from avocado.version import VERSION


class Test(unittest.TestCase):

    """
    Base implementation for the test class.

    You'll inherit from this to write your own tests. Typically you'll want
    to implement setup(), action() and cleanup() methods on your own tests.
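
    A minimal sketch, modeled on avocado's sleeptest example (illustrative,
    not part of this module)::

        import time
        from avocado import test

        class SleepTest(test.Test):

            default_params = {'sleep_length': 1.0}

            def action(self):
                time.sleep(self.params.sleep_length)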
    """
    default_params = {}

    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, tag=None, job=None, runner_queue=None):
        """
        Initializes the test.

        :param methodName: Name of the main method to run. For the sake of
                           compatibility with the original unittest class,
                           you should not set this.
        :param name: Pretty name for the test. For normal tests, written
                     with the avocado API, this should not be set; it is
                     reserved for running random executables as tests.
        :param params: Dictionary with test parameters, wrapped in a
                       :class:`avocado.utils.params.Params` object.
        :param base_logdir: Directory where test logs should go. If None
                            provided, it'll use
                            :func:`avocado.core.data_dir.get_job_logs_dir`.
        :param tag: Tag that differentiates two executions of the same test
                    name. Example: 'long', 'short', so we can differentiate
                    'sleeptest.long' and 'sleeptest.short'.
        :param job: The job that this test is part of.
        :param runner_queue: Queue to send test state back to the test
                             runner process, if one is in use.
        """
        if name is not None:
            self.name = name
        else:
            self.name = self.__class__.__name__

        if params is None:
            params = {}
        self.params = Params(params)
        self._raw_params = params

        self.tag = tag or self.params.get('tag')
        self.job = job

        basename = os.path.basename(self.name)

        tmpdir = data_dir.get_tmp_dir()

        self.basedir = os.path.dirname(inspect.getfile(self.__class__))
        self.datadir = os.path.join(self.basedir, '%s.data' % basename)

        self.expected_stdout_file = os.path.join(self.datadir,
                                                 'stdout.expected')
        self.expected_stderr_file = os.path.join(self.datadir,
                                                 'stderr.expected')

        self.workdir = utils_path.init_dir(tmpdir, basename)
        self.srcdir = utils_path.init_dir(self.workdir, 'src')
        if base_logdir is None:
            base_logdir = data_dir.get_job_logs_dir()
        base_logdir = os.path.join(base_logdir, 'test-results')
        self.tagged_name = self.get_tagged_name(base_logdir)

        # Let's avoid trouble at logdir init time, since we're interested
        # in a relative directory here
        tagged_name = self.tagged_name
        if tagged_name.startswith('/'):
            tagged_name = tagged_name[1:]

        self.logdir = utils_path.init_dir(base_logdir, tagged_name)
        io.set_log_file_dir(self.logdir)
        self.logfile = os.path.join(self.logdir, 'debug.log')

        self.stdout_file = os.path.join(self.logdir, 'stdout')
        self.stderr_file = os.path.join(self.logdir, 'stderr')

        self.outputdir = utils_path.init_dir(self.logdir, 'data')
        self.sysinfodir = utils_path.init_dir(self.logdir, 'sysinfo')
        self.sysinfo_logger = sysinfo.SysInfo(basedir=self.sysinfodir)

        self.log = logging.getLogger("avocado.test")

        self.stdout_log = logging.getLogger("avocado.test.stdout")
        self.stderr_log = logging.getLogger("avocado.test.stderr")

        self.log.info('START %s', self.tagged_name)
        self.log.debug('')
        self.log.debug('Test instance parameters:')

        # Attach the set_default helper to the params object
        setattr(self.params, 'set_default', self._set_default)

        # Log what comes from the params dict
        for key in sorted(self.params.keys()):
            self.log.debug('    %s = %s', key, self.params.get(key))
        self.log.debug('')

        # Apply what comes from the default_params dict
        self.log.debug('Default parameters:')
        for key in sorted(self.default_params.keys()):
            self.log.debug('    %s = %s', key, self.default_params.get(key))
            self.params.set_default(key, self.default_params[key])
        self.log.debug('')
        self.log.debug('Test instance params override defaults whenever available')
        self.log.debug('')

        # If there's a timeout set, log a timeout reminder
        if self.params.timeout:
            self.log.info('Test timeout set. Will wait %.2f s for '
                          'PID %s to end',
                          float(self.params.timeout), os.getpid())
            self.log.info('')

        self.debugdir = None
        self.resultsdir = None
        self.status = None
        self.fail_reason = None
        self.fail_class = None
        self.traceback = None
        self.text_output = None

        self.whiteboard = ''

        self.running = False
        self.time_start = None
        self.time_end = None
        self.paused = False
        self.paused_msg = ''

        self.runner_queue = runner_queue

        self.time_elapsed = None
        unittest.TestCase.__init__(self)

    def __str__(self):
        return str(self.name)

    def __repr__(self):
        return "Test(%r)" % self.tagged_name
    def tag_start(self):
        self.running = True
        self.time_start = time.time()

    def tag_end(self):
        self.running = False
        self.time_end = time.time()
        # for consistency's sake, always use the same method
        self.update_time_elapsed(self.time_end)

    def update_time_elapsed(self, current_time=None):
        if current_time is None:
            current_time = time.time()
        self.time_elapsed = current_time - self.time_start


    def report_state(self):
        """
        Send the current test state to the test runner process
        """
        if self.runner_queue is not None:
            self.runner_queue.put(self.get_state())


    def get_state(self):
        """
        Serialize selected attributes representing the test state

        :returns: a dictionary containing relevant test state data
        :rtype: dict
        """
        if self.running and self.time_start:
            self.update_time_elapsed()
        orig = dict(self.__dict__)
        d = {}
        preserve_attr = ['basedir', 'debugdir', 'depsdir',
                         'fail_reason', 'logdir', 'logfile', 'name',
                         'resultsdir', 'srcdir', 'status', 'sysinfodir',
                         'tag', 'tagged_name', 'text_output', 'time_elapsed',
                         'traceback', 'workdir', 'whiteboard', 'time_start',
                         'time_end', 'running', 'paused', 'paused_msg']
        convert_attr = ['fail_class']
        for key in sorted(orig):
            if key in preserve_attr:
                d[key] = orig[key]
            elif key in convert_attr:
                d[key] = str(orig[key])
        d['params'] = dict(orig['params'])
        d['class_name'] = self.__class__.__name__
        d['job_logdir'] = self.job.logdir
        d['job_unique_id'] = self.job.unique_id
        return d

    def _set_default(self, key, default):
        """
        Set a default value for a key absent from the params object.
        """
        try:
            self.params[key]
        except Exception:
            self.params[key] = default

    def get_data_path(self, basename):
        """
        Find a test dependency path inside the test data dir.

        This is a shorthand for an operation that will be commonly
        used on avocado tests, so we feel it deserves its own API.

        :param basename: Basename of the dep file. Ex: ``testsuite.tar.bz2``.

        :return: Path where dependency is supposed to be found.
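
        Example (illustrative)::

            tarball = self.get_data_path('testsuite.tar.bz2')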
        """
        return os.path.join(self.datadir, basename)
    def _register_log_file_handler(self, logger, formatter, filename,
                                   log_level=logging.DEBUG):
        file_handler = logging.FileHandler(filename=filename)
        file_handler.setLevel(log_level)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        return file_handler

    def start_logging(self):
        """
        Simple helper for adding file handlers to the test loggers.
        """
        self.file_handler = logging.FileHandler(filename=self.logfile)
        self.file_handler.setLevel(logging.DEBUG)

        fmt = '%(asctime)s %(levelname)-5.5s| %(message)s'
        formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')

        self.file_handler.setFormatter(formatter)
        self.log.addHandler(self.file_handler)

        stream_fmt = '%(message)s'
        stream_formatter = logging.Formatter(fmt=stream_fmt)

        self.stdout_file_handler = self._register_log_file_handler(
            self.stdout_log, stream_formatter, self.stdout_file)
        self.stderr_file_handler = self._register_log_file_handler(
            self.stderr_log, stream_formatter, self.stderr_file)

    def stop_logging(self):
        """
        Stop the logging activity of the test by cleaning the logger handlers.
        """
        self.log.removeHandler(self.file_handler)

    def get_tagged_name(self, logdir):
        """
        Get a test tagged name.

        If a test tag is defined, just return name.tag. If tag is absent,
        it'll try to find a tag that is not already taken (so there are no
        clashes in the results directory).

        :param logdir: Log directory being in use for result storage.

        :return: String `test.tag`.
        """
        if self.tag is not None:
            return "%s.%s" % (self.name, self.tag)

        tag = 0
        tagged_name = self.name
        test_logdir = os.path.join(logdir, tagged_name)
        while os.path.isdir(test_logdir):
            tag += 1
            tagged_name = "%s.%s" % (self.name, tag)
            test_logdir = os.path.join(logdir, tagged_name)
        self.tag = str(tag)

        return tagged_name

    def setup(self):
        """
        Setup stage that the test needs before passing to the actual action.

        Must be implemented by tests if they want such a stage. Commonly we'll
        download/compile test suites, create files needed for a test, among
        other possibilities.
        """
        pass

    def action(self):
        """
        Actual test payload. Must be implemented by tests.

        In case of an existing test suite wrapper, it'll execute the suite,
        or perform a series of operations, and based on the results of the
        operations decide if the test passed (let the test complete) or
        failed (raise a test-related exception).
        """
        raise NotImplementedError('Test subclasses must implement an action '
                                  'method')
    def cleanup(self):
        """
        Cleanup stage after the action is done.

        Examples of cleanup actions are deleting temporary files, restoring
        firewall configurations or other system settings that were changed
        in setup.
        """
        pass

    def record_reference_stdout(self):
        utils_path.init_dir(self.datadir)
        shutil.copyfile(self.stdout_file, self.expected_stdout_file)

    def record_reference_stderr(self):
        utils_path.init_dir(self.datadir)
        shutil.copyfile(self.stderr_file, self.expected_stderr_file)

    def check_reference_stdout(self):
        if os.path.isfile(self.expected_stdout_file):
            expected = io.read_file(self.expected_stdout_file)
            actual = io.read_file(self.stdout_file)
            msg = ('Actual test stdout differs from expected one:\n'
                   'Actual:\n%s\nExpected:\n%s' % (actual, expected))
            self.assertEqual(expected, actual, msg)

    def check_reference_stderr(self):
        if os.path.isfile(self.expected_stderr_file):
            expected = io.read_file(self.expected_stderr_file)
            actual = io.read_file(self.stderr_file)
            msg = ('Actual test stderr differs from expected one:\n'
                   'Actual:\n%s\nExpected:\n%s' % (actual, expected))
            self.assertEqual(expected, actual, msg)

    def runTest(self, result=None):
        """
        Run test method, for compatibility with unittest.TestCase.

        :param result: Unused param, kept for compatibility with
                       :class:`unittest.TestCase`.
        """
        self.start_logging()
        self.sysinfo_logger.start_test_hook()
        action_exception = None
        cleanup_exception = None
        stdout_check_exception = None
        stderr_check_exception = None
        try:
            self.setup()
        except Exception, details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSetupFail(details)
        try:
            self.action()
        except Exception, details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            action_exception = details
        finally:
            try:
                self.cleanup()
            except Exception, details:
                stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                cleanup_exception = details

        whiteboard_file = os.path.join(self.logdir, 'whiteboard')
        io.write_file(whiteboard_file, self.whiteboard)

        if self.job is not None:
            job_standalone = self.job.args is None
            no_record_mode = (not job_standalone and
                              self.job.args.output_check_record == 'none')
            disable_output_check = (not job_standalone and
                                    self.job.args.disable_output_check)

            if job_standalone or no_record_mode:
                if not disable_output_check:
                    try:
                        self.check_reference_stdout()
                    except Exception, details:
                        stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                        stdout_check_exception = details
                    try:
                        self.check_reference_stderr()
                    except Exception, details:
                        stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                        stderr_check_exception = details

            elif not job_standalone:
                if self.job.args.output_check_record in ['all', 'stdout']:
                    self.record_reference_stdout()
                if self.job.args.output_check_record in ['all', 'stderr']:
                    self.record_reference_stderr()

        # pylint: disable=E0702
        if action_exception is not None:
            raise action_exception
        elif cleanup_exception is not None:
            raise exceptions.TestSetupFail(cleanup_exception)
        elif stdout_check_exception is not None:
            raise stdout_check_exception
        elif stderr_check_exception is not None:
            raise stderr_check_exception
        self.status = 'PASS'
        self.sysinfo_logger.end_test_hook()

    def _setup_environment_variables(self):
        """
        Export test properties as AVOCADO_* environment variables.
        """
        os.environ['AVOCADO_VERSION'] = VERSION
        os.environ['AVOCADO_TEST_BASEDIR'] = self.basedir
        os.environ['AVOCADO_TEST_DATADIR'] = self.datadir
        os.environ['AVOCADO_TEST_WORKDIR'] = self.workdir
        os.environ['AVOCADO_TEST_SRCDIR'] = self.srcdir
        os.environ['AVOCADO_TEST_LOGDIR'] = self.logdir
        os.environ['AVOCADO_TEST_LOGFILE'] = self.logfile
        os.environ['AVOCADO_TEST_OUTPUTDIR'] = self.outputdir
        os.environ['AVOCADO_TEST_SYSINFODIR'] = self.sysinfodir

    def run_avocado(self, result=None):
        """
        Wraps the runTest method, for execution inside the avocado runner.

        :param result: Unused param, kept for compatibility with
                       :class:`unittest.TestCase`.
        """
        self._setup_environment_variables()
        try:
            self.tag_start()
            self.runTest(result)
        except exceptions.TestBaseException, detail:
            self.status = detail.status
            self.fail_class = detail.__class__.__name__
            self.fail_reason = detail
            self.traceback = stacktrace.prepare_exc_info(sys.exc_info())
        except AssertionError, detail:
            self.status = 'FAIL'
            self.fail_class = detail.__class__.__name__
            self.fail_reason = detail
            self.traceback = stacktrace.prepare_exc_info(sys.exc_info())
        except Exception, detail:
            self.status = 'FAIL'
            self.fail_class = detail.__class__.__name__
            self.fail_reason = detail
            tb_info = stacktrace.tb_info(sys.exc_info())
            self.traceback = stacktrace.prepare_exc_info(sys.exc_info())
            for e_line in tb_info:
                self.log.error(e_line)
        finally:
            self.tag_end()
            self.report()
            self.log.info("")
            with open(self.logfile, 'r') as log_file_obj:
                self.text_output = log_file_obj.read()
            self.stop_logging()

    def report(self):
        """
        Report result to the logging system.
        """
        if self.fail_reason is not None:
            self.log.error("%s %s -> %s: %s", self.status,
                           self.tagged_name,
                           self.fail_reason.__class__.__name__,
                           self.fail_reason)

        else:
            if self.status is None:
                self.status = 'INTERRUPTED'
            self.log.info("%s %s", self.status,
                          self.tagged_name)


class SimpleTest(Test):

    """
    Run an arbitrary command that returns either 0 (PASS) or !=0 (FAIL).
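
    A sketch of typical use by the test runner (illustrative only)::

        test = SimpleTest('/bin/true')   # any executable path
        test.run_avocado()               # PASS if the command exits 0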
    """

    def __init__(self, path, params=None, base_logdir=None, tag=None, job=None):
        self.path = os.path.abspath(path)
        super(SimpleTest, self).__init__(name=path, base_logdir=base_logdir,
                                         params=params, tag=tag, job=job)
        basedir = os.path.dirname(self.path)
        basename = os.path.basename(self.path)
        datadirname = basename + '.data'
        self.datadir = os.path.join(basedir, datadirname)
        self.expected_stdout_file = os.path.join(self.datadir,
                                                 'stdout.expected')
        self.expected_stderr_file = os.path.join(self.datadir,
                                                 'stderr.expected')
    def _log_detailed_cmd_info(self, result):
        """
        Log detailed command information.

        :param result: :class:`avocado.utils.process.CmdResult` instance.
        """
        run_info = str(result)
        for line in run_info.splitlines():
            self.log.info(line)

    def action(self):
        """
        Run the executable, and log its detailed execution.
        """
        try:
            test_params = {str(key): str(val)
                           for key, val in self.params.iteritems()}
            result = process.run(self.path, verbose=True, env=test_params)
            self._log_detailed_cmd_info(result)
        except exceptions.CmdError, details:
            self._log_detailed_cmd_info(details.result)
            raise exceptions.TestFail(details)


class MissingTest(Test):

    """
    Handle when there is no such test module in the test directory.
    """

    def __init__(self, name=None, params=None, base_logdir=None, tag=None,
                 job=None, runner_queue=None):
        super(MissingTest, self).__init__(name=name,
                                          base_logdir=base_logdir,
                                          tag=tag, job=job,
                                          runner_queue=runner_queue)

    def action(self):
        e_msg = ('Test %s could not be found in the test dir %s '
                 '(or test path does not exist)' %
                 (self.name, data_dir.get_test_dir()))
        raise exceptions.TestNotFoundError(e_msg)


class BuggyTest(Test):

    """
    Used when the python module could not be imported.

    That means it is possibly a buggy test, but it could also be a random
    buggy Python module.
    """

    def __init__(self, name=None, params=None, base_logdir=None, tag=None,
                 job=None, runner_queue=None):
        super(BuggyTest, self).__init__(name=name,
                                        base_logdir=base_logdir,
                                        params=params,
                                        tag=tag, job=job,
                                        runner_queue=runner_queue)

    def action(self):
        # pylint: disable=E0702
        raise self.params.get('exception')


class NotATest(Test):

    """
    The file is not a test.

    Either a non-executable Python module with no avocado test class in it,
    or a regular, non-executable file.
    """

    def __init__(self, name=None, params=None, base_logdir=None, tag=None,
                 job=None, runner_queue=None):
        super(NotATest, self).__init__(name=name,
                                       base_logdir=base_logdir,
                                       tag=tag, job=job,
                                       runner_queue=runner_queue)

    def action(self):
        e_msg = ('File %s is not executable and does not contain an avocado '
                 'test class in it' % self.name)
        raise exceptions.NotATestError(e_msg)


class RemoteTest(object):

    """
    Mimics :class:`avocado.test.Test` for remote tests.
    """

    def __init__(self, name, status, time, start, end):
        note = "Not supported yet"
        self.name = name
        self.tagged_name = name
        self.status = status
        self.time_elapsed = time
        self.time_start = start
        self.time_end = end
        self.fail_class = note
        self.traceback = note
        self.text_output = note
        self.fail_reason = note
        self.whiteboard = ''
        self.job_unique_id = ''

    def get_state(self):
        """
        Serialize selected attributes representing the test state

        :returns: a dictionary containing relevant test state data
        :rtype: dict
        """
        d = self.__dict__
        d['class_name'] = self.__class__.__name__
        return d