Unverified commit e50041a8 authored by Lukáš Doktor

Merging pull request 1992

Signed-off-by: Lukáš Doktor <ldoktor@redhat.com>

* https://github.com/avocado-framework/avocado:
  Human interface: do not show tests execution time
  Human Interface: show job execution time
  Job: add time accounting
  Job Pre/Post plugins: move them to the appropriate location
@@ -170,7 +170,7 @@ To run a test, call the ``run`` command::
JOB LOG : <job-results>/job-<date>-<shortid>/job.log
(1/1) <examples_path>/tests/passtest.sh: PASS (0.04 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0
TESTS TIME : 0.04 s
JOB TIME : 0.14 s
To continue exploring Avocado, check out the output of ``avocado --help``
and the test runner man-page, accessible via ``man avocado``.
@@ -24,6 +24,7 @@ import re
import shutil
import sys
import tempfile
import time
import traceback
from . import version
@@ -108,6 +109,15 @@ class Job(object):
        self.result = result.Result(self)
        self.sysinfo = None
        self.timeout = getattr(self.args, 'job_timeout', 0)
        #: The time at which the job has started or `-1` if it has not been
        #: started by means of the `run()` method.
        self.time_start = -1
        #: The time at which the job has finished or `-1` if it has not yet
        #: finished (set at the end of the `run()` method).
        self.time_end = -1
        #: The total amount of time the job took from start to finish,
        #: or `-1` if the job has not yet finished via the `run()` method.
        self.time_elapsed = -1
        self.__logging_handlers = {}
        self.__start_job_logging()
        self.funcatexit = data_structures.CallbackRegister("JobExit %s"
@@ -122,9 +132,6 @@ class Job(object):
        #: test was found during resolution.
        self.test_suite = None
        # A job may not have a dispatcher for pre/post tests execution plugins
        self._job_pre_post_dispatcher = None
        # The result events dispatcher is shared with the test runner.
        # Because of our goal to support using the phases of a job
        # freely, let's get the result events dispatcher ready early.
@@ -422,11 +429,8 @@ class Job(object):
        Run the pre tests execution hooks

        By default this runs the plugins that implement the
        :class:`avocado.core.plugin_interfaces.JobPre` interface.
        :class:`avocado.core.plugin_interfaces.JobPreTests` interface.
        """
        self._job_pre_post_dispatcher = dispatcher.JobPrePostDispatcher()
        output.log_plugin_failures(self._job_pre_post_dispatcher.load_failures)
        self._job_pre_post_dispatcher.map_method('pre', self)
        self._result_events_dispatcher.map_method('pre_tests', self)

    def run_tests(self):
@@ -472,12 +476,9 @@ class Job(object):
        Run the post tests execution hooks

        By default this runs the plugins that implement the
        :class:`avocado.core.plugin_interfaces.JobPost` interface.
        :class:`avocado.core.plugin_interfaces.JobPostTests` interface.
        """
        if self._job_pre_post_dispatcher is None:
            self._job_pre_post_dispatcher = dispatcher.JobPrePostDispatcher()
            output.log_plugin_failures(self._job_pre_post_dispatcher.load_failures)
        self._job_pre_post_dispatcher.map_method('post', self)
        self._result_events_dispatcher.map_method('post_tests', self)

    def run(self):
        """
@@ -489,6 +490,8 @@ class Job(object):
        :return: Integer with overall job status. See
                 :mod:`avocado.core.exit_codes` for more information.
        """
        if self.time_start == -1:
            self.time_start = time.time()
        runtime.CURRENT_JOB = self
        try:
            self.create_test_suite()
@@ -521,6 +524,9 @@ class Job(object):
            return self.exitcode
        finally:
            self.post_tests()
            if self.time_end == -1:
                self.time_end = time.time()
                self.time_elapsed = self.time_end - self.time_start
            self.__stop_job_logging()
@@ -573,7 +573,6 @@ class TestRunner(object):
        if self.job.sysinfo is not None:
            self.job.sysinfo.end_job_hook()
        self.result.end_tests()
        self.job._result_events_dispatcher.map_method('post_tests', self.job)
        self.job.funcatexit.run()
        signal.signal(signal.SIGTSTP, signal.SIG_IGN)
        return summary
@@ -18,6 +18,7 @@ Human result UI
import logging
from avocado.core.plugin_interfaces import ResultEvents
from avocado.core.plugin_interfaces import JobPre, JobPost
from avocado.core import output
@@ -98,4 +99,22 @@ class Human(ResultEvents):
                      job.result.errors, job.result.failed, job.result.skipped,
                      job.result.warned, job.result.interrupted,
                      job.result.cancelled)
        self.log.info("TESTS TIME : %.2f s", job.result.tests_total_time)


class HumanJob(JobPre, JobPost):

    """
    Human result UI
    """

    name = 'human'
    description = "Human Interface UI"

    def pre(self, job):
        pass

    def post(self, job):
        if job.time_elapsed != -1:
            if not getattr(job.args, 'stdout_claimed_by', None):
                log = logging.getLogger("avocado.app")
                log.info("JOB TIME : %.2f s", job.time_elapsed)
@@ -23,8 +23,10 @@ import sys
from avocado.core import exit_codes
from avocado.core import job
from avocado.core import loader
from avocado.core import output
from avocado.core.plugin_interfaces import CLICmd
from avocado.core.dispatcher import ResultDispatcher
from avocado.core.dispatcher import JobPrePostDispatcher
from avocado.core.settings import settings
from avocado.utils.data_structures import time_to_seconds
@@ -165,8 +167,19 @@ class Run(CLICmd):
        except ValueError as e:
            log.error(e.message)
            sys.exit(exit_codes.AVOCADO_FAIL)
        job_instance = job.Job(args)
        job_run = job_instance.run()
        pre_post_dispatcher = JobPrePostDispatcher()
        try:
            # Run JobPre plugins
            output.log_plugin_failures(pre_post_dispatcher.load_failures)
            pre_post_dispatcher.map_method('pre', job_instance)
            job_run = job_instance.run()
        finally:
            # Run JobPost plugins
            pre_post_dispatcher.map_method('post', job_instance)
        result_dispatcher = ResultDispatcher()
        if result_dispatcher.extensions:
            result_dispatcher.map_method('render',
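One consequence of moving the dispatch into the ``run`` command: code driving `Job` directly (as the unit tests near the bottom of the diff do) no longer triggers the `JobPre`/`JobPost` plugins. A sketch of such direct use, assuming `/tmp/myjob` is an existing, writable directory:

```python
import argparse

from avocado.core import job

# mirrors the unit tests below; '/tmp/myjob' is a hypothetical logdir
args = argparse.Namespace(logdir='/tmp/myjob')
myjob = job.Job(args)
myjob.run()                # JobPre/JobPost plugins are NOT dispatched here
print(myjob.time_elapsed)  # populated by the new time accounting
```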
@@ -190,7 +190,7 @@ recognizable name::
JOB LOG : $HOME/avocado/job-results/job-2014-08-12T15.39-381b849a/job.log
(1/1) /bin/true: PASS (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.01 s
JOB TIME : 0.11 s
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.39-381b849a/html/results.html
You probably noticed that we used ``/bin/true`` as a test, and in accordance with our
@@ -216,7 +216,7 @@ using the ``--dry-run`` argument::
JOB LOG : /tmp/avocado-dry-runSeWniM/job-2015-10-16T15.46-0000000/job.log
(1/1) /bin/true: SKIP
RESULTS : PASS 0 | ERROR 0 | FAIL 0 | SKIP 1 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.00 s
JOB TIME : 0.10 s
JOB HTML : /tmp/avocado-dry-runSeWniM/job-2015-10-16T15.46-0000000/html/results.html
which supports all ``run`` arguments, simulates the run and even lists the test params.
@@ -304,7 +304,7 @@ instrumented and simple tests::
(5/6) synctest.py:SyncTest.test: PASS (2.44 s)
(6/6) /tmp/simple_test.sh.1: PASS (0.02 s)
RESULTS : PASS 4 | ERROR 0 | FAIL 2 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 5.88 s
JOB TIME : 5.98 s
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.42-86911e49/html/results.html
Interrupting The Job On First Failed Test (failfast)
@@ -320,7 +320,7 @@ on first failed test::
(2/4) /bin/false: FAIL (0.01 s)
Interrupting job (failfast).
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 2 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.02 s
JOB TIME : 0.12 s
JOB HTML : /home/apahim/avocado/job-results/job-2016-07-19T09.43-eaf51b8/html/results.html
The ``--failfast`` option accepts the argument ``off``. Since it's disabled
@@ -366,7 +366,7 @@ files with shell code could be considered tests::
(1/2) /tmp/pass: PASS (0.01 s)
(2/2) /tmp/fail: FAIL (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.01 s
JOB TIME : 0.11 s
JOB HTML : /home/<user>/avocado/job-results/job-<date>-<shortid>/html/results.html
This example is pretty obvious, and could be achieved by giving
@@ -383,7 +383,7 @@ But now consider the following example::
(1/2) http://local-avocado-server:9405/jobs/: PASS (0.02 s)
(2/2) http://remote-avocado-server:9405/jobs/: FAIL (3.02 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 3.04 s
JOB TIME : 3.14 s
JOB HTML : /home/<user>/avocado/job-results/job-<date>-<shortid>/html/results.html
This effectively makes `/bin/curl` an "external test runner", responsible for
@@ -19,7 +19,7 @@ Let's see an example. First, running a simple job with two test references::
(1/2) /bin/true: PASS (0.01 s)
(2/2) /bin/false: FAIL (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.02 s
JOB TIME : 0.12 s
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T16.14-825b860/html/results.html
Now we can replay the job by running::
@@ -31,7 +31,7 @@ Now we can replay the job by running::
(1/2) /bin/true: PASS (0.01 s)
(2/2) /bin/false: FAIL (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.01 s
JOB TIME : 0.11 s
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T16.18-55a0d10/html/results.html
The replay feature will retrieve the original test references, the variants
@@ -90,7 +90,7 @@ mux yaml file::
(47/48) /bin/false;23: FAIL (0.01 s)
(48/48) /bin/false;24: FAIL (0.01 s)
RESULTS : PASS 24 | ERROR 0 | FAIL 24 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.29 s
JOB TIME : 0.19 s
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T21.56-bd6aa3b/html/results.html
We can replay the job as is, using ``$ avocado run --replay latest``,
@@ -104,7 +104,7 @@ or replay the job ignoring the variants, as below::
(1/2) /bin/true: PASS (0.01 s)
(2/2) /bin/false: FAIL (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.02 s
JOB TIME : 0.12 s
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T22.01-d5a4618/html/results.html
Also, it is possible to replay only the variants that faced a given
@@ -163,7 +163,7 @@ result, using the option ``--replay-test-status``. See the example below::
(47/48) /bin/false;23: FAIL (0.01 s)
(48/48) /bin/false;24: FAIL (0.01 s)
RESULTS : PASS 0 | ERROR 0 | FAIL 24 | SKIP 24 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.19 s
JOB TIME : 0.29 s
JOB HTML : $HOME/avocado/job-results/job-2016-01-12T00.38-2e1dc41/html/results.html
Of which one special example is ``--replay-test-status INTERRUPTED``
@@ -186,7 +186,7 @@ below::
JOB LOG : /tmp/avocado_results/job-2016-01-11T22.10-f1b1c87/job.log
(1/1) /bin/true: PASS (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.01 s
JOB TIME : 0.11 s
JOB HTML : /tmp/avocado_results/job-2016-01-11T22.10-f1b1c87/html/results.html
If we try to replay the job, it fails::
@@ -202,5 +202,5 @@ In this case, we have to inform where the job results directory is located::
JOB LOG : $HOME/avocado/job-results/job-2016-01-11T22.15-19c76ab/job.log
(1/1) /bin/true: PASS (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.01 s
JOB TIME : 0.11 s
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T22.15-19c76ab/html/results.html
@@ -31,7 +31,7 @@ that is, the job and its test(s) results are constantly updated::
(2/3) failtest.py:FailTest.test: FAIL (0.00 s)
(3/3) synctest.py:SyncTest.test: PASS (1.98 s)
RESULTS : PASS 1 | ERROR 1 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 3.17 s
JOB TIME : 3.27 s
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.57-5ffe4792/html/results.html
The most important thing is to remember that programs should never need to parse
@@ -59,7 +59,7 @@ Once the remote machine is properly set, you may run your test. Example::
(1/2) examples/tests/sleeptest.py: PASS (1.00 s)
(2/2) examples/tests/failtest.py: FAIL (0.00 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 1.01 s
JOB TIME : 1.11 s
A bit of extra logging information is added to your job summary, mainly
to distinguish the regular execution from the remote one. Note here that
@@ -136,7 +136,7 @@ Once the virtual machine is properly set, you may run your test. Example::
(1/2) examples/tests/sleeptest.py:SleepTest.test: PASS (1.00 s)
(2/2) examples/tests/failtest.py:FailTest.test: FAIL (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 1.01 s
JOB TIME : 1.11 s
A bit of extra logging information is added to your job summary, mainly
to distinguish the regular execution from the remote one. Note here that
@@ -212,7 +212,7 @@ command similar to::
(2/3) /avocado_remote_test_dir/$HOME/warntest.py:WarnTest.test: WARN (0.00 s)
(3/3) /avocado_remote_test_dir/$HOME/failtest.py:FailTest.test: FAIL (0.00 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 1 | INTERRUPT 0
TESTS TIME : 0.00 s
JOB TIME : 0.10 s
JOB HTML : $HOME/avocado/job-results/job-2016-07-25T08.01-db309f5/html/results.html
Environment Variables
@@ -340,7 +340,7 @@ values). In total it'll produce 8 variants of each test::
(7/8) passtest.py:PassTest.test;7: PASS (0.01 s)
(8/8) passtest.py:PassTest.test;8: PASS (0.01 s)
RESULTS : PASS 8 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.06 s
JOB TIME : 0.16 s
There are other options to influence the params, so please check out
``avocado run -h``; for details, see :doc:`TestParameters`.
@@ -416,7 +416,7 @@ The outcome should be similar to::
progress: 1-plant.py:Plant.test_plant_organic: harvesting organic avocados on row 2
PASS (7.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 7.01 s
JOB TIME : 7.11 s
JOB HTML : /home/cleber/avocado/job-results/job-2016-03-18T10.29-af786f8/html/results.html
The custom ``progress`` stream is combined with the application output, which
@@ -721,7 +721,7 @@ option --output-check-record all to the test runner::
JOB LOG : $HOME/avocado/job-results/job-2014-09-25T20.20-bcd05e4/job.log
(1/1) synctest.py:SyncTest.test: PASS (2.20 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 2.20 s
JOB TIME : 2.30 s
After the reference files are added, the check process is transparent, in the sense
@@ -749,7 +749,7 @@ Let's record the output for this one::
JOB LOG : $HOME/avocado/job-results/job-2014-09-25T20.49-25c4244/job.log
(1/1) output_record.sh: PASS (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.01 s
JOB TIME : 0.11 s
After this is done, you'll notice that the test data directory
appeared at the same level as our shell script, containing 2 files::
@@ -773,7 +773,7 @@ happens if we change the ``stdout.expected`` file contents to ``Hello, Avocado!``
JOB LOG : $HOME/avocado/job-results/job-2014-09-25T20.52-f0521e5/job.log
(1/1) output_record.sh: FAIL (0.02 s)
RESULTS : PASS 0 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.02 s
JOB TIME : 0.12 s
Verifying the failure reason::
@@ -902,7 +902,7 @@ the test parameters, as shown below.
JOB LOG : $HOME/avocado/job-results/job-2016-11-02T11.13-c78464b/job.log
(1/1) sleeptest.py:SleepTest.test: INTERRUPTED (3.04 s)
RESULTS : PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 1
TESTS TIME : 3.04 s
JOB TIME : 3.14 s
JOB HTML : $HOME/avocado/job-results/job-2016-11-02T11.13-c78464b/html/results.html
@@ -991,7 +991,7 @@ Will produce the following result::
JOB LOG : $HOME/avocado/job-results/job-2017-02-03T17.16-1bd8642/job.log
(1/1) test_skip_method.py:MyTestClass.test: SKIP
RESULTS : PASS 0 | ERROR 0 | FAIL 0 | SKIP 1 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.00 s
JOB TIME : 0.10 s
JOB HTML : $HOME/avocado/job-results/job-2017-02-03T17.16-1bd8642/html/results.html
Notice that the `tearDown()` will not be executed when `skip()` is used.
@@ -1037,7 +1037,7 @@ Will produce the following result::
(2/3) test_skip_decorators.py:MyTest.test2: SKIP
(3/3) test_skip_decorators.py:MyTest.test3: PASS (0.02 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 2 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.03 s
JOB TIME : 0.13 s
JOB HTML : $HOME/avocado/job-results/job-2017-02-03T17.41-59c815f/html/results.html
Notice the ``test3`` was not skipped because the provided condition was
@@ -1097,7 +1097,7 @@ the correct version, the result will be::
(1/2) /home/apahim/avocado/tests/test_cancel.py:CancelTest.test_iperf: CANCEL (1.15 s)
(2/2) /home/apahim/avocado/tests/test_cancel.py:CancelTest.test_gcc: PASS (1.13 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1
TESTS TIME : 2.28 s
JOB TIME : 2.38 s
JOB HTML : $HOME/avocado/job-results/job-2017-03-10T16.22-39c1f12/html/results.html
Notice that using the `self.cancel()` will cancel the rest of the test
@@ -485,7 +485,7 @@ test directories. The output should be similar to::
JOB LOG : /home/<user>/avocado/job-results/job-<date>-<shortid>/job.log
(1/1) sleeptest.py:SleepTest.test: PASS (1.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 1.01 s
JOB TIME : 1.11 s
The test directories will vary depending on your system and installation
method used. Still, it's pretty easy to find that out as shown in the
@@ -711,7 +711,7 @@ And the output should look like::
(3/4) sleeptest.py:SleepTest.test;3: PASS (5.02 s)
(4/4) sleeptest.py:SleepTest.test;4: PASS (10.01 s)
RESULTS : PASS 4 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 16.55 s
JOB TIME : 16.65 s
The `multiplex` plugin and the test runner support two kinds of global
filters, through the command line options `--mux-filter-only` and
@@ -903,7 +903,7 @@ files with shell code could be considered tests::
(1/2) /tmp/pass: PASS (0.01 s)
(2/2) /tmp/fail: FAIL (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.01 s
JOB TIME : 0.11 s
This example is pretty obvious, and could be achieved by giving
`/tmp/pass` and `/tmp/fail` shell "shebangs" (`#!/bin/sh`), making
@@ -920,7 +920,7 @@ But now consider the following example::
(1/2) http://local-avocado-server:9405/jobs/: PASS (0.02 s)
(2/2) http://remote-avocado-server:9405/jobs/: FAIL (3.02 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 3.04 s
JOB TIME : 3.14 s
This effectively makes `/bin/curl` an "external test runner",
responsible for trying to fetch those URLs, and reporting PASS or FAIL
@@ -958,7 +958,7 @@ passing the option --output-check-record all to the test runner::
JOB LOG : /home/<user>/avocado/job-results/job-<date>-<shortid>/job.log
(1/1) examples/tests/synctest.py:SyncTest.test: PASS (4.00 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 4.00 s
JOB TIME : 4.10 s
After the reference files are added, the check process is transparent,
in the sense that you do not need to provide special flags to the test
@@ -990,7 +990,7 @@ Let's record the output (both stdout and stderr) for this one::
TESTS : 1
(1/1) home/$USER/Code/avocado/output_record.sh: PASS (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.01 s
JOB TIME : 0.11 s
After this is done, you'll notice that the test data directory
appeared at the same level as our shell script, containing 2 files::
@@ -1034,7 +1034,7 @@ The output should look like::
LOGIN : fedora@localhost:22 (TIMEOUT: 60 seconds)
(1/1) sleeptest.py:SleepTest.test: PASS (1.02 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 1.02 s
JOB TIME : 1.12 s
For more information, please consult the topic Remote Machine Plugin
in Avocado's online documentation.
@@ -83,7 +83,7 @@ class InterruptTest(unittest.TestCase):
        # We have to actually wait 2 seconds until the ignore window is over
        time.sleep(2.5)
        proc.sendline('\x03')
        proc.read_until_last_line_matches('TESTS TIME : %d s')
        proc.read_until_last_line_matches('JOB TIME : %d s')
        wait.wait_for(lambda: not proc.is_alive(), timeout=1)
        # Make sure the bad test will be really gone from the process table
@@ -139,7 +139,7 @@ class InterruptTest(unittest.TestCase):
        proc = aexpect.Expect(command=cmd_line, linesep='')
        proc.read_until_last_line_matches(os.path.basename(good_test.path))
        proc.sendline('\x03')
        proc.read_until_last_line_matches('TESTS TIME : %d s')
        proc.read_until_last_line_matches('JOB TIME : %d s')
        wait.wait_for(lambda: not proc.is_alive(), timeout=1)
        # Make sure the good test will be really gone from the process table
@@ -4,16 +4,18 @@ import shutil
import tempfile
import unittest

from avocado.core import data_dir
from avocado.core import exceptions
from avocado.core import test
from avocado.core import job
from avocado.core import exit_codes
from avocado.core import job
from avocado.core import test
from avocado.utils import path as utils_path


class JobTest(unittest.TestCase):

    def setUp(self):
        data_dir._tmp_tracker.unittest_refresh_dir_tracker()
        self.tmpdir = tempfile.mkdtemp(prefix="avocado_" + __name__)

    @staticmethod
@@ -104,7 +106,6 @@ class JobTest(unittest.TestCase):
        self.assertEqual(myjob.unique_id[::-1],
                         open(os.path.join(myjob.logdir, "reversed_id")).read())

    @unittest.skip("Issue described at https://trello.com/c/qgSTIK0Y")
    def test_job_run(self):
        class JobFilterLog(job.Job):
            def pre_tests(self):
@@ -130,7 +131,29 @@ class JobTest(unittest.TestCase):
        self.assertEqual(myjob.unique_id[::-1],
                         open(os.path.join(myjob.logdir, "reversed_id")).read())

    def test_job_run_account_time(self):
        args = argparse.Namespace(logdir=self.tmpdir)
        myjob = job.Job(args)
        myjob.run()
        self.assertNotEqual(myjob.time_start, -1)
        self.assertNotEqual(myjob.time_end, -1)
        self.assertNotEqual(myjob.time_elapsed, -1)

    def test_job_self_account_time(self):
        args = argparse.Namespace(logdir=self.tmpdir)
        myjob = job.Job(args)
        myjob.time_start = 10.0
        myjob.run()
        myjob.time_end = 20.0
        # forcing a different value to check if it's not being
        # calculated when time_start or time_end are manually set
        myjob.time_elapsed = 100.0
        self.assertEqual(myjob.time_start, 10.0)
        self.assertEqual(myjob.time_end, 20.0)
        self.assertEqual(myjob.time_elapsed, 100.0)

    def tearDown(self):
        data_dir._tmp_tracker.unittest_refresh_dir_tracker()
        shutil.rmtree(self.tmpdir)
@@ -154,6 +154,7 @@ if __name__ == '__main__':
        'avocado.plugins.job.prepost': [
            'jobscripts = avocado.plugins.jobscripts:JobScripts',
            'teststmpdir = avocado.plugins.teststmpdir:TestsTmpDir',
            'human = avocado.plugins.human:HumanJob',
        ],
        'avocado.plugins.result': [
            'xunit = avocado.plugins.xunit:XUnitResult',
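The new `human` entry in the `avocado.plugins.job.prepost` group above is what lets the ``run`` command's `JobPrePostDispatcher` find `HumanJob`. A third-party package could register a hook the same way; a sketch with made-up names (`mypkg`, `MyHook`):

```python
# hypothetical third-party setup.py
from setuptools import setup

setup(
    name='mypkg',
    py_modules=['mypkg'],
    entry_points={
        'avocado.plugins.job.prepost': [
            'my-hook = mypkg:MyHook',
        ],
    },
)
```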