Unverified commit 0c7e8233 authored by Lukáš Doktor

Merging pull request 1447

* https://github.com/avocado-framework/avocado:
  HTML Result: host it in a directory outside the avocado package
  HTML Result: move rendering code from core to the plugin itself
  HTML Result: add missing fields to formatted version of test info
  HTML result: use names closer to underlying result instance
  Makefile: smokecheck also needs clean/develop
  Makefile: fix test name used on smokecheck
......@@ -129,8 +129,8 @@ requirements-plugins: requirements
do AVOCADO_DIRNAME=$(AVOCADO_DIRNAME) make -C $$MAKEFILE requirements &>/dev/null && echo ">> DONE $$MAKEFILE" || echo ">> SKIP $$MAKEFILE";\
done
smokecheck:
./scripts/avocado run passtest
smokecheck: clean develop
./scripts/avocado run passtest.py
check: clean develop check_cyclical modules_boundaries
selftests/checkall
......
......@@ -7,7 +7,7 @@
Summary: Avocado Test Framework
Name: avocado
Version: 40.0
Release: 0%{?dist}
Release: 1%{?dist}
License: GPLv2
Group: Development/Tools
URL: http://avocado-framework.github.io/
......@@ -39,10 +39,16 @@ these days a framework) to perform automated testing.
%build
%{__python} setup.py build
cd optional_plugins/html
%{__python} setup.py build
cd ../../
%{__make} man
%install
%{__python} setup.py install --root %{buildroot} --skip-build
cd optional_plugins/html
%{__python} setup.py install --root %{buildroot} --skip-build
cd ../../
%{__mkdir} -p %{buildroot}%{_mandir}/man1
%{__install} -m 0644 man/avocado.1 %{buildroot}%{_mandir}/man1/avocado.1
%{__install} -m 0644 man/avocado-rest-client.1 %{buildroot}%{_mandir}/man1/avocado-rest-client.1
......@@ -78,8 +84,7 @@ selftests/run
%{_mandir}/man1/avocado-rest-client.1.gz
%{_docdir}/avocado/avocado.rst
%{_docdir}/avocado/avocado-rest-client.rst
%exclude %{python_sitelib}/avocado/plugins/html.py*
%exclude %{python_sitelib}/avocado/core/resources/htmlresult/*
%exclude %{python_sitelib}/avocado_result_html*
%{_libexecdir}/avocado/avocado-bash-utils
%{_libexecdir}/avocado/avocado_debug
%{_libexecdir}/avocado/avocado_error
......@@ -96,8 +101,7 @@ directory. It also gives the user the ability to write a report on an
arbitrary filesystem location.
%files plugins-output-html
%{python_sitelib}/avocado/plugins/html.py*
%{python_sitelib}/avocado/core/resources/htmlresult/*
%{python_sitelib}/avocado_result_html*
%package examples
Summary: Avocado Test Framework Example Tests
......@@ -113,6 +117,9 @@ examples of how to write tests on your own.
%{_datadir}/avocado/wrappers
%changelog
* Tue Sep 6 2016 Cleber Rosa <cleber@redhat.com> - 40.0-1
- Adapt build of now separate html plugin
* Tue Aug 16 2016 Cleber Rosa <cleber@redhat.com> - 40.0-0
- New upstream release
......
......@@ -53,13 +53,6 @@ from ..utils import stacktrace
from ..utils import data_structures
try:
from . import html
HTML_REPORT_SUPPORT = html.check_resource_requirements()
except ImportError:
HTML_REPORT_SUPPORT = False
_NEW_ISSUE_LINK = 'https://github.com/avocado-framework/avocado/issues/new'
_TEST_LOGGER = logging.getLogger('avocado.test')
......@@ -281,12 +274,6 @@ class Job(object):
# If there are any active output plugins, let's use them
self._set_output_plugins()
# Setup the html output to the results directory
if HTML_REPORT_SUPPORT:
html_file = os.path.join(self.logdir, 'html', 'results.html')
html_plugin = html.HTMLResult(self, html_file)
self.result_proxy.add_output_plugin(html_plugin)
if not getattr(self.args, 'stdout_claimed_by', False) or self.standalone:
human_plugin = result.HumanResult(self)
self.result_proxy.add_output_plugin(human_plugin)
......
......@@ -20,7 +20,6 @@ It also contains the most basic result class, HumanResult, used by the
test runner.
"""
import os
import logging
from . import output
......@@ -216,11 +215,6 @@ class HumanResult(Result):
"WARN %d | INTERRUPT %s", self.passed,
self.errors, self.failed, self.skipped,
self.warned, self.interrupted)
if self.args is not None:
if 'html_output' in self.args:
logdir = os.path.dirname(self.logfile)
html_file = os.path.join(logdir, 'html', 'results.html')
self.log.info("JOB HTML : %s", html_file)
self.log.info("TESTS TIME : %.2f s", self.tests_total_time)
def start_test(self, state):
......
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
"""
HTML output module.
"""
import logging
import sys
from avocado.core import exit_codes
from avocado.core.html import HTMLResult
from avocado.core.result import register_test_result_class
from avocado.core.plugin_interfaces import CLI
class HTML(CLI):

    """
    HTML job report: registers the --html / --open-browser command line
    options and, when requested, installs HTMLResult as the test result
    class for the job.
    """

    name = 'htmlresult'
    description = "HTML job report options for 'run' subcommand"

    def configure(self, parser):
        # Only the 'run' subcommand produces job results, so the HTML
        # options are attached to it; bail out when it is not available.
        subparser = parser.subcommands.choices.get('run', None)
        if subparser is None:
            return
        subparser.output.add_argument(
            '--html', type=str,
            dest='html_output', metavar='FILE',
            help=('Enable HTML output to the FILE where the result should be '
                  'written. The value - (output to stdout) is not supported '
                  'since not all HTML resources can be embedded into a '
                  'single file (page resources will be copied to the '
                  'output file dir)'))
        subparser.output.add_argument(
            '--open-browser',
            dest='open_browser',
            action='store_true',
            default=False,
            help='Open the generated report on your preferred browser. '
                 'This works even if --html was not explicitly passed, '
                 'since an HTML report is always generated on the job '
                 'results dir. Current: %s' % False)

    def run(self, args):
        # Writing the report to stdout is refused up front: the rendered
        # page references external resources that cannot live in one stream.
        if 'html_output' in args and args.html_output == '-':
            log = logging.getLogger("avocado.app")
            log.error('HTML to stdout not supported (not all HTML resources '
                      'can be embedded on a single file)')
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)
        if 'html_output' in args and args.html_output is not None:
            register_test_result_class(args, HTMLResult)
......@@ -103,8 +103,8 @@ recognizable name::
TESTS : 1
(1/1) /bin/true: PASS (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.39-381b849a/html/results.html
TESTS TIME : 0.01 s
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.39-381b849a/html/results.html
You probably noticed that we used ``/bin/true`` as a test, and in accordance with our
expectations, it passed! These are known as `simple tests`, but there is also another
......@@ -123,8 +123,8 @@ using the ``--dry-run`` argument::
TESTS : 1
(1/1) /bin/true: SKIP
RESULTS : PASS 0 | ERROR 0 | FAIL 0 | SKIP 1 | WARN 0 | INTERRUPT 0
JOB HTML : /tmp/avocado-dry-runSeWniM/job-2015-10-16T15.46-0000000/html/results.html
TESTS TIME : 0.00 s
JOB HTML : /tmp/avocado-dry-runSeWniM/job-2015-10-16T15.46-0000000/html/results.html
which supports all ``run`` arguments, simulates the run and even lists the test params.
......@@ -213,8 +213,8 @@ instrumented and simple tests::
(6/6) /bin/true: PASS (0.00 s)
(6/6) /tmp/simple_test.sh.1: PASS (0.02 s)
RESULTS : PASS 2 | ERROR 2 | FAIL 2 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.42-86911e49/html/results.html
TESTS TIME : 5.88 s
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.42-86911e49/html/results.html
Interrupting The Job On First Failed Test (failfast)
====================================================
......@@ -230,8 +230,8 @@ on first failed test::
(2/4) /bin/false: FAIL (0.01 s)
Interrupting job (failfast).
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 2 | WARN 0 | INTERRUPT 0
JOB HTML : /home/apahim/avocado/job-results/job-2016-07-19T09.43-eaf51b8/html/results.html
TESTS TIME : 0.02 s
JOB HTML : /home/apahim/avocado/job-results/job-2016-07-19T09.43-eaf51b8/html/results.html
The ``--failfast`` option accepts the argument ``off``. Since it's disabled
by default, the ``off`` argument only makes sense in replay jobs, when the
......@@ -277,8 +277,8 @@ files with shell code could be considered tests::
(1/2) /tmp/pass: PASS (0.01 s)
(2/2) /tmp/fail: FAIL (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : /home/<user>/avocado/job-results/job-<date>-<shortid>/html/results.html
TESTS TIME : 0.01 s
JOB HTML : /home/<user>/avocado/job-results/job-<date>-<shortid>/html/results.html
This example is pretty obvious, and could be achieved by giving
`/tmp/pass` and `/tmp/fail` shell "shebangs" (`#!/bin/sh`), making
......@@ -295,8 +295,8 @@ But now consider the following example::
(1/2) http://local-avocado-server:9405/jobs/: PASS (0.02 s)
(2/2) http://remote-avocado-server:9405/jobs/: FAIL (3.02 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : /home/<user>/avocado/job-results/job-<date>-<shortid>/html/results.html
TESTS TIME : 3.04 s
JOB HTML : /home/<user>/avocado/job-results/job-<date>-<shortid>/html/results.html
This effectively makes `/bin/curl` an "external test runner", responsible for
trying to fetch those URLs, and reporting PASS or FAIL for each of them.
......
......@@ -20,8 +20,8 @@ Let's see an example. First, running a simple job with two urls::
(1/2) /bin/true: PASS (0.01 s)
(2/2) /bin/false: FAIL (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T16.14-825b860/html/results.html
TESTS TIME : 0.02 s
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T16.14-825b860/html/results.html
Now we can replay the job by running::
......@@ -33,8 +33,8 @@ Now we can replay the job by running::
(1/2) /bin/true: PASS (0.01 s)
(2/2) /bin/false: FAIL (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T16.18-55a0d10/html/results.html
TESTS TIME : 0.01 s
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T16.18-55a0d10/html/results.html
The replay feature will retrieve the original job urls, the multiplex
tree and the configuration. Let's see another example, now using
......@@ -93,8 +93,8 @@ multiplex file::
(47/48) /bin/false;23: FAIL (0.01 s)
(48/48) /bin/false;24: FAIL (0.01 s)
RESULTS : PASS 24 | ERROR 0 | FAIL 24 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T21.56-bd6aa3b/html/results.html
TESTS TIME : 0.29 s
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T21.56-bd6aa3b/html/results.html
We can replay the job as is, using ``$ avocado run --replay latest``,
or replay the job ignoring the multiplex file, as below::
......@@ -108,8 +108,8 @@ or replay the job ignoring the multiplex file, as below::
(1/2) /bin/true: PASS (0.01 s)
(2/2) /bin/false: FAIL (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T22.01-d5a4618/html/results.html
TESTS TIME : 0.02 s
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T22.01-d5a4618/html/results.html
Also, it is possible to replay only the variants that faced a given
result, using the option ``--replay-test-status``. See the example below::
......@@ -168,8 +168,8 @@ result, using the option ``--replay-test-status``. See the example below::
(47/48) /bin/false;23: FAIL (0.01 s)
(48/48) /bin/false;24: FAIL (0.01 s)
RESULTS : PASS 0 | ERROR 0 | FAIL 24 | SKIP 24 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2016-01-12T00.38-2e1dc41/html/results.html
TESTS TIME : 0.19 s
JOB HTML : $HOME/avocado/job-results/job-2016-01-12T00.38-2e1dc41/html/results.html
When replaying jobs that were executed with the ``--failfast on`` option, you
can disable the ``failfast`` option using ``--failfast off`` in the replay job.
......@@ -186,8 +186,8 @@ below::
TESTS : 1
(1/1) /bin/true: PASS (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : /tmp/avocado_results/job-2016-01-11T22.10-f1b1c87/html/results.html
TESTS TIME : 0.01 s
JOB HTML : /tmp/avocado_results/job-2016-01-11T22.10-f1b1c87/html/results.html
Trying to replay the job, it fails::
......@@ -203,5 +203,5 @@ In this case, we have to inform where the job results directory is located::
TESTS : 1
(1/1) /bin/true: PASS (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T22.15-19c76ab/html/results.html
TESTS TIME : 0.01 s
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T22.15-19c76ab/html/results.html
......@@ -29,8 +29,8 @@ that is, the job and its test(s) results are constantly updated::
(2/3) failtest.py:FailTest.test: FAIL (0.00 s)
(3/3) synctest.py:SyncTest.test: PASS (1.98 s)
RESULTS : PASS 1 | ERROR 1 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.57-5ffe4792/html/results.html
TESTS TIME : 3.17 s
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.57-5ffe4792/html/results.html
The most important thing is to remember that programs should never need to parse
human output to figure out what happened to a test job run.
......
......@@ -190,8 +190,8 @@ command similar to::
(2/3) /avocado_remote_test_dir/$HOME/warntest.py:WarnTest.test: WARN (0.00 s)
(3/3) /avocado_remote_test_dir/$HOME/failtest.py:FailTest.test: FAIL (0.00 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 1 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2016-07-25T08.01-db309f5/html/results.html
TESTS TIME : 0.00 s
JOB HTML : $HOME/avocado/job-results/job-2016-07-25T08.01-db309f5/html/results.html
Environment Variables
=====================
......
......@@ -162,8 +162,8 @@ generation for sleeptest just like::
(2/3) sleeptest.py:SleepTest.test;2: PASS (1.00 s)
(3/3) sleeptest.py:SleepTest.test;3: PASS (5.00 s)
RESULTS : PASS 3 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.44-d565e8de/html/results.html
TESTS TIME : 6.50 s
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.44-d565e8de/html/results.html
The ``--multiplex`` accepts either only ``$FILE_LOCATION`` or ``$INJECT_TO:$FILE_LOCATION``.
As explained in :doc:`MultiplexConfig` without any path the content gets
......@@ -199,8 +199,8 @@ You can also execute multiple tests with the same multiplex file::
(7/8) synctest.py:SyncTest.test;3: PASS (2.46 s)
(8/8) synctest.py:SyncTest.test;4: PASS (2.45 s)
RESULTS : PASS 8 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2016-05-04T09.25-cd20fc8/html/results.html
TESTS TIME : 26.26 s
JOB HTML : $HOME/avocado/job-results/job-2016-05-04T09.25-cd20fc8/html/results.html
Advanced logging capabilities
......@@ -274,8 +274,8 @@ The outcome should be similar to::
progress: 1-plant.py:Plant.test_plant_organic: harvesting organic avocados on row 2
PASS (7.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : /home/cleber/avocado/job-results/job-2016-03-18T10.29-af786f8/html/results.html
TESTS TIME : 7.01 s
JOB HTML : /home/cleber/avocado/job-results/job-2016-03-18T10.29-af786f8/html/results.html
The custom ``progress`` stream is combined with the application output, which
may or may not suit your needs or preferences. If you want the ``progress``
......@@ -778,11 +778,11 @@ impact your test grid. You can account for that possibility and set up a
$ avocado run sleeptest.py --multiplex /tmp/sleeptest-example.yaml
JOB ID : 6d5a2ff16bb92395100fbc3945b8d253308728c9
JOB LOG : $HOME/avocado/job-results/job-2014-08-12T15.52-6d5a2ff1/job.log
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.52-6d5a2ff1/html/results.html
TESTS : 1
(1/1) sleeptest.py:SleepTest.test: ERROR (2.97 s)
RESULTS : PASS 0 | ERROR 1 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 2.97 s
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.52-6d5a2ff1/html/results.html
::
......@@ -864,12 +864,11 @@ This accomplishes a similar effect to the multiplex setup defined in there.
$ avocado run timeouttest.py
JOB ID : d78498a54504b481192f2f9bca5ebb9bbb820b8a
JOB LOG : $HOME/avocado/job-results/job-2014-08-12T15.54-d78498a5/job.log
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.54-d78498a5/html/results.html
TESTS : 1
(1/1) timeouttest.py:TimeoutTest.test: INTERRUPTED (3.04 s)
RESULTS : PASS 0 | ERROR 1 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 3.04 s
JOB HTML : $HOME/avocado/job-results/job-2014-08-12T15.54-d78498a5/html/results.html
::
......
......@@ -484,12 +484,12 @@ files with shell code could be considered tests::
$ avocado run --external-runner=/bin/sh /tmp/pass /tmp/fail
JOB ID : 4a2a1d259690cc7b226e33facdde4f628ab30741
JOB LOG : /home/<user>/avocado/job-results/job-<date>-<shortid>/job.log
JOB HTML : /home/<user>/avocado/job-results/job-<date>-<shortid>/html/results.html
TESTS : 2
(1/2) /tmp/pass: PASS (0.01 s)
(2/2) /tmp/fail: FAIL (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 0.01 s
JOB HTML : /home/<user>/avocado/job-results/job-<date>-<shortid>/html/results.html
This example is pretty obvious, and could be achieved by giving
`/tmp/pass` and `/tmp/fail` shell "shebangs" (`#!/bin/sh`), making
......@@ -502,12 +502,12 @@ But now consider the following example::
http://remote-avocado-server:9405/jobs/
JOB ID : 56016a1ffffaba02492fdbd5662ac0b958f51e11
JOB LOG : /home/<user>/avocado/job-results/job-<date>-<shortid>/job.log
JOB HTML : /home/<user>/avocado/job-results/job-<date>-<shortid>/html/results.html
TESTS : 2
(1/2) http://local-avocado-server:9405/jobs/: PASS (0.02 s)
(2/2) http://remote-avocado-server:9405/jobs/: FAIL (3.02 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
TESTS TIME : 3.04 s
JOB HTML : /home/<user>/avocado/job-results/job-<date>-<shortid>/html/results.html
This effectively makes `/bin/curl` an "external test runner", responsible for
trying to fetch those URLs, and reporting PASS or FAIL for each of them.
......
recursive-include avocado_result_html/resources *
\ No newline at end of file
......@@ -14,28 +14,20 @@
"""
HTML output module.
"""
import codecs
import logging
import os
import shutil
import time
import subprocess
import urllib
import sys
import time
import pystache
import pkg_resources
import pystache
from .result import Result
def check_resource_requirements():
"""
Checks if necessary resource files to render the report are in place
Currently, only the template file is looked for
"""
return pkg_resources.resource_exists(
'avocado.core',
'resources/htmlresult/templates/report.mustache')
from avocado.core import exit_codes
from avocado.core.plugin_interfaces import CLI, Result
class ReportModel(object):
......@@ -62,10 +54,10 @@ class ReportModel(object):
else:
return value
def job_id(self):
def job_unique_id(self):
return self.result.job_unique_id
def execution_time(self):
def tests_total_time(self):
return "%.2f" % self.result.tests_total_time
def results_dir(self, relative_links=True):
......@@ -85,7 +77,7 @@ class ReportModel(object):
self.html_output_dir)
return urllib.quote(path)
def total(self):
def tests_total(self):
return self.result.tests_total
def passed(self):
......@@ -130,20 +122,21 @@ class ReportModel(object):
"INTERRUPTED": "danger"}
test_info = []
results_dir = self.results_dir(False)
for tst in self.result.tests:
tst = tst.copy() # we don't want to override other's results
tst["test"] = str(tst["name"])
logdir = os.path.join(results_dir, 'test-results', tst['logdir'])
tst['logdir'] = os.path.relpath(logdir, self.html_output_dir)
for t in self.result.tests:
formatted = {}
formatted['name'] = t['name']
formatted['status'] = t['status']
logdir = os.path.join(results_dir, 'test-results', t['logdir'])
formatted['logdir'] = os.path.relpath(logdir, self.html_output_dir)
logfile = os.path.join(logdir, 'debug.log')
tst['logfile'] = os.path.relpath(logfile, self.html_output_dir)
tst['logfile_basename'] = os.path.basename(logfile)
tst['time'] = "%.2f" % tst['time_elapsed']
tst['time_start'] = time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(tst['time_start']))
tst['row_class'] = mapping[tst['status']]
formatted['logfile'] = os.path.relpath(logfile, self.html_output_dir)
formatted['logfile_basename'] = os.path.basename(logfile)
formatted['time'] = "%.2f" % t['time_elapsed']
formatted['time_start'] = time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(t['time_start']))
formatted['row_class'] = mapping[t['status']]
exhibition_limit = 40
fail_reason = tst.get('fail_reason')
fail_reason = t.get('fail_reason')
if fail_reason is None:
fail_reason = '<unknown>'
fail_reason = str(fail_reason)
......@@ -155,8 +148,8 @@ class ReportModel(object):
'data-content="%s">%s...</a>' %
('fail_reason',
'fail_reason'[:exhibition_limit]))
tst['fail_reason'] = fail_reason
test_info.append(tst)
formatted['fail_reason'] = fail_reason
test_info.append(formatted)
return test_info
def _sysinfo_phase(self, phase):
......@@ -207,27 +200,10 @@ class HTMLResult(Result):
HTML Test Result class.
"""
def __init__(self, job, force_html_file=None):
"""
:param job: Job which defines this result
:param force_html_file: Override the output html file location
"""
Result.__init__(self, job)
if force_html_file:
self.output = force_html_file
else:
self.output = self.args.html_output
def end_tests(self):
"""
Called once after all tests are executed.
"""
Result.end_tests(self)
self._render_report()
def _copy_static_resources(self):
module = 'avocado.core'
base_path = 'resources/htmlresult/static'
@staticmethod
def _copy_static_resources(html_path):
module = 'avocado_result_html'
base_path = 'resources/static'
for top_dir in pkg_resources.resource_listdir(module, base_path):
rsrc_dir = base_path + '/%s' % top_dir
......@@ -238,18 +214,31 @@ class HTMLResult(Result):
module,
rsrc_dir + '/%s' % rsrc_file)
dest = os.path.join(
os.path.dirname(os.path.abspath(self.output)),
os.path.dirname(os.path.abspath(html_path)),
top_dir,
os.path.basename(source))
pkg_resources.ensure_directory(dest)
shutil.copy(source, dest)
def _render_report(self):
context = ReportModel(result=self,
html_output=self.output)
@staticmethod
def _open_browser(html_path):
# if possible, put browser in separate process
# group, so keyboard interrupts don't affect
# browser as well as Python
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
inout = file(os.devnull, "r+")
cmd = ['xdg-open', html_path]
subprocess.Popen(cmd, close_fds=True, stdin=inout,
stdout=inout, stderr=inout,
preexec_fn=setsid)
def _render(self, result, output_path):
context = ReportModel(result=result, html_output=output_path)
template = pkg_resources.resource_string(
'avocado.core',
'resources/htmlresult/templates/report.mustache')
'avocado_result_html',
'resources/templates/report.mustache')
# pylint: disable=E0611
try:
......@@ -262,7 +251,7 @@ class HTMLResult(Result):
report_contents = v.render('utf8') # encodes into ascii
report_contents = codecs.decode("utf8") # decode to unicode
except UnicodeDecodeError as details:
# FIXME: Removeme when UnicodeDecodeError problem is fixed
# FIXME: Remove me when UnicodeDecodeError problem is fixed
import logging
ui = logging.getLogger("avocado.app")
ui.critical("\n" + ("-" * 80))
......@@ -274,18 +263,76 @@ class HTMLResult(Result):
ui.critical("-" * 80)
raise
self._copy_static_resources()
with codecs.open(self.output, 'w', 'utf-8') as report_file:
self._copy_static_resources(output_path)
with codecs.open(output_path, 'w', 'utf-8') as report_file:
report_file.write(report_contents)
if self.args is not None:
if getattr(self.args, 'open_browser', False):
# if possible, put browser in separate process group, so
# keyboard interrupts don't affect browser as well as Python
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
inout = file(os.devnull, "r+")
cmd = ['xdg-open', self.output]
subprocess.Popen(cmd, close_fds=True, stdin=inout, stdout=inout,
stderr=inout, preexec_fn=setsid)
def render(self, result, job):
if not (hasattr(job.args, 'html_job_result') or
hasattr(job.args, 'html_output')):
return
open_browser = getattr(job.args, 'open_browser', False)
if getattr(job.args, 'html_job_result', 'off') == 'on':
html_dir = os.path.join(job.logdir, 'html')
os.makedirs(html_dir)
html_path = os.path.join(html_dir, 'results.html')
self._render(result, html_path)
if getattr(job.args, 'stdout_claimed_by', None) is None:
log = logging.getLogger("avocado.app")
log.info("JOB HTML : %s", html_path)
if open_browser:
self._open_browser(html_path)
open_browser = False
html_path = getattr(job.args, 'html_output', 'None')
if html_path is not None:
self._render(result, html_path)
if open_browser:
self._open_browser(html_path)
class HTML(CLI):
"""
HTML job report
"""
name = 'htmlresult'
description = "HTML job report options for 'run' subcommand"
def configure(self, parser):
run_subcommand_parser = parser.subcommands.choices.get('run', None)
if run_subcommand_parser is None:
return
run_subcommand_parser.output.add_argument(
'--html', type=str,
dest='html_output', metavar='FILE',
help=('Enable HTML output to the FILE where the result should be '
'written. The value - (output to stdout) is not supported '
'since not all HTML resources can be embedded into a '
'single file (page resources will be copied to the '
'output file dir)'))
run_subcommand_parser.output.add_argument(
'--open-browser',
dest='open_browser',
action='store_true',
default=False,
help='Open the generated report on your preferred browser. '
'This works even if --html was not explicitly passed, '
'since an HTML report is always generated on the job '
'results dir. Current: %s' % False)
run_subcommand_parser.output.add_argument(
'--html-job-result', dest='html_job_result',
choices=('on', 'off'), default='on',
help=('Enables default HTML result in the job results directory. '
'File will be located at "html/results.html".'))
def run(self, args):
if 'html_output' in args and args.html_output == '-':
log = logging.getLogger("avocado.app")
log.error('HTML to stdout not supported (not all HTML resources '
'can be embedded on a single file)')
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
......@@ -37,7 +37,7 @@
<!-- Table -->
<table class="table table-bordered">
<tr>
<td>ID</td><td><tt>{{job_id}}</tt></td>
<td>ID</td><td><tt>{{job_unique_id}}</tt></td>
</tr>
<tr>
<td>Host</td><td><tt>{{hostname}}</tt></td>
......@@ -46,10 +46,10 @@
<td>Results Dir</td><td><a href="{{results_dir}}"><tt>{{results_dir_basename}}</tt></a></td>
</tr>
<tr>
<td>Cumulative test time</td><td>{{execution_time}} s</td>
<td>Cumulative test time</td><td>{{tests_total_time}} s</td>
</tr>
<tr>
<td>Stats</td><td>From {{total}} tests executed, {{passed}} passed (pass rate of {{pass_rate}}%)</td>
<td>Stats</td><td>From {{tests_total}} tests executed, {{passed}} passed (pass rate of {{pass_rate}}%)</td>
</tr>
</table>
</div>
......@@ -68,7 +68,7 @@
{{#tests}}
<tr class="{{row_class}}">
<td>{{time_start}}</td>
<td><a href="{{logdir}}">{{test}}</a></td>
<td><a href="{{logdir}}">{{name}}</a></td>
<td>{{status}}</td>
<td>{{time}}</td>
<td>{{& fail_reason}}</td>
......
#!/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2016
# Author: Cleber Rosa <crosa@redhat.com>

from setuptools import setup, find_packages

# Plugin registration: the CLI options handler and the result renderer
# are discovered by avocado through these entry point groups.
ENTRY_POINTS = {
    'avocado.plugins.cli': [
        'html = avocado_result_html:HTML',
    ],
    'avocado.plugins.result': [
        'html = avocado_result_html:HTMLResult',
    ],
}

setup(
    name='avocado_result_html',
    version='40.0',
    description='Avocado HTML Report for Jobs',
    author='Avocado Developers',
    author_email='avocado-devel@redhat.com',
    url='http://avocado-framework.github.io/',
    packages=find_packages(),
    include_package_data=True,
    install_requires=['pystache'],
    entry_points=ENTRY_POINTS,
)
......@@ -6,6 +6,8 @@ import sys
import shutil
from xml.dom import minidom
import pkg_resources
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
else:
......@@ -42,6 +44,14 @@ def image_output_uncapable():
return True
def html_uncapable():
    """
    Tell whether the avocado HTML result plugin is unavailable.

    :return: True when the 'avocado_result_html' distribution cannot be
             found, False when it is installed.
    """
    try:
        pkg_resources.require('avocado_result_html')
    except pkg_resources.DistributionNotFound:
        return True
    return False
def perl_tap_parser_uncapable():
    """
    Tell whether the Perl TAP::Parser module cannot be loaded.

    :return: True when loading TAP::Parser via perl exits non-zero
             (perl missing or module not installed), False otherwise.
    """
    exit_status = os.system("perl -e 'use TAP::Parser;'")
    return exit_status != 0
......@@ -106,6 +116,8 @@ class OutputPluginTest(unittest.TestCase):
"Missing error message from output:\n%s" %
result.stderr)
@unittest.skipIf(html_uncapable(),
"Uncapable of Avocado Result HTML plugin")
def test_output_incompatible_setup_2(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
......@@ -167,6 +179,8 @@ class OutputPluginTest(unittest.TestCase):
except OSError:
pass
@unittest.skipIf(html_uncapable(),
"Uncapable of Avocado Result HTML plugin")
def test_output_compatible_setup_3(self):
tmpfile = tempfile.mktemp(prefix='avocado_' + __name__)
tmpfile2 = tempfile.mktemp(prefix='avocado_' + __name__)
......
......@@ -84,7 +84,7 @@ def get_data_files():
return data_files
def _get_resource_files(path):
def _get_resource_files(path, base):
"""
Given a path, return all the files in there to package
"""
......@@ -92,7 +92,7 @@ def _get_resource_files(path):
for root, _, files in sorted(os.walk(path)):
for name in files:
fullname = os.path.join(root, name)
flist.append(fullname[len('avocado/core/'):])
flist.append(fullname[len(base):])
return flist
......@@ -111,8 +111,6 @@ if __name__ == '__main__':
url='http://avocado-framework.github.io/',
use_2to3=True,
packages=find_packages(exclude=('selftests*',)),
package_data={'avocado.core': _get_resource_files(
'avocado/core/resources')},
data_files=get_data_files(),
scripts=['scripts/avocado',
'scripts/avocado-rest-client'],
......@@ -124,7 +122,6 @@ if __name__ == '__main__':
'xunit = avocado.plugins.xunit:XUnitCLI',
'json = avocado.plugins.jsonresult:JSONCLI',
'journal = avocado.plugins.journal:Journal',
'html = avocado.plugins.html:HTML',
'remote = avocado.plugins.remote:Remote',
'replay = avocado.plugins.replay:Replay',
'tap = avocado.plugins.tap:TAP',
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment