Unverified commit 2136dbee, authored by Lukáš Doktor

Merging pull request 1352

* https://github.com/avocado-framework/avocado:
  Result: remove command_line_arg_name attribute
  Result: port JSON result plugin
  Result: port xUnit result
  Result: introduce plugin interface and corresponding dispatcher
  Result: keep track of the state of all tests
  Result: be more specific about the total time
  Result: drop "Test" from TestResult related classes names
......@@ -88,3 +88,9 @@ class JobPrePostDispatcher(Dispatcher):
except:
job.log.error('Error running method "%s" of plugin "%s": %s',
method_name, ext.name, sys.exc_info()[1])
class ResultDispatcher(Dispatcher):
def __init__(self):
super(ResultDispatcher, self).__init__('avocado.plugins.result')
......@@ -24,7 +24,7 @@ import urllib
import pystache
from .result import TestResult
from .result import Result
from ..utils import path as utils_path
from ..utils import runtime
......@@ -193,20 +193,18 @@ class ReportModel(object):
return self._sysinfo_phase('post')
class HTMLTestResult(TestResult):
class HTMLResult(Result):
"""
HTML Test Result class.
"""
command_line_arg_name = '--html'
def __init__(self, job, force_html_file=None):
"""
:param job: Job which defines this result
:param force_html_file: Override the output html file location
"""
TestResult.__init__(self, job)
Result.__init__(self, job)
if force_html_file:
self.output = force_html_file
else:
......@@ -217,7 +215,7 @@ class HTMLTestResult(TestResult):
"""
Called once before any tests are executed.
"""
TestResult.start_tests(self)
Result.start_tests(self)
self.json = {'debuglog': self.logfile,
'job_id': runtime.CURRENT_JOB.unique_id,
'tests': []}
......@@ -229,7 +227,7 @@ class HTMLTestResult(TestResult):
:param state: result of :class:`avocado.core.test.Test.get_state`.
:type state: dict
"""
TestResult.end_test(self, state)
Result.end_test(self, state)
t = {'test': str(state.get('name', "<unknown>")),
'url': state.get('name', "<unknown>"),
'time_start': state.get('time_start', -1),
......@@ -247,14 +245,14 @@ class HTMLTestResult(TestResult):
"""
Called once after all tests are executed.
"""
TestResult.end_tests(self)
Result.end_tests(self)
self.json.update({
'total': len(self.json['tests']),
'pass': self.passed,
'errors': self.errors,
'failures': self.failed,
'skip': self.skipped,
'time': self.total_time
'time': self.tests_total_time
})
self._render_report()
......
......@@ -42,8 +42,6 @@ from . import output
from . import multiplexer
from . import tree
from . import test
from . import xunit
from . import jsonresult
from . import replay
from .output import STD_OUTPUT
from .settings import settings
......@@ -115,7 +113,7 @@ class Job(object):
self.test_dir = data_dir.get_test_dir()
self.test_index = 1
self.status = "RUNNING"
self.result_proxy = result.TestResultProxy()
self.result_proxy = result.ResultProxy()
self.sysinfo = None
self.timeout = getattr(self.args, 'job_timeout', 0)
self.__logging_handlers = {}
......@@ -274,8 +272,6 @@ class Job(object):
The basic idea behind the output plugins is:
* If there are any active output plugins, use them
* Always add Xunit and JSON plugins outputting to files inside the
results dir
* If at the end we only have 2 output plugins (Xunit and JSON), we can
add the human output plugin.
"""
......@@ -283,24 +279,14 @@ class Job(object):
# If there are any active output plugins, let's use them
self._set_output_plugins()
# Setup the xunit plugin to output to the debug directory
xunit_file = os.path.join(self.logdir, 'results.xml')
xunit_plugin = xunit.xUnitTestResult(self, xunit_file)
self.result_proxy.add_output_plugin(xunit_plugin)
# Setup the json plugin to output to the debug directory
json_file = os.path.join(self.logdir, 'results.json')
json_plugin = jsonresult.JSONTestResult(self, json_file)
self.result_proxy.add_output_plugin(json_plugin)
# Setup the html output to the results directory
if HTML_REPORT_SUPPORT:
html_file = os.path.join(self.logdir, 'html', 'results.html')
html_plugin = html.HTMLTestResult(self, html_file)
html_plugin = html.HTMLResult(self, html_file)
self.result_proxy.add_output_plugin(html_plugin)
if not getattr(self.args, 'stdout_claimed_by', False) or self.standalone:
human_plugin = result.HumanTestResult(self)
human_plugin = result.HumanResult(self)
self.result_proxy.add_output_plugin(human_plugin)
def _make_test_suite(self, urls=None):
......
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2014
# Author: Ruda Moura <rmoura@redhat.com>
"""
JSON output module.
"""
import json
import logging
from .result import TestResult
class JSONTestResult(TestResult):

    """
    JSON Test Result class.
    """

    command_line_arg_name = '--json'

    def __init__(self, job, force_json_file=None):
        """
        :param job: Job which defines this result
        :param force_json_file: Override the json output file location
        """
        TestResult.__init__(self, job)
        # A (truthy) forced file location wins over the command line option
        self.output = force_json_file or getattr(self.args, 'json_output', '-')
        self.json = None
        self.log = logging.getLogger("avocado.app")

    def start_tests(self):
        """
        Called once before any tests are executed.
        """
        TestResult.start_tests(self)
        self.json = {'debuglog': self.logfile,
                     'tests': []}

    def end_test(self, state):
        """
        Called when the given test has been run.

        :param state: result of :class:`avocado.core.test.Test.get_state`.
        :type state: dict
        """
        TestResult.end_test(self, state)
        # The job id is only available from a test state, so record it the
        # first time a test finishes
        self.json.setdefault('job_id', state.get('job_unique_id', "<unknown>"))
        record = {'test': str(state.get('name', "<unknown>")),
                  'url': str(state.get('name', "<unknown>")),
                  'start': state.get('time_start', -1),
                  'end': state.get('time_end', -1),
                  'time': state.get('time_elapsed', -1),
                  'status': state.get('status', {}),
                  'whiteboard': state.get('whiteboard', "<unknown>"),
                  'logdir': state.get('logdir', "<unknown>"),
                  'logfile': state.get('logfile', "<unknown>"),
                  'fail_reason': str(state.get('fail_reason', "<unknown>"))}
        self.json['tests'].append(record)

    def _save_json(self):
        # By the time this runs, end_tests() has already serialized
        # self.json into a string
        with open(self.output, 'w') as json_file:
            json_file.write(self.json)

    def end_tests(self):
        """
        Called once after all tests are executed.
        """
        TestResult.end_tests(self)
        self.json.update({'total': self.tests_total,
                          'pass': self.passed,
                          'errors': self.errors,
                          'failures': self.failed,
                          'skip': self.skipped,
                          'time': self.total_time})
        self.json = json.dumps(self.json)
        if self.output == '-':
            self.log.debug(self.json)
        else:
            self._save_json()
......@@ -118,3 +118,19 @@ class JobPost(Plugin):
"""
Entry point for actually running the post job action
"""
class Result(Plugin):
@abc.abstractmethod
def render(self, result, job):
"""
Entry point with method that renders the result
This will usually be used to write the result to a file or directory.
:param result: the complete job result
:type result: :class:`avocado.core.result.Result`
:param job: the finished job for which a result will be written
:type job: :class:`avocado.core.job.Job`
"""
......@@ -13,7 +13,7 @@
# Author: Ruda Moura <rmoura@redhat.com>
from .test import RemoteTest
from .result import RemoteTestResult, VMTestResult
from .result import RemoteResult, VMResult
from .runner import RemoteTestRunner, VMTestRunner
__all__ = ['RemoteTestResult', 'VMTestResult', 'RemoteTestRunner', 'VMTestRunner', 'RemoteTest']
__all__ = ['RemoteResult', 'VMResult', 'RemoteTestRunner', 'VMTestRunner', 'RemoteTest']
......@@ -16,24 +16,22 @@
import os
from ..result import HumanTestResult
from ..result import HumanResult
class RemoteTestResult(HumanTestResult):
class RemoteResult(HumanResult):
"""
Remote Machine Test Result class.
"""
command_line_arg_name = '--remote-hostname'
def __init__(self, job):
"""
Creates an instance of RemoteTestResult.
Creates an instance of RemoteResult.
:param job: an instance of :class:`avocado.core.job.Job`.
"""
HumanTestResult.__init__(self, job)
HumanResult.__init__(self, job)
self.test_dir = os.getcwd()
self.remote_test_dir = '~/avocado/tests'
self.urls = self.args.url
......@@ -45,14 +43,12 @@ class RemoteTestResult(HumanTestResult):
pass
class VMTestResult(RemoteTestResult):
class VMResult(RemoteResult):
"""
Virtual Machine Test Result class.
"""
command_line_arg_name = '--vm-domain'
def __init__(self, job):
super(VMTestResult, self).__init__(job)
super(VMResult, self).__init__(job)
self.vm = None
......@@ -14,10 +14,10 @@
# Ruda Moura <rmoura@redhat.com>
"""
Contains the definition of the TestResult class, used for output in avocado.
Contains the definition of the Result class, used for output in avocado.
It also contains the most basic test result class, HumanTestResult,
used by the test runner.
It also contains the most basic result class, HumanResult, used by the
test runner.
"""
import os
......@@ -39,14 +39,14 @@ def register_test_result_class(application_args, klass):
settings and feature choices, such as the runner.
:type application_args: :class:`argparse.Namespace`
:param klass: the test result class to enable
:type klass: a subclass of :class:`TestResult`
:type klass: a subclass of :class:`Result`
"""
if not hasattr(application_args, 'test_result_classes'):
application_args.test_result_classes = set()
application_args.test_result_classes.add(klass)
class TestResultProxy(object):
class ResultProxy(object):
def __init__(self):
self.output_plugins = []
......@@ -57,9 +57,9 @@ class TestResultProxy(object):
output_plugin.notify_progress(progress_from_test)
def add_output_plugin(self, plugin):
if not isinstance(plugin, TestResult):
if not isinstance(plugin, Result):
raise InvalidOutputPlugin("Object %s is not an instance of "
"TestResult" % plugin)
"Result" % plugin)
self.output_plugins.append(plugin)
def start_tests(self):
......@@ -83,20 +83,15 @@ class TestResultProxy(object):
output_plugin.check_test(state)
class TestResult(object):
class Result(object):
"""
Test result class, holder for test result information.
Result class, holder for job (and its tests) result information.
"""
#: Should be set by result plugins to inform users about output options
#: inconsistencies given on the command line, and where these
#: inconsistencies come from.
command_line_arg_name = None
def __init__(self, job):
"""
Creates an instance of TestResult.
Creates an instance of Result.
:param job: an instance of :class:`avocado.core.job.Job`.
"""
......@@ -105,13 +100,14 @@ class TestResult(object):
self.args = getattr(job, "args", None)
self.tests_total = getattr(self.args, 'test_result_total', 1)
self.tests_run = 0
self.total_time = 0.0
self.tests_total_time = 0.0
self.passed = 0
self.errors = 0
self.failed = 0
self.skipped = 0
self.warned = 0
self.interrupted = 0
self.tests = []
# Where this results intends to write to. Convention is that a dash (-)
# means stdout, and stdout is a special output that can be exclusively
......@@ -164,7 +160,8 @@ class TestResult(object):
:type state: dict
"""
self.tests_run += 1
self.total_time += state.get('time_elapsed', -1)
self.tests_total_time += state.get('time_elapsed', -1)
self.tests.append(state)
def check_test(self, state):
"""
......@@ -188,14 +185,14 @@ class TestResult(object):
self.end_test(state)
class HumanTestResult(TestResult):
class HumanResult(Result):
"""
Human output Test result class.
"""
def __init__(self, job):
super(HumanTestResult, self).__init__(job)
super(HumanResult, self).__init__(job)
self.log = logging.getLogger("avocado.app")
self.__throbber = output.Throbber()
......@@ -203,7 +200,7 @@ class HumanTestResult(TestResult):
"""
Called once before any tests are executed.
"""
super(HumanTestResult, self).start_tests()
super(HumanResult, self).start_tests()
self.log.info("JOB ID : %s", self.job_unique_id)
if getattr(self.args, "replay_sourcejob", None):
self.log.info("SRC JOB ID : %s", self.args.replay_sourcejob)
......@@ -214,7 +211,7 @@ class HumanTestResult(TestResult):
"""
Called once after all tests are executed.
"""
super(HumanTestResult, self).end_tests()
super(HumanResult, self).end_tests()
self.log.info("RESULTS : PASS %d | ERROR %d | FAIL %d | SKIP %d | "
"WARN %d | INTERRUPT %s", self.passed,
self.errors, self.failed, self.skipped,
......@@ -224,10 +221,10 @@ class HumanTestResult(TestResult):
logdir = os.path.dirname(self.logfile)
html_file = os.path.join(logdir, 'html', 'results.html')
self.log.info("JOB HTML : %s", html_file)
self.log.info("TESTS TIME : %.2f s", self.total_time)
self.log.info("TESTS TIME : %.2f s", self.tests_total_time)
def start_test(self, state):
super(HumanTestResult, self).start_test(state)
super(HumanResult, self).start_test(state)
if "name" in state:
name = state["name"]
uid = name.str_uid
......@@ -239,7 +236,7 @@ class HumanTestResult(TestResult):
extra={"skip_newline": True})
def end_test(self, state):
super(HumanTestResult, self).end_test(state)
super(HumanResult, self).end_test(state)
status = state.get("status", "ERROR")
if status == "TEST_NA":
status = "SKIP"
......
......@@ -257,7 +257,7 @@ class TestRunner(object):
:param job: an instance of :class:`avocado.core.job.Job`.
:param test_result: an instance of
:class:`avocado.core.result.TestResultProxy`.
:class:`avocado.core.result.ResultProxy`.
"""
self.job = job
self.result = test_result
......
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2014
# Author: Ruda Moura <rmoura@redhat.com>
"""xUnit module."""
import datetime
import logging
import string
from xml.sax.saxutils import quoteattr
from .result import TestResult
# We use a subset of the XML format defined in this URL:
# https://svn.jenkins-ci.org/trunk/hudson/dtkit/dtkit-format/dtkit-junit-model/src/main/resources/com/thalesgroup/dtkit/junit/model/xsd/junit-4.xsd
PRINTABLE = string.ascii_letters + string.digits + string.punctuation + '\n\r '


class XmlResult(object):

    """
    Handles the XML details for xUnit output.
    """

    def __init__(self):
        # Accumulated XML lines, joined by get_contents()
        self.xml = ['<?xml version="1.0" encoding="UTF-8"?>']

    def _escape_attr(self, attrib):
        """Hex-escape non-printable characters and quote for attribute use."""
        escaped = ''.join(char if char in PRINTABLE else "\\x%02x" % ord(char)
                          for char in str(attrib))
        return quoteattr(escaped)

    def _escape_cdata(self, cdata):
        """Hex-escape non-printables and neutralize any CDATA terminator."""
        escaped = ''.join(char if char in PRINTABLE else "\\x%02x" % ord(char)
                          for char in str(cdata))
        return escaped.replace(']]>', ']]>]]&gt;<![CDATA[')

    def get_contents(self):
        """Return the complete XML document as a single string."""
        return '\n'.join(self.xml)

    def start_testsuite(self, timestamp):
        """
        Start a new testsuite node.

        :param timestamp: Timestamp string in date/time format.
        """
        # The counters are not known yet, so keep a template that
        # end_testsuite() fills in later
        self.testsuite = ('<testsuite name="avocado" tests="{tests}" '
                          'errors="{errors}" failures="{failures}" '
                          'skipped="{skip}" time="{total_time}" '
                          'timestamp="%s">' % timestamp)
        self.testcases = []

    def end_testsuite(self, tests, errors, failures, skip, total_time):
        """
        End of testsuite node.

        :param tests: Number of tests.
        :param errors: Number of test errors.
        :param failures: Number of test failures.
        :param skip: Number of test skipped.
        :param total_time: The total time of test execution.
        """
        counters = {'tests': tests,
                    'errors': errors,
                    'failures': failures,
                    'skip': skip,
                    'total_time': total_time}
        self.xml.append(self.testsuite.format(**counters))
        self.xml.extend(self.testcases)
        self.xml.append('</testsuite>')

    def _base_values(self, state):
        """Attribute values shared by every kind of testcase node."""
        return {'class': self._escape_attr(state.get('class_name', "<unknown>")),
                'name': self._escape_attr(state.get('name', "<unknown>")),
                'time': state.get('time_elapsed', -1)}

    def _fault_values(self, state):
        """Attribute values needed by failure/error testcase nodes."""
        values = self._base_values(state)
        values['type'] = self._escape_attr(state.get('fail_class', "<unknown>"))
        values['traceback'] = self._escape_cdata(state.get('traceback', "<unknown>"))
        values['systemout'] = self._escape_cdata(state.get('text_output', "<unknown>"))
        values['reason'] = self._escape_attr(str(state.get('fail_reason', "<unknown>")))
        return values

    def add_success(self, state):
        """
        Add a testcase node of kind succeed.

        :param state: result of :class:`avocado.core.test.Test.get_state`.
        :type state: dict
        """
        template = '\t<testcase classname={class} name={name} time="{time}"/>'
        self.testcases.append(template.format(**self._base_values(state)))

    def add_skip(self, state):
        """
        Add a testcase node of kind skipped.

        :param state: result of :class:`avocado.core.test.Test.get_state`.
        :type state: dict
        """
        template = '''\t<testcase classname={class} name={name} time="{time}">
\t\t<skipped />
\t</testcase>'''
        self.testcases.append(template.format(**self._base_values(state)))

    def add_failure(self, state):
        """
        Add a testcase node of kind failed.

        :param state: result of :class:`avocado.core.test.Test.get_state`.
        :type state: dict
        """
        template = '''\t<testcase classname={class} name={name} time="{time}">
\t\t<failure type={type} message={reason}><![CDATA[{traceback}]]></failure>
\t\t<system-out><![CDATA[{systemout}]]></system-out>
\t</testcase>'''
        self.testcases.append(template.format(**self._fault_values(state)))

    def add_error(self, state):
        """
        Add a testcase node of kind error.

        :param state: result of :class:`avocado.core.test.Test.get_state`.
        :type state: dict
        """
        template = '''\t<testcase classname={class} name={name} time="{time}">
\t\t<error type={type} message={reason}><![CDATA[{traceback}]]></error>
\t\t<system-out><![CDATA[{systemout}]]></system-out>
\t</testcase>'''
        self.testcases.append(template.format(**self._fault_values(state)))
class xUnitTestResult(TestResult):

    """
    xUnit Test Result class.
    """

    command_line_arg_name = '--xunit'

    def __init__(self, job, force_xunit_file=None):
        """
        Creates an instance of xUnitTestResult.

        :param job: an instance of :class:`avocado.core.job.Job`.
        :param force_xunit_file: Override the output file defined in job.args
        """
        TestResult.__init__(self, job)
        # A (truthy) forced file location wins over the command line option
        self.output = force_xunit_file or getattr(self.args, 'xunit_output', '-')
        self.log = logging.getLogger("avocado.app")
        self.xml = XmlResult()

    def start_tests(self):
        """
        Record a start tests event.
        """
        TestResult.start_tests(self)
        self.xml.start_testsuite(datetime.datetime.now())

    def start_test(self, test):
        """
        Record a start test event.
        """
        TestResult.start_test(self, test)

    def end_test(self, state):
        """
        Record an end test event, accord to the given test status.

        :param state: result of :class:`avocado.core.test.Test.get_state`.
        :type state: dict
        """
        TestResult.end_test(self, state)
        status = state.get('status', "ERROR")
        if status == 'SKIP':
            self.xml.add_skip(state)
        elif status == 'FAIL':
            self.xml.add_failure(state)
        elif status in ('PASS', 'WARN'):
            self.xml.add_success(state)
        else:
            # ERROR, INTERRUPTED, and anything unexpected
            self.xml.add_error(state)

    def end_tests(self):
        """
        Record an end tests event.
        """
        TestResult.end_tests(self)
        # Interruptions are accounted for as errors in the xUnit format
        self.xml.end_testsuite(tests=self.tests_total,
                               errors=self.errors + self.interrupted,
                               failures=self.failed,
                               skip=self.skipped,
                               total_time=self.total_time)
        contents = self.xml.get_contents()
        if self.output == '-':
            self.log.debug(contents)
        else:
            with open(self.output, 'w') as xunit_output:
                xunit_output.write(contents)
......@@ -19,7 +19,7 @@ import logging
import sys
from avocado.core import exit_codes
from avocado.core.html import HTMLTestResult
from avocado.core.html import HTMLResult
from avocado.core.result import register_test_result_class
from avocado.core.plugin_interfaces import CLI
......@@ -64,4 +64,4 @@ class HTML(CLI):
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
if 'html_output' in args and args.html_output is not None:
register_test_result_class(args, HTMLTestResult)
register_test_result_class(args, HTMLResult)
......@@ -19,7 +19,7 @@ import sqlite3
import datetime
from avocado.core.plugin_interfaces import CLI
from avocado.core.result import TestResult
from avocado.core.result import Result
from avocado.core.result import register_test_result_class
JOURNAL_FILENAME = ".journal.sqlite"
......@@ -33,7 +33,7 @@ SCHEMA = {'job_info': 'CREATE TABLE job_info (unique_id TEXT UNIQUE)',
"flushed BOOLEAN DEFAULT 0)")}
class TestResultJournal(TestResult):
class ResultJournal(Result):
"""
Test Result Journal class.
......@@ -43,15 +43,13 @@ class TestResultJournal(TestResult):
feedback to users from a central place.
"""
command_line_arg_name = '--journal'
def __init__(self, job=None):
"""
Creates an instance of TestResultJournal.
Creates an instance of ResultJournal.
:param job: an instance of :class:`avocado.core.job.Job`.
"""
TestResult.__init__(self, job)
Result.__init__(self, job)
self.journal_initialized = False
def _init_journal(self, logdir):
......@@ -99,12 +97,12 @@ class TestResultJournal(TestResult):
def start_test(self, state):
self.lazy_init_journal(state)
TestResult.start_test(self, state)
Result.start_test(self, state)
self._record_status(state, "STARTED")
def end_test(self, state):
self.lazy_init_journal(state)
TestResult.end_test(self, state)
Result.end_test(self, state)
self._record_status(state, "ENDED")
def end_tests(self):
......@@ -134,4 +132,4 @@ class Journal(CLI):
def run(self, args):
if 'journal' in args and args.journal is True:
register_test_result_class(args, TestResultJournal)
register_test_result_class(args, ResultJournal)
......@@ -10,19 +10,72 @@
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2014
# Author: Ruda Moura <rmoura@redhat.com>
# Authors: Ruda Moura <rmoura@redhat.com>
# Cleber Rosa <crosa@redhat.com>
"""
JSON output module.
"""
from avocado.core.jsonresult import JSONTestResult
from avocado.core.plugin_interfaces import CLI
from avocado.core.result import register_test_result_class
import json
import logging
import os
from avocado.core.parser import FileOrStdoutAction
from avocado.core.plugin_interfaces import CLI, Result
UNKNOWN = '<unknown>'


class JSONResult(Result):

    """
    JSON result plugin: renders the complete job result as a JSON document.
    """

    def _render(self, result):
        """
        Build the JSON document (returned as a string) from a job result.

        :param result: the complete job result
        :type result: :class:`avocado.core.result.Result`
        """
        tests = []
        for test in result.tests:
            tests.append({'test': str(test.get('name', UNKNOWN)),
                          'url': str(test.get('name', UNKNOWN)),
                          'start': test.get('time_start', -1),
                          'end': test.get('time_end', -1),
                          'time': test.get('time_elapsed', -1),
                          'status': test.get('status', {}),
                          'whiteboard': test.get('whiteboard', UNKNOWN),
                          'logdir': test.get('logdir', UNKNOWN),
                          'logfile': test.get('logfile', UNKNOWN),
                          'fail_reason': str(test.get('fail_reason', UNKNOWN))})
        content = {'job_id': result.job_unique_id,
                   'debuglog': result.logfile,
                   'tests': tests,
                   'total': result.tests_total,
                   'pass': result.passed,
                   'errors': result.errors,
                   'failures': result.failed,
                   'skip': result.skipped,
                   'time': result.tests_total_time}
        return json.dumps(content)

    def render(self, result, job):
        """
        Entry point: write the JSON result file(s) for a finished job.

        :param result: the complete job result
        :param job: the finished job
        """
        # Nothing to do unless one of the JSON options is present at all
        if not (hasattr(job.args, 'json_job_result') or
                hasattr(job.args, 'json_output')):
            return
        content = self._render(result)
        if getattr(job.args, 'json_job_result', 'off') == 'on':
            json_path = os.path.join(job.logdir, 'results.json')
            with open(json_path, 'w') as json_file:
                json_file.write(content)
        # BUG FIX: the default used to be the *string* 'None', which is
        # truthy and "is not None", so a missing 'json_output' attribute
        # would create a file literally named "None". Use the None object.
        json_path = getattr(job.args, 'json_output', None)
        if json_path is not None:
            if json_path == '-':
                log = logging.getLogger("avocado.app")
                log.debug(content)
            else:
                with open(json_path, 'w') as json_file:
                    json_file.write(content)
class JSON(CLI):
class JSONCLI(CLI):
"""
JSON output
......@@ -42,6 +95,11 @@ class JSON(CLI):
help='Enable JSON result format and write it to FILE. '
"Use '-' to redirect to the standard output.")
run_subcommand_parser.output.add_argument(
'--json-job-result', dest='json_job_result',
choices=('on', 'off'), default='on',
help=('Enables default JSON result in the job results directory. '
'File will be named "results.json".'))
def run(self, args):
if 'json_output' in args and args.json_output is not None:
register_test_result_class(args, JSONTestResult)
pass
......@@ -21,7 +21,7 @@ import sys
from avocado.core import exit_codes
from avocado.core import remoter
from avocado.core.plugin_interfaces import CLI
from avocado.core.remote import RemoteTestResult
from avocado.core.remote import RemoteResult
from avocado.core.remote import RemoteTestRunner
from avocado.core.result import register_test_result_class
......@@ -106,6 +106,6 @@ class Remote(CLI):
def run(self, args):
if self._check_required_args(args, 'remote_hostname',
('remote_hostname',)):
register_test_result_class(args, RemoteTestResult)
register_test_result_class(args, RemoteResult)
args.test_runner = RemoteTestRunner
setattr(args, 'stdout_claimed_by', '--remote-hostname')
......@@ -25,6 +25,7 @@ from avocado.core import job
from avocado.core import loader
from avocado.core import multiplexer
from avocado.core.plugin_interfaces import CLICmd
from avocado.core.dispatcher import ResultDispatcher
from avocado.core.settings import settings
from avocado.utils.data_structures import time_to_seconds
......@@ -186,4 +187,13 @@ class Run(CLICmd):
log.error(e.message)
sys.exit(exit_codes.AVOCADO_FAIL)
job_instance = job.Job(args)
return job_instance.run()
job_run = job_instance.run()
result_dispatcher = ResultDispatcher()
if result_dispatcher.extensions:
# At this point job_instance doesn't have a single results attribute
# which is the end goal. For now, we pick any of the plugin classes
# added to the result proxy.
if len(job_instance.result_proxy.output_plugins) > 0:
result = job_instance.result_proxy.output_plugins[0]
result_dispatcher.map_method('render', result, job_instance)
return job_run
......@@ -19,10 +19,10 @@ import logging
from avocado.core.parser import FileOrStdoutAction
from avocado.core.plugin_interfaces import CLI
from avocado.core.result import register_test_result_class, TestResult
from avocado.core.result import register_test_result_class, Result
class TAPResult(TestResult):
class TAPResult(Result):
"""
TAP output class
"""
......
......@@ -21,7 +21,7 @@ import sys
from avocado.core import exit_codes
from avocado.core import virt
from avocado.core.plugin_interfaces import CLI
from avocado.core.remote import VMTestResult
from avocado.core.remote import VMResult
from avocado.core.remote import VMTestRunner
from avocado.core.result import register_test_result_class
......@@ -109,6 +109,6 @@ class VM(CLI):
def run(self, args):
if self._check_required_args(args, 'vm_domain', ('vm_domain',)):
register_test_result_class(args, VMTestResult)
register_test_result_class(args, VMResult)
args.test_runner = VMTestRunner
setattr(args, 'stdout_claimed_by', '--vm-domain')
......@@ -10,17 +10,109 @@
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2014
# Author: Ruda Moura <rmoura@redhat.com>
# Authors: Ruda Moura <rmoura@redhat.com>
# Cleber Rosa <crosa@redhat.com>
"""xUnit module."""
from avocado.core.plugin_interfaces import CLI
from avocado.core.result import register_test_result_class
from avocado.core.xunit import xUnitTestResult
import datetime
import logging
import os
import string
from xml.dom.minidom import Document, Element
from avocado.core.parser import FileOrStdoutAction
from avocado.core.plugin_interfaces import CLI, Result
class XUnitResult(Result):

    """
    xUnit result plugin: renders the complete job result as a JUnit
    (xUnit) compatible XML document.
    """

    UNKNOWN = '<unknown>'
    PRINTABLE = string.ascii_letters + string.digits + string.punctuation + '\n\r '

    def _escape_attr(self, attrib):
        """Replace non-printable characters with hex escapes."""
        attrib = ''.join(_ if _ in self.PRINTABLE else "\\x%02x" % ord(_)
                         for _ in str(attrib))
        return attrib

    def _escape_cdata(self, cdata):
        """Hex-escape non-printables and neutralize any CDATA terminator."""
        cdata = ''.join(_ if _ in self.PRINTABLE else "\\x%02x" % ord(_)
                        for _ in str(cdata))
        return cdata.replace(']]>', ']]>]]&gt;<![CDATA[')

    def _get_attr(self, container, attrib):
        """Fetch a value from a test state dict, escaped for XML use."""
        return self._escape_attr(container.get(attrib, self.UNKNOWN))

    def _create_testcase_element(self, document, state):
        """Create the bare <testcase> element for a given test state."""
        testcase = document.createElement('testcase')
        testcase.setAttribute('classname', self._get_attr(state, 'class_name'))
        testcase.setAttribute('name', self._get_attr(state, 'name'))
        testcase.setAttribute('time', self._get_attr(state, 'time_elapsed'))
        return testcase

    def _create_failure_or_error(self, document, test, element_type):
        """Create a <failure> or <error> child element for a test state."""
        element = Element(element_type)
        element.setAttribute('type', self._get_attr(test, 'fail_class'))
        element.setAttribute('message', self._get_attr(test, 'fail_reason'))
        traceback_content = self._escape_cdata(test.get('traceback', self.UNKNOWN))
        traceback = document.createCDATASection(traceback_content)
        element.appendChild(traceback)
        system_out = Element('system-out')
        system_out_cdata_content = self._escape_cdata(test.get('text_output', self.UNKNOWN))
        system_out_cdata = document.createCDATASection(system_out_cdata_content)
        system_out.appendChild(system_out_cdata)
        element.appendChild(system_out)
        return element

    def _render(self, result):
        """
        Build the full XML document (returned as a string) from a job result.

        :param result: the complete job result
        :type result: :class:`avocado.core.result.Result`
        """
        document = Document()
        testsuite = document.createElement('testsuite')
        testsuite.setAttribute('name', 'avocado')
        testsuite.setAttribute('tests', self._escape_attr(result.tests_total))
        # Interruptions are accounted for as errors in the xUnit format
        testsuite.setAttribute('errors', self._escape_attr(result.errors + result.interrupted))
        testsuite.setAttribute('failures', self._escape_attr(result.failed))
        testsuite.setAttribute('skipped', self._escape_attr(result.skipped))
        testsuite.setAttribute('time', self._escape_attr(result.tests_total_time))
        testsuite.setAttribute('timestamp', self._escape_attr(datetime.datetime.now()))
        document.appendChild(testsuite)
        for test in result.tests:
            testcase = self._create_testcase_element(document, test)
            status = test.get('status', 'ERROR')
            if status in ('PASS', 'WARN'):
                pass
            elif status == 'SKIP':
                testcase.appendChild(Element('skipped'))
            elif status == 'FAIL':
                element = self._create_failure_or_error(document, test, 'failure')
                testcase.appendChild(element)
            else:
                element = self._create_failure_or_error(document, test, 'error')
                testcase.appendChild(element)
            testsuite.appendChild(testcase)
        return document.toxml(encoding='UTF-8')

    def render(self, result, job):
        """
        Entry point: write the xUnit result file(s) for a finished job.

        :param result: the complete job result
        :param job: the finished job
        """
        # Nothing to do unless one of the xUnit options is present at all
        if not (hasattr(job.args, 'xunit_job_result') or
                hasattr(job.args, 'xunit_output')):
            return
        content = self._render(result)
        if getattr(job.args, 'xunit_job_result', 'off') == 'on':
            xunit_path = os.path.join(job.logdir, 'results.xml')
            with open(xunit_path, 'w') as xunit_file:
                xunit_file.write(content)
        # BUG FIX: the default used to be the *string* 'None', which is
        # truthy and "is not None", so a missing 'xunit_output' attribute
        # would create a file literally named "None". Use the None object.
        xunit_path = getattr(job.args, 'xunit_output', None)
        if xunit_path is not None:
            if xunit_path == '-':
                log = logging.getLogger("avocado.app")
                log.debug(content)
            else:
                with open(xunit_path, 'w') as xunit_file:
                    xunit_file.write(content)
class XUnitCLI(CLI):
"""
xUnit output
......@@ -41,6 +133,11 @@ class XUnit(CLI):
help=('Enable xUnit result format and write it to FILE. '
"Use '-' to redirect to the standard output."))
run_subcommand_parser.output.add_argument(
'--xunit-job-result', dest='xunit_job_result',
choices=('on', 'off'), default='on',
help=('Enables default xUnit result in the job results directory. '
'File will be named "results.xml".'))
def run(self, args):
    # NOTE(review): this span appears to be stripped diff residue -- the
    # registration lines below were most likely the *removed* side of the
    # hunk, replaced in this commit by the bare "pass" (result handling
    # moved to the 'avocado.plugins.result' dispatcher entry point).
    # Confirm against the original patch before relying on this text.
    if 'xunit_output' in args and args.xunit_output is not None:
        register_test_result_class(args, xUnitTestResult)
    pass
......@@ -269,8 +269,16 @@ If you are looking to implement a new machine or human readable output
format, you can refer to :mod:`avocado.core.plugins.xunit` and use it as a
starting point.
In a nutshell, you have to implement a class that inherits from
:class:`avocado.core.result.TestResult` and implements all public methods,
that perform actions (write to a file/stream) for each test states. You can
take a look at :doc:`Plugins` for more information on how to write a plugin
that will activate and execute the new result format.
If your result is something that is produced at once, based on the
complete job outcome, you should create a new class that inherits from
:class:`avocado.core.plugin_interfaces.Result` and implements the
:meth:`avocado.core.plugin_interfaces.Result.render` method.
But, if your result implementation is something that outputs
information live before/after each test, you have to implement the
old-style interface. Create a class that inherits from
:class:`avocado.core.result.Result` and implements all public methods,
which perform actions (write to a file/stream) for each test state.
You can take a look at :doc:`Plugins` for more information on how to
write a plugin that will activate and execute the new result format.
......@@ -6,8 +6,9 @@ import tempfile
import shutil
from avocado import Test
from avocado.core import jsonresult
from avocado.core import job
from avocado.core.result import Result
from avocado.plugins import jsonresult
class FakeJob(object):
......@@ -28,10 +29,11 @@ class JSONResultTest(unittest.TestCase):
self.tmpfile = tempfile.mkstemp()
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
args = argparse.Namespace(json_output=self.tmpfile[1])
self.test_result = jsonresult.JSONTestResult(FakeJob(args))
self.job = job.Job(args)
self.test_result = Result(FakeJob(args))
self.test_result.filename = self.tmpfile[1]
self.test_result.start_tests()
self.test1 = SimpleTest(job=job.Job(), base_logdir=self.tmpdir)
self.test1 = SimpleTest(job=self.job, base_logdir=self.tmpdir)
self.test1.status = 'PASS'
self.test1.time_elapsed = 1.23
......@@ -44,8 +46,9 @@ class JSONResultTest(unittest.TestCase):
self.test_result.start_test(self.test1)
self.test_result.end_test(self.test1.get_state())
self.test_result.end_tests()
self.assertTrue(self.test_result.json)
with open(self.test_result.filename) as fp:
json_result = jsonresult.JSONResult()
json_result.render(self.test_result, self.job)
with open(self.job.args.json_output) as fp:
j = fp.read()
obj = json.loads(j)
self.assertTrue(obj)
......@@ -78,7 +81,9 @@ class JSONResultTest(unittest.TestCase):
run_fake_status({"status": ""})
# Postprocess
self.test_result.end_tests()
res = json.loads(self.test_result.json)
json_result = jsonresult.JSONResult()
json_result.render(self.test_result, self.job)
res = json.loads(open(self.job.args.json_output).read())
check_item("[pass]", res["pass"], 2)
check_item("[errors]", res["errors"], 4)
check_item("[failures]", res["failures"], 1)
......@@ -94,7 +99,9 @@ class JSONResultTest(unittest.TestCase):
self.test_result.start_test(self.test1)
self.test_result.check_test(self.test1.get_state())
self.test_result.end_tests()
res = json.loads(self.test_result.json)
json_result = jsonresult.JSONResult()
json_result.render(self.test_result, self.job)
res = json.loads(open(self.job.args.json_output).read())
check_item("[total]", res["total"], 1)
check_item("[skip]", res["skip"], 0)
check_item("[pass]", res["pass"], 1)
......
......@@ -182,7 +182,7 @@ class RemoteTestRunnerSetup(unittest.TestCase):
flexmock_teardown()
def test_setup(self):
    # The old docstring ("Tests RemoteTestResult.test_setup()") survived
    # the rename as a dead string literal right above the new one; only
    # the updated docstring is kept here.
    """ Tests RemoteResult.test_setup() """
    self.runner.setup()
    flexmock_teardown()
......
......@@ -8,8 +8,9 @@ from StringIO import StringIO
from xml.dom import minidom
from avocado import Test
from avocado.core import xunit
from avocado.core import job
from avocado.core.result import Result
from avocado.plugins import xunit
class ParseXMLError(Exception):
......@@ -35,9 +36,10 @@ class xUnitSucceedTest(unittest.TestCase):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
args = argparse.Namespace()
args.xunit_output = self.tmpfile[1]
self.test_result = xunit.xUnitTestResult(FakeJob(args))
self.job = job.Job(args)
self.test_result = Result(FakeJob(args))
self.test_result.start_tests()
self.test1 = SimpleTest(job=job.Job(), base_logdir=self.tmpdir)
self.test1 = SimpleTest(job=self.job, base_logdir=self.tmpdir)
self.test1.status = 'PASS'
self.test1.time_elapsed = 1.23
unittests_path = os.path.dirname(os.path.abspath(__file__))
......@@ -52,8 +54,9 @@ class xUnitSucceedTest(unittest.TestCase):
self.test_result.start_test(self.test1)
self.test_result.end_test(self.test1.get_state())
self.test_result.end_tests()
self.assertTrue(self.test_result.xml)
with open(self.test_result.output) as fp:
xunit_result = xunit.XUnitResult()
xunit_result.render(self.test_result, self.job)
with open(self.job.args.xunit_output) as fp:
xml = fp.read()
try:
dom = minidom.parseString(xml)
......
......@@ -120,8 +120,8 @@ if __name__ == '__main__':
'envkeep = avocado.plugins.envkeep:EnvKeep',
'gdb = avocado.plugins.gdb:GDB',
'wrapper = avocado.plugins.wrapper:Wrapper',
'xunit = avocado.plugins.xunit:XUnit',
'json = avocado.plugins.json:JSON',
'xunit = avocado.plugins.xunit:XUnitCLI',
'json = avocado.plugins.jsonresult:JSONCLI',
'journal = avocado.plugins.journal:Journal',
'html = avocado.plugins.html:HTML',
'remote = avocado.plugins.remote:Remote',
......@@ -143,6 +143,10 @@ if __name__ == '__main__':
'avocado.plugins.job.prepost': [
'jobscripts = avocado.plugins.jobscripts:JobScripts',
],
'avocado.plugins.result': [
'xunit = avocado.plugins.xunit:XUnitResult',
'json = avocado.plugins.jsonresult:JSONResult',
],
},
zip_safe=False,
test_suite='selftests',
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册