Commit 86dcba4f authored by Rudá Moura, committed by Rudá Moura

Merge pull request #98 from avocado-framework/multiple-output-plugins-2

Multiple output plugins
@@ -17,6 +17,7 @@
 Module that describes a sequence of automated test operations.
 """
+import argparse
 import imp
 import logging
 import multiprocessing
@@ -35,6 +36,8 @@ from avocado.utils import archive
 from avocado import multiplex_config
 from avocado import test
 from avocado import result
+from avocado.plugins import xunit
+from avocado.plugins import jsonresult
 
 _NEW_ISSUE_LINK = 'https://github.com/avocado-framework/avocado/issues/new'
@@ -178,27 +181,76 @@ class Job(object):
         self.test_dir = data_dir.get_test_dir()
         self.test_index = 1
         self.status = "RUNNING"
+        self.result_proxy = result.TestResultProxy()
         self.output_manager = output.OutputManager()
 
-    def _make_test_runner(self, test_result):
+    def _make_test_runner(self):
         if hasattr(self.args, 'test_runner'):
             test_runner_class = self.args.test_runner
         else:
             test_runner_class = TestRunner
-        test_runner = test_runner_class(job=self,
-                                        test_result=test_result)
-        return test_runner
-
-    def _make_test_result(self, urls):
-        if hasattr(self.args, 'test_result'):
-            test_result_class = self.args.test_result
-        else:
-            test_result_class = result.HumanTestResult
-        if self.args is not None:
-            self.args.test_result_total = len(urls)
-        test_result = test_result_class(self.output_manager, self.args)
-        return test_result
+        self.test_runner = test_runner_class(job=self,
+                                             test_result=self.result_proxy)
+
+    def _set_output_plugins(self):
+        plugin_using_stdout = None
+        e_msg = ("Avocado could not set %s and %s both to output to stdout. ")
+        e_msg_2 = ("Please set the output flag of one of them to a file "
+                   "to avoid conflicts.")
+        for key in self.args.__dict__:
+            if key.endswith('_result'):
+                result_class = getattr(self.args, key)
+                if issubclass(result_class, result.TestResult):
+                    result_plugin = result_class(self.output_manager,
+                                                 self.args)
+                    if result_plugin.output == '-':
+                        if plugin_using_stdout is not None:
+                            e_msg %= (plugin_using_stdout.output_option,
+                                      result_plugin.output_option)
+                            self.output_manager.log_fail_header(e_msg)
+                            self.output_manager.log_fail_header(e_msg_2)
+                            sys.exit(error_codes.numeric_status['AVOCADO_JOB_FAIL'])
+                        else:
+                            plugin_using_stdout = result_plugin
+                    self.result_proxy.add_output_plugin(result_plugin)
+
+    def _make_test_result(self):
+        """
+        Set up output plugins.
+
+        The basic idea behind the output plugins is:
+
+        * If there are any active output plugins, use them
+        * Always add Xunit and JSON plugins outputting to files inside the
+          results dir
+        * If at the end we only have 2 output plugins (Xunit and JSON), we can
+          add the human output plugin.
+        """
+        if self.args:
+            # If there are any active output plugins, let's use them
+            self._set_output_plugins()
+        # Setup the xunit plugin to output to the debug directory
+        xunit_file = os.path.join(self.debugdir, 'results.xml')
+        args = argparse.Namespace()
+        args.xunit_output = xunit_file
+        xunit_plugin = xunit.xUnitTestResult(self.output_manager, args)
+        self.result_proxy.add_output_plugin(xunit_plugin)
+        # Setup the json plugin to output to the debug directory
+        json_file = os.path.join(self.debugdir, 'results.json')
+        args = argparse.Namespace()
+        args.json_output = json_file
+        json_plugin = jsonresult.JSONTestResult(self.output_manager, args)
+        self.result_proxy.add_output_plugin(json_plugin)
+        # If there are no active output plugins besides xunit and json,
+        # set up the human output.
+        if len(self.result_proxy.output_plugins) == 2:
+            human_plugin = result.HumanTestResult(self.output_manager, self.args)
+            self.result_proxy.add_output_plugin(human_plugin)
 
     def _run(self, urls=None, multiplex_file=None):
         """
@@ -248,8 +300,11 @@ class Job(object):
         for dct in parser.get_dicts():
             params_list.append(dct)
 
-        test_result = self._make_test_result(params_list)
-        self.test_runner = self._make_test_runner(test_result)
+        if self.args is not None:
+            self.args.test_result_total = len(params_list)
+        self._make_test_result()
+        self._make_test_runner()
 
         self.output_manager.start_file_logging(self.debuglog,
                                                self.loglevel)
...
@@ -86,6 +86,14 @@ class TestResultJournal(TestResult):
                        status))
         self.journal.commit()
 
+    def set_output(self):
+        # Journal does not output to stdout
+        self.output = None
+
+    def set_output_option(self):
+        # Journal does not need an output option
+        self.output_option = None
+
     def start_test(self, test):
         # lazy init because we need the toplevel logdir for the job
         if not self.journal_initialized:
@@ -121,4 +129,4 @@ class Journal(plugin.Plugin):
 
     def activate(self, app_args):
         if app_args.journal:
-            self.parser.set_defaults(test_result=TestResultJournal)
+            self.parser.set_defaults(journal_result=TestResultJournal)
@@ -28,6 +28,12 @@ class JSONTestResult(TestResult):
     JSON Test Result class.
     """
 
+    def set_output(self):
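+        # Default to stdout ('-'); the --json-output command line option
+        # stores a file path in args.json_output, which takes precedence.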
+        self.output = getattr(self.args, 'json_output', '-')
+
+    def set_output_option(self):
+        self.output_option = '--json'
+
     def start_tests(self):
         """
         Called once before any tests are executed.
@@ -94,4 +100,4 @@ class JSON(plugin.Plugin):
 
     def activate(self, app_args):
         if app_args.json:
-            self.parser.set_defaults(test_result=JSONTestResult)
+            self.parser.set_defaults(json_result=JSONTestResult)
@@ -36,9 +36,15 @@ class Test(object):
     """
 
     def __init__(self, name, status, time):
+        note = "Not supported yet"
+        self.name = name
         self.tagged_name = name
         self.status = status
         self.time_elapsed = time
+        self.fail_class = note
+        self.traceback = note
+        self.text_output = note
+        self.fail_reason = note
 
 
 class VMTestRunner(TestRunner):
@@ -120,6 +126,7 @@ class VMTestResult(TestResult):
             self.vm.remote.send_files(test_path, self.remote_test_dir)
 
     def setup(self):
+        self.urls = self.args.url.split()
        if self.args.vm_domain is None:
             e_msg = ('Please set Virtual Machine Domain with option '
                      '--vm-domain.')
@@ -167,6 +174,12 @@ class VMTestResult(TestResult):
         if self.args.vm_cleanup is True and self.vm.snapshot is not None:
             self.vm.restore_snapshot()
 
+    def set_output(self):
+        self.output = '-'
+
+    def set_output_option(self):
+        self.output_option = "--vm"
+
     def start_tests(self):
         """
         Called once before any tests are executed.
@@ -289,5 +302,5 @@ class RunVM(plugin.Plugin):
 
     def activate(self, app_args):
         if app_args.vm:
-            self.parser.set_defaults(test_result=VMTestResult,
+            self.parser.set_defaults(vm_result=VMTestResult,
                                      test_runner=VMTestRunner)
@@ -28,8 +28,9 @@ class XmlResult(object):
     Handles the XML details for xUnit output.
     """
 
-    def __init__(self):
+    def __init__(self, output):
         self.xml = ['<?xml version="1.0" encoding="UTF-8"?>']
+        self.output = output
 
     def _escape_attr(self, attrib):
         return quoteattr(attrib)
@@ -37,12 +38,14 @@ class XmlResult(object):
     def _escape_cdata(self, cdata):
         return cdata.replace(']]>', ']]>]]&gt;<![CDATA[')
 
-    def save(self, filename):
+    def save(self, filename=None):
         """
         Save the XML document to a file or standard output.
 
         :param filename: File name to save. Use '-' for standard output.
         """
+        if filename is None:
+            filename = self.output
         xml = '\n'.join(self.xml)
         if filename == '-':
             sys.stdout.write(xml)
@@ -158,8 +161,13 @@ class xUnitTestResult(TestResult):
         :param args: an instance of :class:`argparse.Namespace`.
         """
         TestResult.__init__(self, stream, args)
-        self.filename = getattr(self.args, 'xunit_output', '-')
-        self.xml = XmlResult()
+        self.xml = XmlResult(self.output)
+
+    def set_output(self):
+        self.output = getattr(self.args, 'xunit_output', '-')
+
+    def set_output_option(self):
+        self.output_option = '--xunit'
 
     def start_tests(self):
         """
@@ -199,7 +207,7 @@ class xUnitTestResult(TestResult):
                   'skip': len(self.skipped),
                   'total_time': self.total_time}
         self.xml.end_testsuite(**values)
-        self.xml.save(self.filename)
+        self.xml.save()
 
 
 class XUnit(plugin.Plugin):
@@ -222,4 +230,4 @@ class XUnit(plugin.Plugin):
 
     def activate(self, app_args):
         if app_args.xunit:
-            self.parser.set_defaults(test_result=xUnitTestResult)
+            self.parser.set_defaults(xunit_result=xUnitTestResult)
@@ -21,6 +21,70 @@ used by the test runner.
 """
 
 
+class InvalidOutputPlugin(Exception):
+    pass
+
+
+class TestResultProxy(object):
+
+    def __init__(self):
+        self.output_plugins = []
+        self.console_plugin = None
+
+    def __getattr__(self, attr):
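+        # Attribute lookups that miss on the proxy are delegated to the
+        # registered plugins: the first plugin that defines the attribute
+        # wins, and None is returned when none of them does.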
+        for output_plugin in self.output_plugins:
+            if hasattr(output_plugin, attr):
+                return getattr(output_plugin, attr)
+        else:
+            return None
+
+    def add_output_plugin(self, plugin):
+        if not isinstance(plugin, TestResult):
+            raise InvalidOutputPlugin("Object %s is not an instance of "
+                                      "TestResult" % plugin)
+        self.output_plugins.append(plugin)
+
+    def start_tests(self):
+        for output_plugin in self.output_plugins:
+            output_plugin.start_tests()
+
+    def end_tests(self):
+        for output_plugin in self.output_plugins:
+            output_plugin.end_tests()
+
+    def start_test(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.start_test(test)
+
+    def end_test(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.end_test(test)
+
+    def add_pass(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.add_pass(test)
+
+    def add_error(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.add_error(test)
+
+    def add_fail(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.add_fail(test)
+
+    def add_skip(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.add_skip(test)
+
+    def add_warn(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.add_warn(test)
+
+    def check_test(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.check_test(test)
+
+
 class TestResult(object):
 
     """
@@ -44,6 +108,33 @@ class TestResult(object):
         self.failed = []
         self.skipped = []
         self.warned = []
+        # The convention is that a dash denotes stdout.
+        self.output = '-'
+        self.set_output()
+        self.output_option = None
+        self.set_output_option()
+
+    def set_output(self):
+        """
+        Set the value of the output attribute.
+
+        By default, output is the stream (stdout), denoted by '-'.
+
+        Must be implemented by plugins, so avocado knows where the plugin
+        wants to output to, avoiding clashes among different plugins that
+        want to use the stream at the same time.
+        """
+        pass
+
+    def set_output_option(self):
+        """
+        Set the value of the output option (command line).
+
+        Must be implemented by plugins, so avocado prints a friendly
+        message to users who are using more than one plugin to print results
+        to stdout.
+        """
+        pass
+
     def start_tests(self):
         """
...
@@ -62,6 +62,42 @@ simply use::
     </testcase>
     <testcase classname="synctest" name="synctest.1" time="1.69329714775"/>
 
+Machine readable output - json
+------------------------------
+
+`JSON <http://www.json.org/>`__ is a widely used data exchange format. The
+json avocado plugin outputs job information, similarly to the xunit output
+plugin::
+
+    $ scripts/avocado --json run "sleeptest failtest synctest"
+    {"tests": [{"test": "sleeptest.1", "url": "sleeptest", "status": "PASS", "time": 1.4282619953155518}, {"test": "failtest.1", "url": "failtest", "status": "FAIL", "time": 0.34017300605773926}, {"test": "synctest.1", "url": "synctest", "status": "PASS", "time": 2.109131097793579}], "errors": 0, "skip": 0, "time": 3.87756609916687, "debuglog": "/home/lmr/avocado/logs/run-2014-06-11-01.35.15/debug.log", "pass": 2, "failures": 1, "total": 3}
+
+Multiple output plugins
+-----------------------
+
+You can enable multiple output plugins at once, as long as only one of them
+uses the standard output. For example, it is fine to use the xunit plugin on
+stdout and the JSON plugin to output to a file::
+
+    $ scripts/avocado --xunit --json --json-output /tmp/result.json run "sleeptest synctest"
+    <?xml version="1.0" encoding="UTF-8"?>
+    <testsuite name="avocado" tests="2" errors="0" failures="0" skip="0" time="3.21392536163" timestamp="2014-06-11 01:49:35.858187">
+        <testcase classname="sleeptest" name="sleeptest.1" time="1.34533214569"/>
+        <testcase classname="synctest" name="synctest.1" time="1.86859321594"/>
+    </testsuite>
+
+    $ cat /tmp/result.json
+    {"tests": [{"test": "sleeptest.1", "url": "sleeptest", "status": "PASS", "time": 1.345332145690918}, {"test": "synctest.1", "url": "synctest", "status": "PASS", "time": 1.8685932159423828}], "errors": 0, "skip": 0, "time": 3.213925361633301, "debuglog": "/home/lmr/avocado/logs/run-2014-06-11-01.49.35/debug.log", "pass": 2, "failures": 0, "total": 2}
+
+But you won't be able to do the same without the --json-output flag passed to
+the program::
+
+    $ scripts/avocado --xunit --json run "sleeptest synctest"
+    Avocado could not set --json and --xunit both to output to stdout.
+    Please set the output flag of one of them to a file to avoid conflicts.
+
+That's basically the only rule you need to follow.
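+
+A result plugin that follows this convention only needs to implement
+``set_output`` and ``set_output_option``, as the json and xunit plugins do.
+A minimal sketch, using a hypothetical ``--report`` option whose file
+destination would be stored in ``args.report_output`` (both names are made
+up for illustration)::
+
+    from avocado import result
+
+    class ReportTestResult(result.TestResult):
+
+        def set_output(self):
+            # '-' is the convention for stdout; anything else is a file path
+            self.output = getattr(self.args, 'report_output', '-')
+
+        def set_output_option(self):
+            # Flag name shown in the stdout conflict error message
+            self.output_option = '--report'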
+
 Implementing other output formats
 ---------------------------------
...
@@ -14,9 +14,12 @@
 # Copyright: Red Hat Inc. 2013-2014
 # Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
 
+import json
+import tempfile
 import unittest
 import os
 import sys
+from xml.dom import minidom
 
 # simple magic for using scripts within a source tree
 basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
@@ -44,5 +47,128 @@ class OutputTest(unittest.TestCase):
                          "doublefree output:\n%s" % output)
 
 
+class OutputPluginTest(unittest.TestCase):
+
+    def check_output_files(self, debug_log):
+        base_dir = os.path.dirname(debug_log)
+        json_output = os.path.join(base_dir, 'results.json')
+        self.assertTrue(os.path.isfile(json_output))
+        with open(json_output, 'r') as fp:
+            json.load(fp)
+        xunit_output = os.path.join(base_dir, 'results.xml')
+        self.assertTrue(os.path.isfile(xunit_output))
+        minidom.parse(xunit_output)
+
+    def test_output_incompatible_setup(self):
+        os.chdir(basedir)
+        cmd_line = './scripts/avocado --xunit --json run sleeptest'
+        result = process.run(cmd_line, ignore_status=True)
+        expected_rc = 2
+        output = result.stdout + result.stderr
+        self.assertEqual(result.exit_status, expected_rc,
+                         "Avocado did not return rc %d:\n%s" %
+                         (expected_rc, result))
+        error_excerpt = "Avocado could not set --json and --xunit both to output to stdout."
+        self.assertIn(error_excerpt, output,
+                      "Missing expected error message from output:\n%s" % output)
+
+    def test_output_incompatible_setup_2(self):
+        os.chdir(basedir)
+        cmd_line = './scripts/avocado --vm --json run sleeptest'
+        result = process.run(cmd_line, ignore_status=True)
+        expected_rc = 2
+        output = result.stdout + result.stderr
+        self.assertEqual(result.exit_status, expected_rc,
+                         "Avocado did not return rc %d:\n%s" %
+                         (expected_rc, result))
+        error_excerpt = "Avocado could not set --json and --vm both to output to stdout."
+        self.assertIn(error_excerpt, output,
+                      "Missing expected error message from output:\n%s" % output)
+
+    def test_output_compatible_setup(self):
+        tmpfile = tempfile.mktemp()
+        os.chdir(basedir)
+        cmd_line = './scripts/avocado --journal --xunit --xunit-output %s --json run sleeptest' % tmpfile
+        result = process.run(cmd_line, ignore_status=True)
+        output = result.stdout + result.stderr
+        expected_rc = 0
+        try:
+            self.assertEqual(result.exit_status, expected_rc,
+                             "Avocado did not return rc %d:\n%s" %
+                             (expected_rc, result))
+            # Check if we are producing valid outputs
+            json.loads(output)
+            minidom.parse(tmpfile)
+        finally:
+            try:
+                os.remove(tmpfile)
+            except OSError:
+                pass
+
+    def test_output_compatible_setup_2(self):
+        tmpfile = tempfile.mktemp()
+        os.chdir(basedir)
+        cmd_line = './scripts/avocado --xunit --json --json-output %s run sleeptest' % tmpfile
+        result = process.run(cmd_line, ignore_status=True)
+        output = result.stdout + result.stderr
+        expected_rc = 0
+        try:
+            self.assertEqual(result.exit_status, expected_rc,
+                             "Avocado did not return rc %d:\n%s" %
+                             (expected_rc, result))
+            # Check if we are producing valid outputs
+            with open(tmpfile, 'r') as fp:
+                json_results = json.load(fp)
+                debug_log = json_results['debuglog']
+                self.check_output_files(debug_log)
+            minidom.parseString(output)
+        finally:
+            try:
+                os.remove(tmpfile)
+            except OSError:
+                pass
+
+    def test_output_compatible_setup_nooutput(self):
+        tmpfile = tempfile.mktemp()
+        tmpfile2 = tempfile.mktemp()
+        os.chdir(basedir)
+        cmd_line = './scripts/avocado --xunit --xunit-output %s --json --json-output %s run sleeptest' % (tmpfile, tmpfile2)
+        result = process.run(cmd_line, ignore_status=True)
+        output = result.stdout + result.stderr
+        expected_rc = 0
+        try:
+            self.assertEqual(result.exit_status, expected_rc,
+                             "Avocado did not return rc %d:\n%s" %
+                             (expected_rc, result))
+            self.assertEqual(output, "",
+                             "Output is not empty as expected:\n%s" % output)
+            # Check if we are producing valid outputs
+            with open(tmpfile2, 'r') as fp:
+                json_results = json.load(fp)
+                debug_log = json_results['debuglog']
+                self.check_output_files(debug_log)
+            minidom.parse(tmpfile)
+        finally:
+            try:
+                os.remove(tmpfile)
+                os.remove(tmpfile2)
+            except OSError:
+                pass
+
+    def test_default_enabled_plugins(self):
+        os.chdir(basedir)
+        cmd_line = './scripts/avocado run sleeptest'
+        result = process.run(cmd_line, ignore_status=True)
+        output = result.stdout + result.stderr
+        expected_rc = 0
+        self.assertEqual(result.exit_status, expected_rc,
+                         "Avocado did not return rc %d:\n%s" %
+                         (expected_rc, result))
+        output_lines = output.splitlines()
+        first_line = output_lines[0]
+        debug_log = first_line.split()[-1]
+        self.check_output_files(debug_log)
 
 
 if __name__ == '__main__':
     unittest.main()
@@ -14,6 +14,7 @@
 # Copyright: Red Hat Inc. 2014
 # Author: Ruda Moura <rmoura@redhat.com>
 
+import argparse
 import unittest
 import os
 import sys
@@ -30,12 +31,17 @@ from avocado.plugins import xunit
 from avocado import test
 
 
+class ParseXMLError(Exception):
+    pass
+
+
 class xUnitSucceedTest(unittest.TestCase):
 
     def setUp(self):
         self.tmpfile = mkstemp()
-        self.test_result = xunit.xUnitTestResult()
-        self.test_result.filename = self.tmpfile[1]
+        args = argparse.Namespace()
+        args.xunit_output = self.tmpfile[1]
+        self.test_result = xunit.xUnitTestResult(args=args)
         self.test_result.start_tests()
         self.test1 = test.Test()
         self.test1.status = 'PASS'
@@ -50,9 +56,12 @@ class xUnitSucceedTest(unittest.TestCase):
         self.test_result.end_test(self.test1)
         self.test_result.end_tests()
         self.assertTrue(self.test_result.xml)
-        with open(self.test_result.filename) as fp:
+        with open(self.test_result.output) as fp:
             xml = fp.read()
-        dom = minidom.parseString(xml)
+        try:
+            dom = minidom.parseString(xml)
+        except Exception, details:
+            raise ParseXMLError("Error parsing XML: '%s'.\nXML Contents:\n%s" %
+                                (details, xml))
         self.assertTrue(dom)
         els = dom.getElementsByTagName('testcase')
         self.assertEqual(len(els), 1)
...