diff --git a/avocado/job.py b/avocado/job.py
index e8ea33d29c84818b933443ac6cd542cb4ee38459..b1ad82b7a3b58948b3ba8483e9dfae1df97beb36 100644
--- a/avocado/job.py
+++ b/avocado/job.py
@@ -17,6 +17,7 @@
Module that describes a sequence of automated test operations.
"""
+import argparse
import imp
import logging
import multiprocessing
@@ -35,6 +36,8 @@ from avocado.utils import archive
from avocado import multiplex_config
from avocado import test
from avocado import result
+from avocado.plugins import xunit
+from avocado.plugins import jsonresult
_NEW_ISSUE_LINK = 'https://github.com/avocado-framework/avocado/issues/new'
@@ -178,27 +181,76 @@ class Job(object):
self.test_dir = data_dir.get_test_dir()
self.test_index = 1
self.status = "RUNNING"
+ self.result_proxy = result.TestResultProxy()
self.output_manager = output.OutputManager()
- def _make_test_runner(self, test_result):
+ def _make_test_runner(self):
if hasattr(self.args, 'test_runner'):
test_runner_class = self.args.test_runner
else:
test_runner_class = TestRunner
- test_runner = test_runner_class(job=self,
- test_result=test_result)
- return test_runner
- def _make_test_result(self, urls):
- if hasattr(self.args, 'test_result'):
- test_result_class = self.args.test_result
- else:
- test_result_class = result.HumanTestResult
- if self.args is not None:
- self.args.test_result_total = len(urls)
- test_result = test_result_class(self.output_manager, self.args)
- return test_result
+ self.test_runner = test_runner_class(job=self,
+ test_result=self.result_proxy)
+
+ def _set_output_plugins(self):
+ plugin_using_stdout = None
+ e_msg = ("Avocado could not set %s and %s both to output to stdout. ")
+ e_msg_2 = ("Please set the output flag of one of them to a file "
+ "to avoid conflicts.")
+ for key in self.args.__dict__:
+ if key.endswith('_result'):
+ result_class = getattr(self.args, key)
+ if issubclass(result_class, result.TestResult):
+ result_plugin = result_class(self.output_manager,
+ self.args)
+ if result_plugin.output == '-':
+ if plugin_using_stdout is not None:
+ e_msg %= (plugin_using_stdout.output_option,
+ result_plugin.output_option)
+ self.output_manager.log_fail_header(e_msg)
+ self.output_manager.log_fail_header(e_msg_2)
+ sys.exit(error_codes.numeric_status['AVOCADO_JOB_FAIL'])
+ else:
+ plugin_using_stdout = result_plugin
+ self.result_proxy.add_output_plugin(result_plugin)
+
+ def _make_test_result(self):
+ """
+ Set up output plugins.
+
+ The basic idea behind the output plugins is:
+
+ * If there are any active output plugins, use them
+ * Always add Xunit and JSON plugins outputting to files inside the
+ results dir
+ * If at the end we only have 2 output plugins (Xunit and JSON), we can
+ add the human output plugin.
+ """
+ if self.args:
+ # If there are any active output plugins, let's use them
+ self._set_output_plugins()
+
+ # Setup the xunit plugin to output to the debug directory
+ xunit_file = os.path.join(self.debugdir, 'results.xml')
+ args = argparse.Namespace()
+ args.xunit_output = xunit_file
+ xunit_plugin = xunit.xUnitTestResult(self.output_manager, args)
+ self.result_proxy.add_output_plugin(xunit_plugin)
+
+ # Setup the json plugin to output to the debug directory
+ json_file = os.path.join(self.debugdir, 'results.json')
+ args = argparse.Namespace()
+ args.json_output = json_file
+ json_plugin = jsonresult.JSONTestResult(self.output_manager, args)
+ self.result_proxy.add_output_plugin(json_plugin)
+
+ # If there are no active output plugins besides xunit and json,
+ # set up the human output.
+ if len(self.result_proxy.output_plugins) == 2:
+ human_plugin = result.HumanTestResult(self.output_manager, self.args)
+ self.result_proxy.add_output_plugin(human_plugin)
def _run(self, urls=None, multiplex_file=None):
"""
@@ -248,8 +300,11 @@ class Job(object):
for dct in parser.get_dicts():
params_list.append(dct)
- test_result = self._make_test_result(params_list)
- self.test_runner = self._make_test_runner(test_result)
+ if self.args is not None:
+ self.args.test_result_total = len(params_list)
+
+ self._make_test_result()
+ self._make_test_runner()
self.output_manager.start_file_logging(self.debuglog,
self.loglevel)
diff --git a/avocado/plugins/journal.py b/avocado/plugins/journal.py
index 7ae8ff674bf5d7bf4bd6605bb891d93f4afb5b5a..4cce59351af26658571ee57090502df26c141da0 100644
--- a/avocado/plugins/journal.py
+++ b/avocado/plugins/journal.py
@@ -86,6 +86,14 @@ class TestResultJournal(TestResult):
status))
self.journal.commit()
+ def set_output(self):
+ # Journal does not output to stdout
+ self.output = None
+
+ def set_output_option(self):
+ # Journal does not need an output option
+ self.output_option = None
+
def start_test(self, test):
# lazy init because we need the toplevel logdir for the job
if not self.journal_initialized:
@@ -121,4 +129,4 @@ class Journal(plugin.Plugin):
def activate(self, app_args):
if app_args.journal:
- self.parser.set_defaults(test_result=TestResultJournal)
+ self.parser.set_defaults(journal_result=TestResultJournal)
diff --git a/avocado/plugins/jsonresult.py b/avocado/plugins/jsonresult.py
index 654f5ad147a8ae5cd40028c9e4d091b35dd974f1..0e2d581572444425122652e9b3566ca8c1cf223b 100644
--- a/avocado/plugins/jsonresult.py
+++ b/avocado/plugins/jsonresult.py
@@ -28,6 +28,12 @@ class JSONTestResult(TestResult):
JSON Test Result class.
"""
+ def set_output(self):
+ self.output = getattr(self.args, 'json_output', '-')
+
+ def set_output_option(self):
+ self.output_option = '--json'
+
def start_tests(self):
"""
Called once before any tests are executed.
@@ -94,4 +100,4 @@ class JSON(plugin.Plugin):
def activate(self, app_args):
if app_args.json:
- self.parser.set_defaults(test_result=JSONTestResult)
+ self.parser.set_defaults(json_result=JSONTestResult)
diff --git a/avocado/plugins/vm.py b/avocado/plugins/vm.py
index 79886b2cb9bfaaefc1ef7744a94124b43bd5eaf9..aa51b9f3132e97e7bd0de307ad0a5edbed86b831 100644
--- a/avocado/plugins/vm.py
+++ b/avocado/plugins/vm.py
@@ -36,9 +36,15 @@ class Test(object):
"""
def __init__(self, name, status, time):
+ note = "Not supported yet"
+ self.name = name
self.tagged_name = name
self.status = status
self.time_elapsed = time
+ self.fail_class = note
+ self.traceback = note
+ self.text_output = note
+ self.fail_reason = note
class VMTestRunner(TestRunner):
@@ -120,6 +126,7 @@ class VMTestResult(TestResult):
self.vm.remote.send_files(test_path, self.remote_test_dir)
def setup(self):
+ self.urls = self.args.url.split()
if self.args.vm_domain is None:
e_msg = ('Please set Virtual Machine Domain with option '
'--vm-domain.')
@@ -167,6 +174,12 @@ class VMTestResult(TestResult):
if self.args.vm_cleanup is True and self.vm.snapshot is not None:
self.vm.restore_snapshot()
+ def set_output(self):
+ self.output = '-'
+
+ def set_output_option(self):
+ self.output_option = "--vm"
+
def start_tests(self):
"""
Called once before any tests are executed.
@@ -289,5 +302,5 @@ class RunVM(plugin.Plugin):
def activate(self, app_args):
if app_args.vm:
- self.parser.set_defaults(test_result=VMTestResult,
+ self.parser.set_defaults(vm_result=VMTestResult,
test_runner=VMTestRunner)
diff --git a/avocado/plugins/xunit.py b/avocado/plugins/xunit.py
index 4b89c3211a120a83662c4d74a6aab290e4b297b7..7ebe0452ea5c8b18dd06f89e00ed57a3dbe78ce4 100644
--- a/avocado/plugins/xunit.py
+++ b/avocado/plugins/xunit.py
@@ -28,8 +28,9 @@ class XmlResult(object):
Handles the XML details for xUnit output.
"""
- def __init__(self):
+ def __init__(self, output):
self.xml = ['']
+ self.output = output
def _escape_attr(self, attrib):
return quoteattr(attrib)
@@ -37,12 +38,14 @@ class XmlResult(object):
def _escape_cdata(self, cdata):
return cdata.replace(']]>', ']]>]]&gt;<![CDATA[')
+Machine readable output - json
+------------------------------
+
+`JSON <http://www.json.org/>`__ is a widely used data exchange format. The
+json avocado plugin outputs job information, similarly to the xunit output
+plugin::
+
+ $ scripts/avocado --json run "sleeptest failtest synctest"
+ {"tests": [{"test": "sleeptest.1", "url": "sleeptest", "status": "PASS", "time": 1.4282619953155518}, {"test": "failtest.1", "url": "failtest", "status": "FAIL", "time": 0.34017300605773926}, {"test": "synctest.1", "url": "synctest", "status": "PASS", "time": 2.109131097793579}], "errors": 0, "skip": 0, "time": 3.87756609916687, "debuglog": "/home/lmr/avocado/logs/run-2014-06-11-01.35.15/debug.log", "pass": 2, "failures": 1, "total": 3}
+
+Multiple output plugins
+-----------------------
+
+You can enable multiple output plugins at once, as long as only one of them
+uses the standard output. For example, it is fine to use the xunit plugin on
+stdout and the JSON plugin to output to a file::
+
+ $ scripts/avocado --xunit --json --json-output /tmp/result.json run "sleeptest synctest"
+
+
+
+
+
+
+ $ cat /tmp/result.json
+ {"tests": [{"test": "sleeptest.1", "url": "sleeptest", "status": "PASS", "time": 1.345332145690918}, {"test": "synctest.1", "url": "synctest", "status": "PASS", "time": 1.8685932159423828}], "errors": 0, "skip": 0, "time": 3.213925361633301, "debuglog": "/home/lmr/avocado/logs/run-2014-06-11-01.49.35/debug.log", "pass": 2, "failures": 0, "total": 2}
+
+But you won't be able to do the same without the --json-output flag passed to
+the program::
+
+ $ scripts/avocado --xunit --json run "sleeptest synctest"
+ Avocado could not set --json and --xunit both to output to stdout.
+ Please set the output flag of one of them to a file to avoid conflicts.
+
+That's basically the only rule you need to follow.
+
Implementing other output formats
---------------------------------
diff --git a/selftests/all/functional/avocado/output_tests.py b/selftests/all/functional/avocado/output_tests.py
index 45b762a06105c834251887c4a561cce7ad7431ce..4bbcab043ed3130f1b8c4bed8c87e497563a56aa 100644
--- a/selftests/all/functional/avocado/output_tests.py
+++ b/selftests/all/functional/avocado/output_tests.py
@@ -14,9 +14,12 @@
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues
+import json
+import tempfile
import unittest
import os
import sys
+from xml.dom import minidom
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
@@ -44,5 +47,128 @@ class OutputTest(unittest.TestCase):
"doublefree output:\n%s" % output)
+class OutputPluginTest(unittest.TestCase):
+
+ def check_output_files(self, debug_log):
+ base_dir = os.path.dirname(debug_log)
+ json_output = os.path.join(base_dir, 'results.json')
+ self.assertTrue(os.path.isfile(json_output))
+ with open(json_output, 'r') as fp:
+ json.load(fp)
+ xunit_output = os.path.join(base_dir, 'results.xml')
+        self.assertTrue(os.path.isfile(xunit_output))
+ minidom.parse(xunit_output)
+
+ def test_output_incompatible_setup(self):
+ os.chdir(basedir)
+ cmd_line = './scripts/avocado --xunit --json run sleeptest'
+ result = process.run(cmd_line, ignore_status=True)
+ expected_rc = 2
+ output = result.stdout + result.stderr
+ self.assertEqual(result.exit_status, expected_rc,
+ "Avocado did not return rc %d:\n%s" %
+ (expected_rc, result))
+ error_excerpt = "Avocado could not set --json and --xunit both to output to stdout."
+ self.assertIn(error_excerpt, output,
+                      "Missing expected error message from output:\n%s" % output)
+
+ def test_output_incompatible_setup_2(self):
+ os.chdir(basedir)
+ cmd_line = './scripts/avocado --vm --json run sleeptest'
+ result = process.run(cmd_line, ignore_status=True)
+ expected_rc = 2
+ output = result.stdout + result.stderr
+ self.assertEqual(result.exit_status, expected_rc,
+ "Avocado did not return rc %d:\n%s" %
+ (expected_rc, result))
+ error_excerpt = "Avocado could not set --json and --vm both to output to stdout."
+ self.assertIn(error_excerpt, output,
+                      "Missing expected error message from output:\n%s" % output)
+
+ def test_output_compatible_setup(self):
+ tmpfile = tempfile.mktemp()
+ os.chdir(basedir)
+ cmd_line = './scripts/avocado --journal --xunit --xunit-output %s --json run sleeptest' % tmpfile
+ result = process.run(cmd_line, ignore_status=True)
+ output = result.stdout + result.stderr
+ expected_rc = 0
+ try:
+ self.assertEqual(result.exit_status, expected_rc,
+ "Avocado did not return rc %d:\n%s" %
+ (expected_rc, result))
+ # Check if we are producing valid outputs
+ json.loads(output)
+ minidom.parse(tmpfile)
+ finally:
+ try:
+ os.remove(tmpfile)
+ except OSError:
+ pass
+
+ def test_output_compatible_setup_2(self):
+ tmpfile = tempfile.mktemp()
+ os.chdir(basedir)
+ cmd_line = './scripts/avocado --xunit --json --json-output %s run sleeptest' % tmpfile
+ result = process.run(cmd_line, ignore_status=True)
+ output = result.stdout + result.stderr
+ expected_rc = 0
+ try:
+ self.assertEqual(result.exit_status, expected_rc,
+ "Avocado did not return rc %d:\n%s" %
+ (expected_rc, result))
+ # Check if we are producing valid outputs
+ with open(tmpfile, 'r') as fp:
+ json_results = json.load(fp)
+ debug_log = json_results['debuglog']
+ self.check_output_files(debug_log)
+ minidom.parseString(output)
+ finally:
+ try:
+ os.remove(tmpfile)
+ except OSError:
+ pass
+
+ def test_output_compatible_setup_nooutput(self):
+ tmpfile = tempfile.mktemp()
+ tmpfile2 = tempfile.mktemp()
+ os.chdir(basedir)
+ cmd_line = './scripts/avocado --xunit --xunit-output %s --json --json-output %s run sleeptest' % (tmpfile, tmpfile2)
+ result = process.run(cmd_line, ignore_status=True)
+ output = result.stdout + result.stderr
+ expected_rc = 0
+ try:
+ self.assertEqual(result.exit_status, expected_rc,
+ "Avocado did not return rc %d:\n%s" %
+ (expected_rc, result))
+ self.assertEqual(output, "",
+ "Output is not empty as expected:\n%s" % output)
+ # Check if we are producing valid outputs
+ with open(tmpfile2, 'r') as fp:
+ json_results = json.load(fp)
+ debug_log = json_results['debuglog']
+ self.check_output_files(debug_log)
+ minidom.parse(tmpfile)
+ finally:
+ try:
+ os.remove(tmpfile)
+ os.remove(tmpfile2)
+ except OSError:
+ pass
+
+ def test_default_enabled_plugins(self):
+ os.chdir(basedir)
+ cmd_line = './scripts/avocado run sleeptest'
+ result = process.run(cmd_line, ignore_status=True)
+ output = result.stdout + result.stderr
+ expected_rc = 0
+ self.assertEqual(result.exit_status, expected_rc,
+ "Avocado did not return rc %d:\n%s" %
+ (expected_rc, result))
+ output_lines = output.splitlines()
+ first_line = output_lines[0]
+ debug_log = first_line.split()[-1]
+ self.check_output_files(debug_log)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/selftests/all/unit/avocado/xunit_unittest.py b/selftests/all/unit/avocado/xunit_unittest.py
index 1a7fcc300ae2beb63bdc490512e6d24b4882c9a9..75e394c8f486de7eb28d74ea4dd3108c37db8d80 100755
--- a/selftests/all/unit/avocado/xunit_unittest.py
+++ b/selftests/all/unit/avocado/xunit_unittest.py
@@ -14,6 +14,7 @@
# Copyright: Red Hat Inc. 2014
# Author: Ruda Moura
+import argparse
import unittest
import os
import sys
@@ -30,12 +31,17 @@ from avocado.plugins import xunit
from avocado import test
+class ParseXMLError(Exception):
+ pass
+
+
class xUnitSucceedTest(unittest.TestCase):
def setUp(self):
self.tmpfile = mkstemp()
- self.test_result = xunit.xUnitTestResult()
- self.test_result.filename = self.tmpfile[1]
+ args = argparse.Namespace()
+ args.xunit_output = self.tmpfile[1]
+ self.test_result = xunit.xUnitTestResult(args=args)
self.test_result.start_tests()
self.test1 = test.Test()
self.test1.status = 'PASS'
@@ -50,9 +56,12 @@ class xUnitSucceedTest(unittest.TestCase):
self.test_result.end_test(self.test1)
self.test_result.end_tests()
self.assertTrue(self.test_result.xml)
- with open(self.test_result.filename) as fp:
+ with open(self.test_result.output) as fp:
xml = fp.read()
- dom = minidom.parseString(xml)
+ try:
+ dom = minidom.parseString(xml)
+ except Exception, details:
+ raise ParseXMLError("Error parsing XML: '%s'.\nXML Contents:\n%s" % (details, xml))
self.assertTrue(dom)
els = dom.getElementsByTagName('testcase')
self.assertEqual(len(els), 1)