openeuler / avocado

Commit 86dcba4f
Authored on Jun 11, 2014 by Rudá Moura
Committed by Rudá Moura on Jun 11, 2014

Merge pull request #98 from avocado-framework/multiple-output-plugins-2

Multiple output plugins

Parents: 79de5c6d, 017e2a93
Showing 9 changed files with 380 additions and 28 deletions (+380 -28)
avocado/job.py                                      +70  -15
avocado/plugins/journal.py                           +9   -1
avocado/plugins/jsonresult.py                        +7   -1
avocado/plugins/vm.py                               +14   -1
avocado/plugins/xunit.py                            +14   -6
avocado/result.py                                   +91   -0
docs/source/OutputPlugins.rst                       +36   -0
selftests/all/functional/avocado/output_tests.py   +126   -0
selftests/all/unit/avocado/xunit_unittest.py         +13   -4
avocado/job.py

@@ -17,6 +17,7 @@
 Module that describes a sequence of automated test operations.
 """

+import argparse
 import imp
 import logging
 import multiprocessing

@@ -35,6 +36,8 @@ from avocado.utils import archive
 from avocado import multiplex_config
 from avocado import test
 from avocado import result
+from avocado.plugins import xunit
+from avocado.plugins import jsonresult

 _NEW_ISSUE_LINK = 'https://github.com/avocado-framework/avocado/issues/new'

@@ -178,27 +181,76 @@ class Job(object):
         self.test_dir = data_dir.get_test_dir()
         self.test_index = 1
         self.status = "RUNNING"
+        self.result_proxy = result.TestResultProxy()
         self.output_manager = output.OutputManager()

-    def _make_test_runner(self, test_result):
+    def _make_test_runner(self):
         if hasattr(self.args, 'test_runner'):
             test_runner_class = self.args.test_runner
         else:
             test_runner_class = TestRunner
-        test_runner = test_runner_class(job=self, test_result=test_result)
-        return test_runner
-
-    def _make_test_result(self, urls):
-        if hasattr(self.args, 'test_result'):
-            test_result_class = self.args.test_result
-        else:
-            test_result_class = result.HumanTestResult
-        if self.args is not None:
-            self.args.test_result_total = len(urls)
-        test_result = test_result_class(self.output_manager, self.args)
-        return test_result
+        self.test_runner = test_runner_class(job=self,
+                                             test_result=self.result_proxy)
+
+    def _set_output_plugins(self):
+        plugin_using_stdout = None
+        e_msg = ("Avocado could not set %s and %s both to output to stdout. ")
+        e_msg_2 = ("Please set the output flag of one of them to a file "
+                   "to avoid conflicts.")
+        for key in self.args.__dict__:
+            if key.endswith('_result'):
+                result_class = getattr(self.args, key)
+                if issubclass(result_class, result.TestResult):
+                    result_plugin = result_class(self.output_manager,
+                                                 self.args)
+                    if result_plugin.output == '-':
+                        if plugin_using_stdout is not None:
+                            e_msg %= (plugin_using_stdout.output_option,
+                                      result_plugin.output_option)
+                            self.output_manager.log_fail_header(e_msg)
+                            self.output_manager.log_fail_header(e_msg_2)
+                            sys.exit(error_codes.numeric_status['AVOCADO_JOB_FAIL'])
+                        else:
+                            plugin_using_stdout = result_plugin
+                    self.result_proxy.add_output_plugin(result_plugin)
+
+    def _make_test_result(self):
+        """
+        Set up output plugins.
+
+        The basic idea behind the output plugins is:
+
+        * If there are any active output plugins, use them
+        * Always add Xunit and JSON plugins outputting to files inside the
+          results dir
+        * If at the end we only have 2 output plugins (Xunit and JSON), we can
+          add the human output plugin.
+        """
+        if self.args:
+            # If there are any active output plugins, let's use them
+            self._set_output_plugins()
+
+        # Setup the xunit plugin to output to the debug directory
+        xunit_file = os.path.join(self.debugdir, 'results.xml')
+        args = argparse.Namespace()
+        args.xunit_output = xunit_file
+        xunit_plugin = xunit.xUnitTestResult(self.output_manager, args)
+        self.result_proxy.add_output_plugin(xunit_plugin)
+
+        # Setup the json plugin to output to the debug directory
+        json_file = os.path.join(self.debugdir, 'results.json')
+        args = argparse.Namespace()
+        args.json_output = json_file
+        json_plugin = jsonresult.JSONTestResult(self.output_manager, args)
+        self.result_proxy.add_output_plugin(json_plugin)
+
+        # If there are no active output plugins besides xunit and json,
+        # set up the human output.
+        if len(self.result_proxy.output_plugins) == 2:
+            human_plugin = result.HumanTestResult(self.output_manager, self.args)
+            self.result_proxy.add_output_plugin(human_plugin)

     def _run(self, urls=None, multiplex_file=None):
         """

@@ -248,8 +300,11 @@ class Job(object):
         for dct in parser.get_dicts():
             params_list.append(dct)

-        test_result = self._make_test_result(params_list)
-        self.test_runner = self._make_test_runner(test_result)
+        if self.args is not None:
+            self.args.test_result_total = len(params_list)
+
+        self._make_test_result()
+        self._make_test_runner()
         self.output_manager.start_file_logging(self.debuglog,
                                                self.loglevel)
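
The docstring of _make_test_result() above lays out the selection rule: every '*_result' attribute on the parsed arguments is a candidate output plugin, and at most one of them may claim stdout. The snippet below is a minimal, self-contained sketch of that rule, not code from this patch; the names FakeResult, xunit_result and json_result are purely illustrative:

    import argparse


    class FakeResult(object):
        """Stand-in for a result plugin: output '-' means stdout."""

        def __init__(self, output, output_option):
            self.output = output
            self.output_option = output_option


    def pick_output_plugins(args):
        """Collect '*_result' plugins, refusing two writers on stdout."""
        plugins, stdout_user = [], None
        for key, plugin in vars(args).items():
            if not key.endswith('_result'):
                continue
            if plugin.output == '-':
                if stdout_user is not None:
                    raise SystemExit(
                        "Avocado could not set %s and %s both to output to stdout."
                        % (stdout_user.output_option, plugin.output_option))
                stdout_user = plugin
            plugins.append(plugin)
        return plugins


    args = argparse.Namespace(xunit_result=FakeResult('-', '--xunit'),
                              json_result=FakeResult('/tmp/result.json', '--json'))
    print(len(pick_output_plugins(args)))  # 2 plugins, only one of them on stdout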
avocado/plugins/journal.py

@@ -86,6 +86,14 @@ class TestResultJournal(TestResult):
                               status))
         self.journal.commit()

+    def set_output(self):
+        # Journal does not output to stdout
+        self.output = None
+
+    def set_output_option(self):
+        # Journal does not need an output option
+        self.output_option = None
+
     def start_test(self, test):
         # lazy init because we need the toplevel logdir for the job
         if not self.journal_initialized:

@@ -121,4 +129,4 @@ class Journal(plugin.Plugin):
     def activate(self, app_args):
         if app_args.journal:
-            self.parser.set_defaults(test_result=TestResultJournal)
+            self.parser.set_defaults(journal_result=TestResultJournal)
avocado/plugins/jsonresult.py

@@ -28,6 +28,12 @@ class JSONTestResult(TestResult):
     JSON Test Result class.
     """

+    def set_output(self):
+        self.output = getattr(self.args, 'json_output', '-')
+
+    def set_output_option(self):
+        self.output_option = '--json'
+
     def start_tests(self):
         """
         Called once before any tests are executed.

@@ -94,4 +100,4 @@ class JSON(plugin.Plugin):
     def activate(self, app_args):
         if app_args.json:
-            self.parser.set_defaults(test_result=JSONTestResult)
+            self.parser.set_defaults(json_result=JSONTestResult)
avocado/plugins/vm.py

@@ -36,9 +36,15 @@ class Test(object):
     """

     def __init__(self, name, status, time):
+        note = "Not supported yet"
+        self.name = name
         self.tagged_name = name
         self.status = status
         self.time_elapsed = time
+        self.fail_class = note
+        self.traceback = note
+        self.text_output = note
+        self.fail_reason = note


 class VMTestRunner(TestRunner):

@@ -120,6 +126,7 @@ class VMTestResult(TestResult):
         self.vm.remote.send_files(test_path, self.remote_test_dir)

     def setup(self):
+        self.urls = self.args.url.split()
         if self.args.vm_domain is None:
             e_msg = ('Please set Virtual Machine Domain with option '
                      '--vm-domain.')

@@ -167,6 +174,12 @@ class VMTestResult(TestResult):
         if self.args.vm_cleanup is True and self.vm.snapshot is not None:
             self.vm.restore_snapshot()

+    def set_output(self):
+        self.output = '-'
+
+    def set_output_option(self):
+        self.output_option = "--vm"
+
     def start_tests(self):
         """
         Called once before any tests are executed.

@@ -289,5 +302,5 @@ class RunVM(plugin.Plugin):
     def activate(self, app_args):
         if app_args.vm:
-            self.parser.set_defaults(test_result=VMTestResult,
+            self.parser.set_defaults(vm_result=VMTestResult,
                                      test_runner=VMTestRunner)
avocado/plugins/xunit.py

@@ -28,8 +28,9 @@ class XmlResult(object):
     Handles the XML details for xUnit output.
     """

-    def __init__(self):
+    def __init__(self, output):
         self.xml = ['<?xml version="1.0" encoding="UTF-8"?>']
+        self.output = output

     def _escape_attr(self, attrib):
         return quoteattr(attrib)

@@ -37,12 +38,14 @@ class XmlResult(object):
     def _escape_cdata(self, cdata):
         return cdata.replace(']]>', ']]>]]><![CDATA[')

-    def save(self, filename):
+    def save(self, filename=None):
         """
         Save the XML document to a file or standard output.

         :param filename: File name to save. Use '-' for standard output.
         """
+        if filename is None:
+            filename = self.output
         xml = '\n'.join(self.xml)
         if filename == '-':
             sys.stdout.write(xml)

@@ -158,8 +161,13 @@ class xUnitTestResult(TestResult):
         :param args: an instance of :class:`argparse.Namespace`.
         """
         TestResult.__init__(self, stream, args)
-        self.filename = getattr(self.args, 'xunit_output', '-')
-        self.xml = XmlResult()
+        self.xml = XmlResult(self.output)
+
+    def set_output(self):
+        self.output = getattr(self.args, 'xunit_output', '-')
+
+    def set_output_option(self):
+        self.output_option = '--xunit'

     def start_tests(self):
         """

@@ -199,7 +207,7 @@ class xUnitTestResult(TestResult):
                   'skip': len(self.skipped),
                   'total_time': self.total_time}
         self.xml.end_testsuite(**values)
-        self.xml.save(self.filename)
+        self.xml.save()


 class XUnit(plugin.Plugin):

@@ -222,4 +230,4 @@ class XUnit(plugin.Plugin):
     def activate(self, app_args):
         if app_args.xunit:
-            self.parser.set_defaults(test_result=xUnitTestResult)
+            self.parser.set_defaults(xunit_result=xUnitTestResult)
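
As a hypothetical usage note for the reworked XmlResult above (assuming the avocado package is importable): the destination is now fixed at construction time and save() falls back to it, with a dash still meaning standard output, so callers such as xUnitTestResult no longer need to carry a filename around:

    from avocado.plugins.xunit import XmlResult

    doc = XmlResult(output='-')   # '-' keeps the xUnit report on stdout
    # xUnitTestResult would append testsuite/testcase entries to doc.xml here.
    doc.save()                    # no filename given, so self.output is used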
avocado/result.py

@@ -21,6 +21,70 @@ used by the test runner.
 """


+class InvalidOutputPlugin(Exception):
+    pass
+
+
+class TestResultProxy(object):
+
+    def __init__(self):
+        self.output_plugins = []
+        self.console_plugin = None
+
+    def __getattr__(self, attr):
+        for output_plugin in self.output_plugins:
+            if hasattr(output_plugin, attr):
+                return getattr(output_plugin, attr)
+        else:
+            return None
+
+    def add_output_plugin(self, plugin):
+        if not isinstance(plugin, TestResult):
+            raise InvalidOutputPlugin("Object %s is not an instance of "
+                                      "TestResult" % plugin)
+        self.output_plugins.append(plugin)
+
+    def start_tests(self):
+        for output_plugin in self.output_plugins:
+            output_plugin.start_tests()
+
+    def end_tests(self):
+        for output_plugin in self.output_plugins:
+            output_plugin.end_tests()
+
+    def start_test(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.start_test(test)
+
+    def end_test(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.end_test(test)
+
+    def add_pass(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.add_pass(test)
+
+    def add_error(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.add_error(test)
+
+    def add_fail(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.add_fail(test)
+
+    def add_skip(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.add_skip(test)
+
+    def add_warn(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.add_warn(test)
+
+    def check_test(self, test):
+        for output_plugin in self.output_plugins:
+            output_plugin.check_test(test)
+
+
 class TestResult(object):

     """

@@ -44,6 +108,33 @@ class TestResult(object):
         self.failed = []
         self.skipped = []
         self.warned = []
+        # The convention is that a dash denotes stdout.
+        self.output = '-'
+        self.set_output()
+        self.output_option = None
+        self.set_output_option()
+
+    def set_output(self):
+        """
+        Set the value of the output attribute.
+
+        By default, output is the stream (stdout), denoted by '-'.
+
+        Must be implemented by plugins, so avocado knows where the plugin wants
+        to output to, avoiding clashes among different plugins that want to
+        use the stream at the same time.
+        """
+        pass
+
+    def set_output_option(self):
+        """
+        Set the value of the output option (command line).
+
+        Must be implemented by plugins, so avocado prints a friendly
+        message to users who are using more than one plugin to print results
+        to stdout.
+        """
+        pass

     def start_tests(self):
         """
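
To make the new TestResult contract concrete, here is a self-contained sketch with stand-in classes (BaseResult, FileResult and ResultProxy are illustrative, not the avocado classes): the base __init__ defaults output to '-' (stdout) and lets a subclass override it through set_output()/set_output_option(), while the proxy fans each event out to every registered plugin:

    class BaseResult(object):
        """Stand-in for TestResult: '-' denotes stdout by convention."""

        def __init__(self):
            self.output = '-'
            self.set_output()
            self.output_option = None
            self.set_output_option()

        def set_output(self):
            pass            # default: keep writing to stdout

        def set_output_option(self):
            pass

        def start_tests(self):
            print('%s: test run starting' % self.__class__.__name__)


    class FileResult(BaseResult):
        """Stand-in plugin that writes to a file, so it never claims stdout."""

        def set_output(self):
            self.output = '/tmp/results.txt'

        def set_output_option(self):
            self.output_option = '--file'


    class ResultProxy(object):
        """Mirrors TestResultProxy: forward each event to every plugin."""

        def __init__(self):
            self.output_plugins = []

        def add_output_plugin(self, plugin):
            self.output_plugins.append(plugin)

        def start_tests(self):
            for plugin in self.output_plugins:
                plugin.start_tests()


    proxy = ResultProxy()
    proxy.add_output_plugin(BaseResult())
    proxy.add_output_plugin(FileResult())
    proxy.start_tests()     # both plugins receive the event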
docs/source/OutputPlugins.rst

@@ -62,6 +62,42 @@ simply use::
         </testcase>
         <testcase classname="synctest" name="synctest.1" time="1.69329714775"/>

+Machine readable output - json
+------------------------------
+
+`JSON <http://www.json.org/>`__ is a widely used data exchange format. The
+json avocado plugin outputs job information, similarly to the xunit output
+plugin::
+
+    $ scripts/avocado --json run "sleeptest failtest synctest"
+    {"tests": [{"test": "sleeptest.1", "url": "sleeptest", "status": "PASS", "time": 1.4282619953155518}, {"test": "failtest.1", "url": "failtest", "status": "FAIL", "time": 0.34017300605773926}, {"test": "synctest.1", "url": "synctest", "status": "PASS", "time": 2.109131097793579}], "errors": 0, "skip": 0, "time": 3.87756609916687, "debuglog": "/home/lmr/avocado/logs/run-2014-06-11-01.35.15/debug.log", "pass": 2, "failures": 1, "total": 3}
+
+Multiple output plugins
+-----------------------
+
+You can enable multiple output plugins at once, as long as only one of them
+uses the standard output. For example, it is fine to use the xunit plugin on
+stdout and the JSON plugin to output to a file::
+
+    $ scripts/avocado --xunit --json --json-output /tmp/result.json run "sleeptest synctest"
+    <?xml version="1.0" encoding="UTF-8"?>
+    <testsuite name="avocado" tests="2" errors="0" failures="0" skip="0" time="3.21392536163" timestamp="2014-06-11 01:49:35.858187">
+        <testcase classname="sleeptest" name="sleeptest.1" time="1.34533214569"/>
+        <testcase classname="synctest" name="synctest.1" time="1.86859321594"/>
+    </testsuite>
+
+    $ cat /tmp/result.json
+    {"tests": [{"test": "sleeptest.1", "url": "sleeptest", "status": "PASS", "time": 1.345332145690918}, {"test": "synctest.1", "url": "synctest", "status": "PASS", "time": 1.8685932159423828}], "errors": 0, "skip": 0, "time": 3.213925361633301, "debuglog": "/home/lmr/avocado/logs/run-2014-06-11-01.49.35/debug.log", "pass": 2, "failures": 0, "total": 2}
+
+But you won't be able to do the same without the --json-output flag passed to
+the program::
+
+    $ scripts/avocado --xunit --json run "sleeptest synctest"
+    Avocado could not set --json and --xunit both to output to stdout.
+    Please set the output flag of one of them to a file to avoid conflicts.
+
+That's basically the only rule you need to follow.
+
 Implementing other output formats
 ---------------------------------
selftests/all/functional/avocado/output_tests.py

@@ -14,9 +14,12 @@
 # Copyright: Red Hat Inc. 2013-2014
 # Author: Lucas Meneghel Rodrigues <lmr@redhat.com>

+import json
+import tempfile
 import unittest
 import os
 import sys
+from xml.dom import minidom

 # simple magic for using scripts within a source tree
 basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')

@@ -44,5 +47,128 @@ class OutputTest(unittest.TestCase):
                          "doublefree output:\n%s" % output)


+class OutputPluginTest(unittest.TestCase):
+
+    def check_output_files(self, debug_log):
+        base_dir = os.path.dirname(debug_log)
+        json_output = os.path.join(base_dir, 'results.json')
+        self.assertTrue(os.path.isfile(json_output))
+        with open(json_output, 'r') as fp:
+            json.load(fp)
+        xunit_output = os.path.join(base_dir, 'results.xml')
+        self.assertTrue(os.path.isfile(json_output))
+        minidom.parse(xunit_output)
+
+    def test_output_incompatible_setup(self):
+        os.chdir(basedir)
+        cmd_line = './scripts/avocado --xunit --json run sleeptest'
+        result = process.run(cmd_line, ignore_status=True)
+        expected_rc = 2
+        output = result.stdout + result.stderr
+        self.assertEqual(result.exit_status, expected_rc,
+                         "Avocado did not return rc %d:\n%s" %
+                         (expected_rc, result))
+        error_excerpt = "Avocado could not set --json and --xunit both to output to stdout."
+        self.assertIn(error_excerpt, output,
+                      "Missing excepted error message from output:\n%s" % output)
+
+    def test_output_incompatible_setup_2(self):
+        os.chdir(basedir)
+        cmd_line = './scripts/avocado --vm --json run sleeptest'
+        result = process.run(cmd_line, ignore_status=True)
+        expected_rc = 2
+        output = result.stdout + result.stderr
+        self.assertEqual(result.exit_status, expected_rc,
+                         "Avocado did not return rc %d:\n%s" %
+                         (expected_rc, result))
+        error_excerpt = "Avocado could not set --json and --vm both to output to stdout."
+        self.assertIn(error_excerpt, output,
+                      "Missing excepted error message from output:\n%s" % output)
+
+    def test_output_compatible_setup(self):
+        tmpfile = tempfile.mktemp()
+        os.chdir(basedir)
+        cmd_line = './scripts/avocado --journal --xunit --xunit-output %s --json run sleeptest' % tmpfile
+        result = process.run(cmd_line, ignore_status=True)
+        output = result.stdout + result.stderr
+        expected_rc = 0
+        try:
+            self.assertEqual(result.exit_status, expected_rc,
+                             "Avocado did not return rc %d:\n%s" %
+                             (expected_rc, result))
+            # Check if we are producing valid outputs
+            json.loads(output)
+            minidom.parse(tmpfile)
+        finally:
+            try:
+                os.remove(tmpfile)
+            except OSError:
+                pass
+
+    def test_output_compatible_setup_2(self):
+        tmpfile = tempfile.mktemp()
+        os.chdir(basedir)
+        cmd_line = './scripts/avocado --xunit --json --json-output %s run sleeptest' % tmpfile
+        result = process.run(cmd_line, ignore_status=True)
+        output = result.stdout + result.stderr
+        expected_rc = 0
+        try:
+            self.assertEqual(result.exit_status, expected_rc,
+                             "Avocado did not return rc %d:\n%s" %
+                             (expected_rc, result))
+            # Check if we are producing valid outputs
+            with open(tmpfile, 'r') as fp:
+                json_results = json.load(fp)
+                debug_log = json_results['debuglog']
+                self.check_output_files(debug_log)
+            minidom.parseString(output)
+        finally:
+            try:
+                os.remove(tmpfile)
+            except OSError:
+                pass
+
+    def test_output_compatible_setup_nooutput(self):
+        tmpfile = tempfile.mktemp()
+        tmpfile2 = tempfile.mktemp()
+        os.chdir(basedir)
+        cmd_line = './scripts/avocado --xunit --xunit-output %s --json --json-output %s run sleeptest' % (tmpfile, tmpfile2)
+        result = process.run(cmd_line, ignore_status=True)
+        output = result.stdout + result.stderr
+        expected_rc = 0
+        try:
+            self.assertEqual(result.exit_status, expected_rc,
+                             "Avocado did not return rc %d:\n%s" %
+                             (expected_rc, result))
+            self.assertEqual(output, "",
+                             "Output is not empty as expected:\n%s" % output)
+            # Check if we are producing valid outputs
+            with open(tmpfile2, 'r') as fp:
+                json_results = json.load(fp)
+                debug_log = json_results['debuglog']
+                self.check_output_files(debug_log)
+            minidom.parse(tmpfile)
+        finally:
+            try:
+                os.remove(tmpfile)
+                os.remove(tmpfile2)
+            except OSError:
+                pass
+
+    def test_default_enabled_plugins(self):
+        os.chdir(basedir)
+        cmd_line = './scripts/avocado run sleeptest'
+        result = process.run(cmd_line, ignore_status=True)
+        output = result.stdout + result.stderr
+        expected_rc = 0
+        self.assertEqual(result.exit_status, expected_rc,
+                         "Avocado did not return rc %d:\n%s" %
+                         (expected_rc, result))
+        output_lines = output.splitlines()
+        first_line = output_lines[0]
+        debug_log = first_line.split()[-1]
+        self.check_output_files(debug_log)
+
+
 if __name__ == '__main__':
     unittest.main()
selftests/all/unit/avocado/xunit_unittest.py

@@ -14,6 +14,7 @@
 # Copyright: Red Hat Inc. 2014
 # Author: Ruda Moura <rmoura@redhat.com>

+import argparse
 import unittest
 import os
 import sys

@@ -30,12 +31,17 @@ from avocado.plugins import xunit
 from avocado import test


+class ParseXMLError(Exception):
+    pass
+
+
 class xUnitSucceedTest(unittest.TestCase):

     def setUp(self):
         self.tmpfile = mkstemp()
-        self.test_result = xunit.xUnitTestResult()
-        self.test_result.filename = self.tmpfile[1]
+        args = argparse.Namespace()
+        args.xunit_output = self.tmpfile[1]
+        self.test_result = xunit.xUnitTestResult(args=args)
         self.test_result.start_tests()
         self.test1 = test.Test()
         self.test1.status = 'PASS'

@@ -50,9 +56,12 @@ class xUnitSucceedTest(unittest.TestCase):
         self.test_result.end_test(self.test1)
         self.test_result.end_tests()
         self.assertTrue(self.test_result.xml)
-        with open(self.test_result.filename) as fp:
+        with open(self.test_result.output) as fp:
             xml = fp.read()
-        dom = minidom.parseString(xml)
+        try:
+            dom = minidom.parseString(xml)
+        except Exception, details:
+            raise ParseXMLError("Error parsing XML: '%s'.\nXML Contents:\n%s" % (details, xml))
         self.assertTrue(dom)
         els = dom.getElementsByTagName('testcase')
         self.assertEqual(len(els), 1)