Commit 1bd41e53

Merge pull request #393 from lmr/test_discovery_v13

[V13] Test discovery and load (Phase II)

Authored Feb 03, 2015 by Lukáš Doktor
Parents: dc562b80, 75dfea65

Showing 21 changed files with 521 additions and 349 deletions (+521, -349)
Changed files:

    avocado/core/exceptions.py                          +0    -10
    avocado/core/output.py                              +0    -38
    avocado/core/status.py                              +0    -2
    avocado/job.py                                      +59   -47
    avocado/loader.py                                   +185  -22
    avocado/plugins/htmlresult.py                       +0    -3
    avocado/plugins/jsonresult.py                       +0    -1
    avocado/plugins/remote.py                           +1    -24
    avocado/plugins/test_lister.py                      +121  -33
    avocado/plugins/vm.py                               +1    -24
    avocado/plugins/xunit.py                            +1    -6
    avocado/result.py                                   +0    -52
    avocado/runner.py                                   +4    -3
    docs/source/GetStartedGuide.rst                     +69   -14
    docs/source/RemoteMachinePlugin.rst                 +0    -1
    docs/source/VirtualMachinePlugin.rst                +0    -1
    docs/source/WritingTests.rst                        +0    -3
    man/avocado.rst                                     +59   -17
    selftests/all/functional/avocado/basic_tests.py     +8    -26
    selftests/all/functional/avocado/loader_tests.py    +4    -4
    selftests/all/unit/avocado/loader_unittest.py       +9    -18
avocado/core/exceptions.py
@@ -75,16 +75,6 @@ class TestError(TestBaseException):
     status = "ERROR"
 
 
-class TestNotFoundError(TestBaseException):
-
-    """
-    Indicates that the test was not found.
-
-    Causes: non existing path or could not resolve alias.
-    """
-    status = "NOT_FOUND"
-
-
 class NotATestError(TestBaseException):
 
     """
avocado/core/output.py
@@ -137,7 +137,6 @@ class TermSupport(object):
         self.SKIP = self.COLOR_YELLOW
         self.FAIL = self.COLOR_RED
         self.ERROR = self.COLOR_RED
-        self.NOT_FOUND = self.COLOR_YELLOW
         self.WARN = self.COLOR_YELLOW
         self.PARTIAL = self.COLOR_YELLOW
         self.ENDC = self.CONTROL_END
@@ -157,7 +156,6 @@ class TermSupport(object):
         self.SKIP = ''
         self.FAIL = ''
         self.ERROR = ''
-        self.NOT_FOUND = ''
         self.WARN = ''
         self.PARTIAL = ''
         self.ENDC = ''
@@ -237,22 +235,6 @@ class TermSupport(object):
         """
         return self.MOVE_BACK + self.ERROR + 'ERROR' + self.ENDC
 
-    def not_found_str(self):
-        """
-        Print a warning NOT_FOUND string (yellow colored).
-
-        If the output does not support colors, just return the original string.
-        """
-        return self.MOVE_BACK + self.NOT_FOUND + 'NOT_FOUND' + self.ENDC
-
-    def not_a_test_str(self):
-        """
-        Print a warning NOT_A_TEST string (yellow colored).
-
-        If the output does not support colors, just return the original string.
-        """
-        return self.MOVE_BACK + self.NOT_FOUND + 'NOT_A_TEST' + self.ENDC
-
     def warn_str(self):
         """
         Print an warning string (yellow colored).
@@ -403,8 +385,6 @@ class View(object):
     def set_test_status(self, status, state):
         mapping = {'PASS': self._log_ui_status_pass,
                    'ERROR': self._log_ui_status_error,
-                   'NOT_FOUND': self._log_ui_status_not_found,
-                   'NOT_A_TEST': self._log_ui_status_not_a_test,
                    'FAIL': self._log_ui_status_fail,
                    'SKIP': self._log_ui_status_skip,
                    'WARN': self._log_ui_status_warn}
@@ -523,24 +503,6 @@ class View(object):
         normal_error_msg = term_support.error_str() + " (%.2f s)" % t_elapsed
         self._log_ui_error_base(normal_error_msg)
 
-    def _log_ui_status_not_found(self, t_elapsed):
-        """
-        Log a NOT_FOUND status message for a given operation.
-
-        :param t_elapsed: Time it took for the operation to complete.
-        """
-        normal_error_msg = term_support.not_found_str() + " (%.2f s)" % t_elapsed
-        self._log_ui_error_base(normal_error_msg)
-
-    def _log_ui_status_not_a_test(self, t_elapsed):
-        """
-        Log a NOT_A_TEST status message for a given operation.
-
-        :param t_elapsed: Time it took for the operation to complete.
-        """
-        normal_error_msg = term_support.not_a_test_str() + " (%.2f s)" % t_elapsed
-        self._log_ui_error_base(normal_error_msg)
-
     def _log_ui_status_fail(self, t_elapsed):
         """
         Log a FAIL status message for a given operation.
avocado/core/status.py
@@ -19,8 +19,6 @@ a test or a job in avocado PASSed or FAILed.
 mapping = {"TEST_NA": True,
            "ABORT": False,
            "ERROR": False,
-           "NOT_FOUND": False,
-           "NOT_A_TEST": False,
            "FAIL": False,
            "WARN": False,
            "PASS": True,
avocado/job.py
@@ -195,11 +195,39 @@ class Job(object):
             human_plugin = result.HumanTestResult(self.view, self.args)
             self.result_proxy.add_output_plugin(human_plugin)
 
+    def _multiplex_params_list(self, params_list, multiplex_files):
+        for mux_file in multiplex_files:
+            if not os.path.exists(mux_file):
+                e_msg = "Multiplex file %s doesn't exist." % mux_file
+                raise exceptions.OptionValidationError(e_msg)
+        result = []
+        for params in params_list:
+            try:
+                variants = multiplexer.multiplex_yamls(multiplex_files,
+                                                       self.args.filter_only,
+                                                       self.args.filter_out)
+            except SyntaxError:
+                variants = None
+            if variants:
+                tag = 1
+                for variant in variants:
+                    env = {}
+                    for t in variant:
+                        env.update(dict(t.environment))
+                    env.update({'tag': tag})
+                    env.update({'id': params['id']})
+                    result.append(env)
+                    tag += 1
+            else:
+                result.append(params)
+        return result
+
     def _run(self, urls=None, multiplex_files=None):
         """
         Unhandled job method. Runs a list of test URLs to its completion.
 
-        :param urls: String with tests to run.
+        :param urls: String with tests to run, separated by whitespace.
+                     Optionally, a list of tests (each test a string).
         :param multiplex_files: File that multiplexes a given test url.
 
         :return: Integer with overall job status. See
@@ -208,72 +236,55 @@ class Job(object):
                  :class:`avocado.core.exceptions.JobBaseException` errors,
                  that configure a job failure.
         """
-        params_list = []
         if urls is None:
-            if self.args and self.args.url:
+            if self.args and self.args.url is not None:
                 urls = self.args.url
         else:
             if isinstance(urls, str):
                 urls = urls.split()
 
-        if urls is not None:
-            for url in urls:
-                if url.startswith(os.path.pardir):
-                    url = os.path.abspath(url)
-                params_list.append({'id': url})
+        if not urls:
+            e_msg = "Empty test ID. A test path or alias must be provided"
+            raise exceptions.OptionValidationError(e_msg)
+
+        self._make_test_loader()
+
+        params_list = self.test_loader.discover_urls(urls)
 
         if multiplex_files is None:
             if self.args and self.args.multiplex_files is not None:
                 multiplex_files = self.args.multiplex_files
-            else:
-                multiplex_files = multiplex_files
 
         if multiplex_files is not None:
-            for mux_file in multiplex_files:
-                if not os.path.exists(mux_file):
-                    e_msg = "Multiplex file %s doesn't exist." % (mux_file)
-                    raise exceptions.OptionValidationError(e_msg)
-            params_list = []
-            if urls is not None:
-                for url in urls:
-                    try:
-                        variants = multiplexer.multiplex_yamls(multiplex_files,
-                                                               self.args.filter_only,
-                                                               self.args.filter_out)
-                    except SyntaxError:
-                        variants = None
-                    if variants:
-                        tag = 1
-                        for variant in variants:
-                            env = {}
-                            for t in variant:
-                                env.update(dict(t.environment))
-                            env.update({'tag': tag})
-                            env.update({'id': url})
-                            params_list.append(env)
-                            tag += 1
-                    else:
-                        params_list.append({'id': url})
-
-        if not params_list:
-            e_msg = "Test(s) with empty parameter list or the number of variants is zero"
+            params_list = self._multiplex_params_list(params_list,
+                                                      multiplex_files)
+
+        try:
+            test_suite = self.test_loader.discover(params_list)
+            error_msg_parts = self.test_loader.validate_ui(test_suite)
+        except KeyboardInterrupt:
+            raise exceptions.JobError('Command interrupted by user...')
+
+        if error_msg_parts:
+            e_msg = '\n'.join(error_msg_parts)
             raise exceptions.OptionValidationError(e_msg)
 
+        if not test_suite:
+            e_msg = ("No tests found within the specified path(s) "
+                     "(Possible reasons: File ownership, permissions, typos)")
+            raise exceptions.OptionValidationError(e_msg)
+
         if self.args is not None:
-            self.args.test_result_total = len(params_list)
+            self.args.test_result_total = len(test_suite)
 
         self._make_test_result()
         self._make_test_runner()
-        self._make_test_loader()
 
         self.view.start_file_logging(self.logfile,
                                      self.loglevel,
                                      self.unique_id)
 
         self.view.logfile = self.logfile
-        failures = self.test_runner.run_suite(params_list)
+        failures = self.test_runner.run_suite(test_suite)
         self.view.stop_file_logging()
         # If it's all good so far, set job status to 'PASS'
         if self.status == 'RUNNING':
@@ -308,7 +319,8 @@ class Job(object):
         The test runner figures out which tests need to be run on an empty urls
         list by assuming the first component of the shortname is the test url.
 
-        :param urls: String with tests to run.
+        :param urls: String with tests to run, separated by whitespace.
+                     Optionally, a list of tests (each test a string).
         :param multiplex_files: File that multiplexes a given test url.
 
         :return: Integer with overall job status. See
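Note: a standalone sketch of the variant expansion that the new Job._multiplex_params_list() performs. The variant data below is invented for illustration; in the job it comes from multiplexer.multiplex_yamls(), and each tree node exposes an `environment` mapping rather than a plain dict:

    # Illustration only: one discovered test expanded into one entry per variant.
    params_list = [{'id': 'sleeptest.py'}]
    variants = [
        [{'environment': {'sleep_length': 0.5}}],   # stand-in for a multiplex tree node
        [{'environment': {'sleep_length': 1.0}}],
    ]

    result = []
    for params in params_list:
        tag = 1
        for variant in variants:
            env = {}
            for node in variant:
                env.update(dict(node['environment']))
            env.update({'tag': tag})
            env.update({'id': params['id']})
            result.append(env)
            tag += 1

    print(result)
    # [{'sleep_length': 0.5, 'tag': 1, 'id': 'sleeptest.py'},
    #  {'sleep_length': 1.0, 'tag': 2, 'id': 'sleeptest.py'}]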
avocado/loader.py
@@ -28,13 +28,29 @@ from avocado.core import data_dir
 from avocado.utils import path
 
 
+class _DebugJob(object):
+
+    def __init__(self):
+        self.logdir = '.'
+
+
+class BrokenSymlink(object):
+    pass
+
+
+class AccessDeniedPath(object):
+    pass
+
+
 class TestLoader(object):
 
     """
     Test loader class.
     """
 
-    def __init__(self, job):
+    def __init__(self, job=None):
+        if job is None:
+            job = _DebugJob()
         self.job = job
 
     def _make_missing_test(self, test_name, params):
@@ -61,7 +77,7 @@ class TestLoader(object):
                            'job': self.job}
         return test_class, test_parameters
 
-    def _make_test(self, test_name, test_path, params, queue):
+    def _make_test(self, test_name, test_path, params):
         module_name = os.path.basename(test_path).split('.')[0]
         test_module_dir = os.path.dirname(test_path)
         sys.path.append(test_module_dir)
@@ -71,11 +87,10 @@ class TestLoader(object):
                            'params': params,
                            'job': self.job}
-        test_parameters_queue = {'name': test_name,
-                                 'base_logdir': self.job.logdir,
-                                 'params': params,
-                                 'job': self.job,
-                                 'runner_queue': queue}
+        test_parameters_name = {'name': test_name,
+                                'base_logdir': self.job.logdir,
+                                'params': params,
+                                'job': self.job}
         try:
             f, p, d = imp.find_module(module_name, [test_module_dir])
             test_module = imp.load_module(module_name, f, p, d)
@@ -84,10 +99,11 @@ class TestLoader(object):
                 if inspect.isclass(obj):
                     if issubclass(obj, test.Test):
                         test_class = obj
                         break
             if test_class is not None:
                 # Module is importable and does have an avocado test class
                 # inside, let's proceed.
-                test_parameters = test_parameters_queue
+                test_parameters = test_parameters_name
             else:
                 if os.access(test_path, os.X_OK):
                     # Module does not have an avocado test class inside but
@@ -98,7 +114,7 @@ class TestLoader(object):
                     # Module does not have an avocado test class inside, and
                     # it's not executable. Not a Test.
                     test_class = test.NotATest
-                    test_parameters = test_parameters_queue
+                    test_parameters = test_parameters_name
 
         # Since a lot of things can happen here, the broad exception is
         # justified. The user will get it unadulterated anyway, and avocado
@@ -127,30 +143,31 @@ class TestLoader(object):
                 params['exception'] = details
         else:
             test_class = test.NotATest
-            test_parameters = test_parameters_queue
+            test_parameters = test_parameters_name
 
         sys.path.pop(sys.path.index(test_module_dir))
         return test_class, test_parameters
 
-    def discover_test(self, params, queue):
+    def discover_test(self, params):
         """
         Try to discover and resolve a test.
 
         :param params: dictionary with test parameters.
         :type params: dict
-        :param queue: a queue for communicating with the test runner.
-        :type queue: an instance of :class:`multiprocessing.Queue`
         :return: a test factory (a pair of test class and test parameters)
+                 or `None`.
         """
-        test_name = params.get('id')
-        test_path = os.path.abspath(test_name)
+        test_name = test_path = params.get('id')
         if os.path.exists(test_path):
+            if os.access(test_path, os.R_OK) is False:
+                return (AccessDeniedPath, {'params': {'id': test_path}})
             path_analyzer = path.PathInspector(test_path)
             if path_analyzer.is_python():
                 test_class, test_parameters = self._make_test(test_name,
                                                               test_path,
-                                                              params, queue)
+                                                              params)
             else:
                 if os.access(test_path, os.X_OK):
                     test_class, test_parameters = self._make_simple_test(test_path,
@@ -159,34 +176,180 @@ class TestLoader(object):
                     test_class, test_parameters = self._make_not_a_test(test_path,
                                                                         params)
         else:
+            if os.path.islink(test_path):
+                try:
+                    if not os.path.isfile(os.readlink(test_path)):
+                        return BrokenSymlink, {'params': {'id': test_path}}
+                except OSError:
+                    return AccessDeniedPath, {'params': {'id': test_path}}
+
             # Try to resolve test ID (keep compatibility)
             rel_path = '%s.py' % test_name
             test_path = os.path.join(data_dir.get_test_dir(), rel_path)
             if os.path.exists(test_path):
                 test_class, test_parameters = self._make_test(rel_path,
                                                               test_path,
-                                                              params, queue)
+                                                              params)
             else:
                 test_class, test_parameters = self._make_missing_test(test_name,
                                                                       params)
         return test_class, test_parameters
 
-    def discover(self, params_list, queue):
+    def discover_url(self, url):
+        """
+        Discover (possible) tests from a directory.
+
+        Recursively walk in a directory and find tests params.
+        The tests are returned in alphabetic order.
+
+        :param dir_path: the directory path to inspect.
+        :type dir_path: str
+        :param ignore_suffix: list of suffix to ignore in paths.
+        :type ignore_suffix: list
+        :return: a list of test params (each one a dictionary).
+        """
+        ignore_suffix = ('.data', '.pyc', '.pyo', '__init__.py',
+                         '__main__.py')
+        params_list = []
+
+        def onerror(exception):
+            norm_url = os.path.abspath(url)
+            norm_error_filename = os.path.abspath(exception.filename)
+            if os.path.isdir(norm_url) and norm_url != norm_error_filename:
+                omit_non_tests = True
+            else:
+                omit_non_tests = False
+            params_list.append({'id': exception.filename,
+                                'omit_non_tests': omit_non_tests})
+
+        for dirpath, dirnames, filenames in os.walk(url, onerror=onerror):
+            for dir_name in dirnames:
+                if dir_name.startswith('.'):
+                    dirnames.pop(dirnames.index(dir_name))
+            for file_name in filenames:
+                if not file_name.startswith('.'):
+                    ignore = False
+                    for suffix in ignore_suffix:
+                        if file_name.endswith(suffix):
+                            ignore = True
+                    if not ignore:
+                        pth = os.path.join(dirpath, file_name)
+                        params_list.append({'id': pth,
+                                            'omit_non_tests': True})
+        return params_list
+
+    def discover_urls(self, urls):
+        """
+        Discover (possible) tests from test urls.
+
+        :param urls: a list of tests urls.
+        :type urls: list
+        :return: a list of test params (each one a dictionary).
+        """
+        params_list = []
+        for url in urls:
+            if url == '':
+                continue
+            params_list.extend(self.discover_url(url))
+        return params_list
+
+    def discover(self, params_list):
         """
         Discover tests for test suite.
 
         :param params_list: a list of test parameters.
         :type params_list: list
-        :param queue: a queue for communicating with the test runner.
-        :type queue: an instance of :class:`multiprocessing.Queue`
         :return: a test suite (a list of test factories).
         """
         test_suite = []
         for params in params_list:
-            test_class, test_parameters = self.discover_test(params, queue)
-            test_suite.append((test_class, test_parameters))
+            test_factory = self.discover_test(params)
+            if test_factory is None:
+                continue
+            test_class, test_parameters = test_factory
+            if test_class in [test.NotATest, BrokenSymlink, AccessDeniedPath]:
+                if not params.get('omit_non_tests'):
+                    test_suite.append((test_class, test_parameters))
+            else:
+                test_suite.append((test_class, test_parameters))
         return test_suite
 
+    @staticmethod
+    def validate(test_suite):
+        """
+        Find missing files/non-tests provided by the user in the input.
+
+        Used mostly for user input validation.
+
+        :param test_suite: List with tuples (test_class, test_params)
+        :return: list of missing files.
+        """
+        missing = []
+        not_test = []
+        broken_symlink = []
+        access_denied = []
+        for suite in test_suite:
+            if suite[0] == test.MissingTest:
+                missing.append(suite[1]['params']['id'])
+            elif suite[0] == test.NotATest:
+                not_test.append(suite[1]['params']['id'])
+            elif suite[0] == BrokenSymlink:
+                broken_symlink.append(suite[1]['params']['id'])
+            elif suite[0] == AccessDeniedPath:
+                access_denied.append(suite[1]['params']['id'])
+        return missing, not_test, broken_symlink, access_denied
+
+    def validate_ui(self, test_suite, ignore_missing=False,
+                    ignore_not_test=False, ignore_broken_symlinks=False,
+                    ignore_access_denied=False):
+        """
+        Validate test suite and deliver error messages to the UI
+
+        :param test_suite: List of tuples (test_class, test_params)
+        :type test_suite: list
+        :return: List with error messages
+        :rtype: list
+        """
+        (missing, not_test,
+         broken_symlink, access_denied) = self.validate(test_suite)
+
+        broken_symlink_msg = ''
+        if (not ignore_broken_symlinks) and broken_symlink:
+            if len(broken_symlink) == 1:
+                broken_symlink_msg = ("Cannot access '%s': Broken symlink" %
                                      ", ".join(broken_symlink))
+            elif len(broken_symlink) > 1:
+                broken_symlink_msg = ("Cannot access '%s': Broken symlinks" %
+                                      ", ".join(broken_symlink))
+
+        access_denied_msg = ''
+        if (not ignore_access_denied) and access_denied:
+            if len(access_denied) == 1:
+                access_denied_msg = ("Cannot access '%s': Access denied" %
+                                     ", ".join(access_denied))
+            elif len(access_denied) > 1:
+                access_denied_msg = ("Cannot access '%s': Access denied" %
+                                     ", ".join(access_denied))
+
+        missing_msg = ''
+        if (not ignore_missing) and missing:
+            if len(missing) == 1:
+                missing_msg = ("Cannot access '%s': File not found" %
+                               ", ".join(missing))
+            elif len(missing) > 1:
+                missing_msg = ("Cannot access '%s': Files not found" %
+                               ", ".join(missing))
+
+        not_test_msg = ''
+        if (not ignore_not_test) and not_test:
+            if len(not_test) == 1:
+                not_test_msg = ("File '%s' is not an avocado test" %
+                                ", ".join(not_test))
+            elif len(not_test) > 1:
+                not_test_msg = ("Files '%s' are not avocado tests" %
+                                ", ".join(not_test))
+
+        return [msg for msg in
+                [access_denied_msg, broken_symlink_msg, missing_msg,
+                 not_test_msg] if msg]
+
     def load_test(self, test_factory):
         """
         Load test from the test factory.
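Note: a minimal usage sketch of the reworked loader API, assuming it is run from an avocado checkout with the package importable; the directory passed to discover_urls() is only a placeholder:

    from avocado.loader import TestLoader

    loader = TestLoader()                        # job=None falls back to _DebugJob
    params_list = loader.discover_urls(['examples/tests/'])   # placeholder path
    test_suite = loader.discover(params_list)    # list of (test_class, test_parameters)

    # validate_ui() turns missing files, non-tests, broken symlinks and
    # access-denied paths into user-facing error messages
    for error_msg in loader.validate_ui(test_suite):
        print(error_msg)

    for test_class, test_parameters in test_suite:
        print(test_class.__name__)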
avocado/plugins/htmlresult.py
@@ -98,8 +98,6 @@ class ReportModel(object):
         mapping = {"TEST_NA": "warning",
                    "ABORT": "danger",
                    "ERROR": "danger",
-                   "NOT_FOUND": "warning",
-                   "NOT_A_TEST": "warning",
                    "FAIL": "danger",
                    "WARN": "warning",
                    "PASS": "success",
@@ -204,7 +202,6 @@ class HTMLTestResult(TestResult):
                   'total': len(self.json['tests']),
                   'pass': len(self.passed),
                   'errors': len(self.errors),
-                  'not_found': len(self.not_found),
                   'failures': len(self.failed),
                   'skip': len(self.skipped),
                   'time': self.total_time
avocado/plugins/jsonresult.py
@@ -77,7 +77,6 @@ class JSONTestResult(TestResult):
             'total': self.tests_total,
             'pass': len(self.passed),
             'errors': len(self.errors),
-            'not_found': len(self.not_found),
             'failures': len(self.failed),
             'skip': len(self.skipped),
             'time': self.total_time
avocado/plugins/remote.py
@@ -39,7 +39,6 @@ class RemoteTestRunner(TestRunner):
         :param urls: a string with test URLs.
         :return: a dictionary with test results.
         """
-        urls = urls.split()
         avocado_cmd = ('cd %s; avocado run --force-job-id %s --json - --archive %s' %
                        (self.remote_test_dir, self.result.stream.job_unique_id,
                         " ".join(urls)))
         result = self.result.remote.run(avocado_cmd, ignore_status=True)
@@ -62,10 +61,8 @@ class RemoteTestRunner(TestRunner):
         :return: a list of test failures.
         """
         failures = []
-        urls = [x['id'] for x in params_list]
-        self.result.urls = urls
         self.result.setup()
-        results = self.run_test(' '.join(urls))
+        results = self.run_test(self.result.urls)
         remote_log_dir = os.path.dirname(results['debuglog'])
         self.result.start_tests()
         for tst in results['tests']:
@@ -160,8 +157,6 @@ class RemoteTestResult(TestResult):
         """
         self.stream.notify(event='message', msg="PASS : %d" % len(self.passed))
         self.stream.notify(event='message', msg="ERROR : %d" % len(self.errors))
-        self.stream.notify(event='message', msg="NOT FOUND : %d" % len(self.not_found))
-        self.stream.notify(event='message', msg="NOT A TEST : %d" % len(self.not_a_test))
         self.stream.notify(event='message', msg="FAIL : %d" % len(self.failed))
         self.stream.notify(event='message', msg="SKIP : %d" % len(self.skipped))
         self.stream.notify(event='message', msg="WARN : %d" % len(self.warned))
@@ -201,24 +196,6 @@ class RemoteTestResult(TestResult):
         TestResult.add_error(self, test)
         self.stream.set_test_status(status='ERROR', state=test)
 
-    def add_not_found(self, test):
-        """
-        Called when a test path was not found.
-
-        :param test: :class:`avocado.test.Test` instance.
-        """
-        TestResult.add_not_found(self, test)
-        self.stream.set_test_status(status='NOT_FOUND', state=test)
-
-    def add_not_a_test(self, test):
-        """
-        Called when a file is not an avocado test.
-
-        :param test: :class:`avocado.test.Test` instance.
-        """
-        TestResult.add_not_a_test(self, test)
-        self.stream.set_test_status(status='NOT_A_TEST', state=test)
-
     def add_fail(self, test):
         """
         Called when a test fails.
avocado/plugins/test_lister.py
@@ -12,12 +12,14 @@
 # Copyright: Red Hat Inc. 2013-2014
 # Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
 
-import os
+import sys
 
+from avocado import loader
+from avocado import test
 from avocado.core import data_dir
 from avocado.core import output
-from avocado.settings import settings
-from avocado.utils import path
+from avocado.core import exit_codes
+from avocado.utils import astring
 from avocado.plugins import plugin
@@ -29,6 +31,9 @@ class TestLister(plugin.Plugin):
 
     name = 'test_lister'
     enabled = True
+    view = None
+    test_loader = loader.TestLoader()
+    term_support = output.TermSupport()
 
     def configure(self, parser):
         """
@@ -39,40 +44,123 @@ class TestLister(plugin.Plugin):
         self.parser = parser.subcommands.add_parser(
             'list',
             help='List available test modules')
+        self.parser.add_argument('paths', type=str, default=[], nargs='*',
+                                 help="List of paths. If no paths provided, "
+                                      "avocado will list tests on the "
+                                      "configured test directory, "
+                                      "see 'avocado config --datadir'")
+        self.parser.add_argument('-V', '--verbose',
+                                 action='store_true', default=False,
+                                 help='Whether to show extra information '
+                                      '(headers and summary). Current: %(default)s')
         super(TestLister, self).configure(self.parser)
 
-    def run(self, args):
+    def _run(self, args):
         """
         List available test modules.
 
         :param args: Command line args received from the list subparser.
         """
-        view = output.View(app_args=args, use_paginator=True)
-        base_test_dir = data_dir.get_test_dir()
-        test_files = os.listdir(base_test_dir)
-        test_dirs = []
-        blength = 0
-        for t in test_files:
-            inspector = path.PathInspector(path=t)
-            if inspector.is_python():
-                clength = len((t.split('.')[0]))
-                if clength > blength:
-                    blength = clength
-                test_dirs.append((t.split('.')[0], os.path.join(base_test_dir, t)))
-        format_string = "  %-" + str(blength) + "s %s"
-        view.notify(event="message", msg='Config files read (in order):')
-        for cfg_path in settings.config_paths:
-            view.notify(event="message", msg='    %s' % cfg_path)
-        if settings.config_paths_failed:
-            view.notify(event="minor", msg='')
-            view.notify(event="error", msg='Config files that failed to read:')
-            for cfg_path in settings.config_paths_failed:
-                view.notify(event="error", msg='    %s' % cfg_path)
-        view.notify(event="minor", msg='')
-        view.notify(event="message", msg='Tests dir: %s' % base_test_dir)
-        if len(test_dirs) > 0:
-            view.notify(event="minor", msg=format_string % ('Alias', 'Path'))
-            for test_dir in test_dirs:
-                view.notify(event="minor", msg=format_string % test_dir)
-        else:
-            view.notify(event="error", msg='No tests were found on current tests dir')
+        self.view = output.View(app_args=args)
+
+        paths = [data_dir.get_test_dir()]
+        if args.paths:
+            paths = args.paths
+
+        params_list = self.test_loader.discover_urls(paths)
+        for params in params_list:
+            params['omit_non_tests'] = False
+        test_suite = self.test_loader.discover(params_list)
+        error_msg_parts = self.test_loader.validate_ui(test_suite,
+                                                       ignore_not_test=True,
+                                                       ignore_access_denied=True,
+                                                       ignore_broken_symlinks=True)
+        if error_msg_parts:
+            for error_msg in error_msg_parts:
+                self.view.notify(event='error', msg=error_msg)
+            sys.exit(exit_codes.AVOCADO_FAIL)
+
+        test_matrix = []
+        stats = {'simple': 0,
+                 'instrumented': 0,
+                 'buggy': 0,
+                 'missing': 0,
+                 'not_a_test': 0,
+                 'broken_symlink': 0,
+                 'access_denied': 0}
+        for cls, params in test_suite:
+            id_label = ''
+            type_label = cls.__name__
+
+            if 'params' in params:
+                id_label = params['params']['id']
+            else:
+                if 'name' in params:
+                    id_label = params['name']
+                elif 'path' in params:
+                    id_label = params['path']
+
+            if cls == test.SimpleTest:
+                stats['simple'] += 1
+                type_label = self.term_support.healthy_str('SIMPLE')
+            elif cls == test.BuggyTest:
+                stats['buggy'] += 1
+                type_label = self.term_support.fail_header_str('BUGGY')
+            elif cls == test.NotATest:
+                if not args.verbose:
+                    continue
+                stats['not_a_test'] += 1
+                type_label = self.term_support.warn_header_str('NOT_A_TEST')
+            elif cls == test.MissingTest:
+                stats['missing'] += 1
+                type_label = self.term_support.fail_header_str('MISSING')
+            elif cls == loader.BrokenSymlink:
+                stats['broken_symlink'] += 1
+                type_label = self.term_support.fail_header_str('BROKEN_SYMLINK')
+            elif cls == loader.AccessDeniedPath:
+                stats['access_denied'] += 1
+                type_label = self.term_support.fail_header_str('ACCESS_DENIED')
+            else:
+                if issubclass(cls, test.Test):
+                    stats['instrumented'] += 1
+                    type_label = self.term_support.healthy_str('INSTRUMENTED')
+
+            test_matrix.append((type_label, id_label))
+
+        header = None
+        if args.verbose:
+            header = (self.term_support.header_str('Type'),
+                      self.term_support.header_str('file'))
+        for line in astring.tabular_output(test_matrix,
+                                           header=header).splitlines():
+            self.view.notify(event='minor', msg="%s" % line)
+
+        if args.verbose:
+            self.view.notify(event='minor', msg='')
+            self.view.notify(event='message', msg=("SIMPLE: %s" % stats['simple']))
+            self.view.notify(event='message', msg=("INSTRUMENTED: %s" % stats['instrumented']))
+            self.view.notify(event='message', msg=("BUGGY: %s" % stats['buggy']))
+            self.view.notify(event='message', msg=("MISSING: %s" % stats['missing']))
+            self.view.notify(event='message', msg=("NOT_A_TEST: %s" % stats['not_a_test']))
+            self.view.notify(event='message', msg=("ACCESS_DENIED: %s" % stats['access_denied']))
+            self.view.notify(event='message', msg=("BROKEN_SYMLINK: %s" % stats['broken_symlink']))
+
+    def run(self, args):
+        try:
+            self._run(args)
+        except KeyboardInterrupt:
+            msg = ('Command interrupted by '
+                   'user...')
+            if self.view is not None:
+                self.view.notify(event='error', msg=msg)
+            else:
+                sys.stderr.write(msg)
+            sys.exit(exit_codes.AVOCADO_FAIL)
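Note: the table printed by the new list command comes from avocado.utils.astring.tabular_output(), called exactly as in _run() above; a small sketch with made-up rows, assuming an avocado checkout on the import path:

    from avocado.utils import astring

    # Rows mirror what _run() collects into test_matrix; these two are examples.
    test_matrix = [('SIMPLE', 'examples/wrappers/dummy.sh'),
                   ('INSTRUMENTED', 'examples/tests/passtest.py')]
    header = ('Type', 'file')

    for line in astring.tabular_output(test_matrix, header=header).splitlines():
        print(line)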
avocado/plugins/vm.py
@@ -39,7 +39,6 @@ class VMTestRunner(TestRunner):
         :param urls: a string with test URLs.
         :return: a dictionary with test results.
         """
-        urls = urls.split()
         avocado_cmd = ('cd %s; avocado run --force-job-id %s --json - --archive %s' %
                        (self.remote_test_dir, self.result.stream.job_unique_id,
                         " ".join(urls)))
         result = self.result.vm.remote.run(avocado_cmd, ignore_status=True)
@@ -61,10 +60,8 @@ class VMTestRunner(TestRunner):
         :return: a list of test failures.
         """
         failures = []
-        urls = [x['id'] for x in params_list]
-        self.result.urls = urls
         self.result.setup()
-        results = self.run_test(' '.join(urls))
+        results = self.run_test(self.result.urls)
         remote_log_dir = os.path.dirname(results['debuglog'])
         self.result.start_tests()
         for tst in results['tests']:
@@ -187,8 +184,6 @@ class VMTestResult(TestResult):
         """
         self.stream.notify(event='message', msg="PASS : %d" % len(self.passed))
         self.stream.notify(event='message', msg="ERROR : %d" % len(self.errors))
-        self.stream.notify(event='message', msg="NOT FOUND : %d" % len(self.not_found))
-        self.stream.notify(event='message', msg="NOT A TEST : %d" % len(self.not_a_test))
         self.stream.notify(event='message', msg="FAIL : %d" % len(self.failed))
         self.stream.notify(event='message', msg="SKIP : %d" % len(self.skipped))
         self.stream.notify(event='message', msg="WARN : %d" % len(self.warned))
@@ -228,24 +223,6 @@ class VMTestResult(TestResult):
         TestResult.add_error(self, test)
         self.stream.set_test_status(status='ERROR', state=test)
 
-    def add_not_found(self, test):
-        """
-        Called when a test path was not found.
-
-        :param test: :class:`avocado.test.Test` instance.
-        """
-        TestResult.add_not_found(self, test)
-        self.stream.set_test_status(status='NOT_FOUND', state=test)
-
-    def add_not_a_test(self, test):
-        """
-        Called when a file is not an avocado test.
-
-        :param test: :class:`avocado.test.Test` instance.
-        """
-        TestResult.add_not_a_test(self, test)
-        self.stream.set_test_status(status='NOT_A_TEST', state=test)
-
     def add_fail(self, test):
         """
         Called when a test fails.
avocado/plugins/xunit.py
@@ -52,7 +52,7 @@ class XmlResult(object):
         self.testsuite = '<testsuite name="avocado" tests="{tests}" errors="{errors}" failures="{failures}" skip="{skip}" time="{total_time}" timestamp="%s">' % timestamp
         self.testcases = []
 
-    def end_testsuite(self, tests, errors, not_found, failures, skip, total_time):
+    def end_testsuite(self, tests, errors, failures, skip, total_time):
         """
         End of testsuite node.
 
@@ -62,10 +62,8 @@ class XmlResult(object):
         :param skip: Number of test skipped.
         :param total_time: The total time of test execution.
         """
-        errors += not_found  # In XML count "not found tests" as error
         values = {'tests': tests,
                   'errors': errors,
-                  'not_found': not_found,
                   'failures': failures,
                   'skip': skip,
                   'total_time': total_time}
@@ -190,8 +188,6 @@ class xUnitTestResult(TestResult):
             self.xml.add_skip(state)
         elif state['status'] == 'FAIL':
             self.xml.add_failure(state)
-        elif state['status'] == 'NOT_FOUND':
-            self.xml.add_error(state)
         elif state['status'] == 'ERROR':
             self.xml.add_error(state)
@@ -204,7 +200,6 @@ class xUnitTestResult(TestResult):
                   'errors': len(self.errors),
                   'failures': len(self.failed),
                   'skip': len(self.skipped),
-                  'not_found': len(self.not_found),
                   'total_time': self.total_time}
         self.xml.end_testsuite(**values)
         contents = self.xml.get_contents()
avocado/result.py
@@ -81,10 +81,6 @@ class TestResultProxy(object):
         for output_plugin in self.output_plugins:
             output_plugin.add_error(state)
 
-    def add_not_found(self, state):
-        for output_plugin in self.output_plugins:
-            output_plugin.add_not_found(state)
-
     def add_fail(self, state):
         for output_plugin in self.output_plugins:
             output_plugin.add_fail(state)
@@ -127,8 +123,6 @@ class TestResult(object):
         self.total_time = 0.0
         self.passed = []
         self.errors = []
-        self.not_found = []
-        self.not_a_test = []
         self.failed = []
         self.skipped = []
         self.warned = []
@@ -189,28 +183,6 @@ class TestResult(object):
         """
         self.errors.append(state)
 
-    def add_not_found(self, state):
-        """
-        Called when a test was not found.
-
-        Causes: non existing path or could not resolve alias.
-
-        :param state: result of :class:`avocado.test.Test.get_state`.
-        :type state: dict
-        """
-        self.not_found.append(state)
-
-    def add_not_a_test(self, state):
-        """
-        Called when a file is not an avocado test
-
-        Causes: Non python, non executable file or python file non executable with no avocado test class in it.
-
-        :param state: result of :class:`avocado.test.Test.get_state`.
-        :type state: dict
-        """
-        self.not_a_test.append(state)
-
     def add_fail(self, state):
         """
         Called when a test fails.
@@ -245,8 +217,6 @@ class TestResult(object):
         """
         status_map = {'PASS': self.add_pass,
                       'ERROR': self.add_error,
-                      'NOT_FOUND': self.add_not_found,
-                      'NOT_A_TEST': self.add_not_a_test,
                       'FAIL': self.add_fail,
                       'TEST_NA': self.add_skip,
                       'WARN': self.add_warn}
@@ -285,8 +255,6 @@ class HumanTestResult(TestResult):
         self.stream.notify(event="message", msg="FAIL : %d" % len(self.failed))
         self.stream.notify(event="message", msg="SKIP : %d" % len(self.skipped))
         self.stream.notify(event="message", msg="WARN : %d" % len(self.warned))
-        self.stream.notify(event="message", msg="NOT FOUND : %d" % len(self.not_found))
-        self.stream.notify(event="message", msg="NOT A TEST : %d" % len(self.not_a_test))
         self.stream.notify(event="message", msg="TIME : %.2f s" % self.total_time)
 
     def start_test(self, state):
@@ -327,26 +295,6 @@ class HumanTestResult(TestResult):
         TestResult.add_error(self, state)
         self.stream.set_test_status(status='ERROR', state=state)
 
-    def add_not_found(self, state):
-        """
-        Called when a test was not found.
-
-        :param state: result of :class:`avocado.test.Test.get_state`.
-        :type state: dict
-        """
-        TestResult.add_not_found(self, state)
-        self.stream.set_test_status(status='NOT_FOUND', state=state)
-
-    def add_not_a_test(self, state):
-        """
-        Called when a given file is not a test.
-
-        :param state: result of :class:`avocado.test.Test.get_state`.
-        :type state: dict
-        """
-        TestResult.add_not_a_test(self, state)
-        self.stream.set_test_status(status='NOT_A_TEST', state=state)
-
     def add_fail(self, state):
         """
         Called when a test fails.
avocado/runner.py
@@ -74,6 +74,8 @@ class TestRunner(object):
         try:
             instance = self.job.test_loader.load_test(test_factory)
+            if instance.runner_queue is None:
+                instance.runner_queue = queue
             runtime.CURRENT_TEST = instance
             early_state = instance.get_state()
             queue.put(early_state)
@@ -110,11 +112,11 @@ class TestRunner(object):
             test_state['text_output'] = log_file_obj.read()
         return test_state
 
-    def run_suite(self, params_list):
+    def run_suite(self, test_suite):
         """
         Run one or more tests and report with test result.
 
-        :param params_list: a list of param dicts.
+        :param test_suite: a list of tests to run.
         :return: a list of test failures.
         """
@@ -123,7 +125,6 @@ class TestRunner(object):
             self.job.sysinfo.start_job_hook()
         self.result.start_tests()
         q = queues.SimpleQueue()
-        test_suite = self.job.test_loader.discover(params_list, q)
         for test_factory in test_suite:
             p = multiprocessing.Process(target=self.run_test,
docs/source/GetStartedGuide.rst
@@ -43,23 +43,78 @@ Using the avocado test runner
 The test runner is designed to conveniently run tests on your laptop. The tests
 you can run are:
 
-* Tests written in python, using the avocado API, which we'll call `native`.
+* Tests written in python, using the avocado API, which we'll call
+  `instrumented`.
 * Any executable in your box, really. The criteria for PASS/FAIL is the return
   code of the executable. If it returns 0, the test PASSed, if it returned
-  != 0, it FAILed. We'll call those tests `simple tests`.
+  != 0, it FAILed. We'll call those tests `simple tests`. There is another type
+  of tests that we'll discuss in the next section.
 
-Native tests
-------------
+Listing tests
+-------------
 
-Avocado looks for avocado "native" tests in some locations, the main one is in
-the config file ``/etc/avocado/avocado.conf``, section ``runner``, ``test_dir``
-key. You can list tests by::
+The ``avocado`` command line tool also has a ``list`` command, that lists the
+known tests in a given path, be it a path to an individual test, or a path
+to a directory. If no arguments provided, avocado will inspect the contents
+of the test location being used by avocado (if you are in doubt about which
+one is that, you may use ``avocado config --datadir``). The output looks like::
 
     $ avocado list
-    Tests available:
-        failtest
-        sleeptest
-        synctest
+    INSTRUMENTED /usr/share/avocado/tests/abort.py
+    INSTRUMENTED /usr/share/avocado/tests/datadir.py
+    INSTRUMENTED /usr/share/avocado/tests/doublefail.py
+    INSTRUMENTED /usr/share/avocado/tests/doublefree.py
+    INSTRUMENTED /usr/share/avocado/tests/errortest.py
+    INSTRUMENTED /usr/share/avocado/tests/failtest.py
+    INSTRUMENTED /usr/share/avocado/tests/fiotest.py
+    INSTRUMENTED /usr/share/avocado/tests/gdbtest.py
+    INSTRUMENTED /usr/share/avocado/tests/gendata.py
+    INSTRUMENTED /usr/share/avocado/tests/linuxbuild.py
+    INSTRUMENTED /usr/share/avocado/tests/multiplextest.py
+    INSTRUMENTED /usr/share/avocado/tests/passtest.py
+    INSTRUMENTED /usr/share/avocado/tests/skiptest.py
+    INSTRUMENTED /usr/share/avocado/tests/sleeptenmin.py
+    INSTRUMENTED /usr/share/avocado/tests/sleeptest.py
+    INSTRUMENTED /usr/share/avocado/tests/synctest.py
+    INSTRUMENTED /usr/share/avocado/tests/timeouttest.py
+    INSTRUMENTED /usr/share/avocado/tests/trinity.py
+    INSTRUMENTED /usr/share/avocado/tests/warntest.py
+    INSTRUMENTED /usr/share/avocado/tests/whiteboard.py
+
+Here, ``INSTRUMENTED`` means that the files there are python files with an
+avocado test class in them. This means those tests have access to all avocado
+APIs and facilities. Let's try to list a directory with a bunch of executable
+shell scripts::
+
+    $ avocado list examples/wrappers/
+    SIMPLE examples/wrappers/dummy.sh
+    SIMPLE examples/wrappers/ltrace.sh
+    SIMPLE examples/wrappers/perf.sh
+    SIMPLE examples/wrappers/strace.sh
+    SIMPLE examples/wrappers/time.sh
+    SIMPLE examples/wrappers/valgrind.sh
+
+Here, as covered in the previous section, ``SIMPLE`` means that those files are
+executables, that avocado will simply execute and return PASS or FAIL
+depending on their return codes (PASS -> 0, FAIL -> any integer different
+than 0). You can also provide the ``--verbose``, or ``-V`` flag to display files
+that were detected but are not avocado tests, along with summary information::
+
+    $ avocado list examples/gdb-prerun-scripts/ -V
+    Type       file
+    NOT_A_TEST examples/gdb-prerun-scripts/README
+    NOT_A_TEST examples/gdb-prerun-scripts/pass-sigusr1
+
+    SIMPLE: 0
+    INSTRUMENTED: 0
+    BUGGY: 0
+    MISSING: 0
+    NOT_A_TEST: 2
 
 Running Tests
 -------------
 
 You can run them using the subcommand ``run``::
@@ -76,7 +131,7 @@ You can run them using the subcommand ``run``::
     TIME : 1.01 s
 
 Job ID
-------
+======
 
 The Job ID is a SHA1 string that has some information encoded:
@@ -89,7 +144,7 @@ the purposes of joining on a single database results obtained by jobs run
 on different systems.
 
 Simple Tests
-------------
+============
 
 You can run any number of test in an arbitrary order, as well as mix and match
 native tests and simple tests::
@@ -115,7 +170,7 @@ native tests and simple tests::
     TIME : 1.04 s
 
 Debugging tests
----------------
+===============
 
 When developing new tests, you frequently want to look at the straight
 output of the job log in the stdout, without having to tail the job log.
docs/source/RemoteMachinePlugin.rst
@@ -64,7 +64,6 @@ Once everything is verified and covered, you may run your test. Example::
     (2/2) examples/tests/failtest.py: FAIL (0.00 s)
     PASS : 1
     ERROR : 0
-    NOT FOUND : 0
     FAIL : 1
     SKIP : 0
     WARN : 0
docs/source/VirtualMachinePlugin.rst
@@ -71,7 +71,6 @@ Once everything is verified and covered, you may run your test. Example::
     (2/2) examples/tests/failtest.py: FAIL (0.00 s)
     PASS : 1
     ERROR : 0
-    NOT FOUND : 0
     FAIL : 1
     SKIP : 0
     WARN : 0
docs/source/WritingTests.rst
@@ -354,7 +354,6 @@ option --output-check-record all to the test runner::
     FAIL : 0
     SKIP : 0
     WARN : 0
-    NOT FOUND : 0
     TIME : 2.20 s
@@ -388,7 +387,6 @@ Let's record the output for this one::
     FAIL : 0
     SKIP : 0
     WARN : 0
-    NOT FOUND : 0
     TIME : 0.01 s
 
 After this is done, you'll notice that a the test data directory
@@ -418,7 +416,6 @@ happens if we change the ``stdout.expected`` file contents to ``Hello, avocado!``
     FAIL : 1
     SKIP : 0
     WARN : 0
-    NOT FOUND : 0
     TIME : 0.02 s
 
 Verifying the failure reason::
man/avocado.rst
@@ -84,7 +84,6 @@ directories. The output should be similar to::
     FAIL : 0
     SKIP : 0
     WARN : 0
-    NOT FOUND : 0
     TIME : 1.00 s
 
 The test directories will vary depending on you system and
@@ -151,18 +150,65 @@ LISTING TESTS
 =============
 
 The `avocado` command line tool also has a `list` command, that lists the
-known tests in the standard test directory::
-
-    $ avocado list
-
-The output should be similar to::
-
-    Tests dir: /home/<user>/local/avocado/tests
-        Alias        Path
-        sleeptest    /home/<user>/local/avocado/tests/sleeptest.py
-        ...
-        warntest     /home/<user>/local/avocado/tests/warntest.py
-        sleeptenmin  /home/<user>/local/avocado/tests/sleeptenmin.py
+known tests in a given path, be it a path to an individual test, or a path
+to a directory. If no arguments provided, avocado will inspect the contents
+of the test location being used by avocado (if you are in doubt about which
+one is that, you may use `avocado config --datadir`). The output looks like::
+
+    $ avocado list
+    INSTRUMENTED /usr/share/avocado/tests/abort.py
+    INSTRUMENTED /usr/share/avocado/tests/datadir.py
+    INSTRUMENTED /usr/share/avocado/tests/doublefail.py
+    INSTRUMENTED /usr/share/avocado/tests/doublefree.py
+    INSTRUMENTED /usr/share/avocado/tests/errortest.py
+    INSTRUMENTED /usr/share/avocado/tests/failtest.py
+    INSTRUMENTED /usr/share/avocado/tests/fiotest.py
+    INSTRUMENTED /usr/share/avocado/tests/gdbtest.py
+    INSTRUMENTED /usr/share/avocado/tests/gendata.py
+    INSTRUMENTED /usr/share/avocado/tests/linuxbuild.py
+    INSTRUMENTED /usr/share/avocado/tests/multiplextest.py
+    INSTRUMENTED /usr/share/avocado/tests/passtest.py
+    INSTRUMENTED /usr/share/avocado/tests/skiptest.py
+    INSTRUMENTED /usr/share/avocado/tests/sleeptenmin.py
+    INSTRUMENTED /usr/share/avocado/tests/sleeptest.py
+    INSTRUMENTED /usr/share/avocado/tests/synctest.py
+    INSTRUMENTED /usr/share/avocado/tests/timeouttest.py
+    INSTRUMENTED /usr/share/avocado/tests/trinity.py
+    INSTRUMENTED /usr/share/avocado/tests/warntest.py
+    INSTRUMENTED /usr/share/avocado/tests/whiteboard.py
+
+Here, `INSTRUMENTED` means that the files there are python files with an avocado
+test class in them, therefore, that they are what we call instrumented tests.
+This means those tests can use all avocado APIs and facilities. Let's try to
+list a directory with a bunch of executable shell scripts::
+
+    $ avocado list examples/wrappers/
+    SIMPLE examples/wrappers/dummy.sh
+    SIMPLE examples/wrappers/ltrace.sh
+    SIMPLE examples/wrappers/perf.sh
+    SIMPLE examples/wrappers/strace.sh
+    SIMPLE examples/wrappers/time.sh
+    SIMPLE examples/wrappers/valgrind.sh
+
+Here, `SIMPLE` means that those files are executables, that avocado will simply
+execute and return PASS or FAIL depending on their return codes (PASS -> 0,
+FAIL -> any integer different than 0). You can also provide the `--verbose`,
+or `-V` flag to display files that were detected but are not avocado tests,
+along with summary information::
+
+    $ avocado list examples/gdb-prerun-scripts/ -V
+    Type       file
+    NOT_A_TEST examples/gdb-prerun-scripts/README
+    NOT_A_TEST examples/gdb-prerun-scripts/pass-sigusr1
+
+    SIMPLE: 0
+    INSTRUMENTED: 0
+    BUGGY: 0
+    MISSING: 0
+    NOT_A_TEST: 2
+
+That summarizes the basic commands you should be using more frequently when
+you start with avocado. Let's talk now about how avocado stores test results.
 
 EXPLORING RESULTS
 =================
@@ -234,7 +280,6 @@ And the output should look like::
     FAIL : 0
     SKIP : 0
     WARN : 0
-    NOT FOUND : 0
     TIME : 16.53 s
 
 The `multiplex` plugin and the test runner supports two kinds of global
@@ -397,7 +442,6 @@ option --output-check-record all to the test runner::
     FAIL : 0
     SKIP : 0
     WARN : 0
-    NOT FOUND : 0
     TIME : 2.20 s
 
 After the reference files are added, the check process is transparent, in the
@@ -433,7 +477,6 @@ Let's record the output (both stdout and stderr) for this one::
     FAIL : 0
     SKIP : 0
    WARN : 0
-    NOT FOUND : 0
     TIME : 0.01 s
 
 After this is done, you'll notice that a the test data directory
@@ -479,7 +522,6 @@ The output should look like::
     (1/1) sleeptest.py: PASS (1.01 s)
     PASS : 1
     ERROR : 0
-    NOT FOUND : 0
     FAIL : 0
     SKIP : 0
     WARN : 0
selftests/all/functional/avocado/basic_tests.py
@@ -82,7 +82,7 @@ class RunnerOperationTest(unittest.TestCase):
         os.chdir(basedir)
         cmd_line = './scripts/avocado run --sysinfo=off bogustest'
         result = process.run(cmd_line, ignore_status=True)
-        expected_rc = 1
+        expected_rc = 2
         unexpected_rc = 3
         self.assertNotEqual(result.exit_status, unexpected_rc,
                             "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
@@ -169,10 +169,9 @@ class RunnerOperationTest(unittest.TestCase):
         os.chdir(basedir)
         cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
         result = process.run(cmd_line, ignore_status=True)
-        expected_rc = 1
+        expected_rc = 2
         self.assertEqual(result.exit_status, expected_rc)
-        self.assertIn('NOT_FOUND', result.stdout)
-        self.assertIn('NOT FOUND : 1', result.stdout)
+        self.assertIn('File not found', result.stdout)
 
     def test_invalid_unique_id(self):
         cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar skiptest'
@@ -441,13 +440,6 @@ class PluginsXunitTest(PluginsTest):
     def test_xunit_plugin_errortest(self):
         self.run_and_check('errortest', 1, 1, 1, 0, 0, 0)
 
-    def test_xunit_plugin_notfoundtest(self):
-        self.run_and_check('sbrubles', 1, 1, 1, 0, 0, 0)
-
-    def test_xunit_plugin_mixedtest(self):
-        self.run_and_check('passtest failtest skiptest errortest sbrubles',
-                           1, 5, 2, 0, 1, 1)
 
 class ParseJSONError(Exception):
     pass
@@ -455,7 +447,7 @@ class ParseJSONError(Exception):
 
 class PluginsJSONTest(PluginsTest):
 
-    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors, e_nnotfound,
-                      e_nfailures, e_nskip):
+    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors, e_nfailures,
+                      e_nskip):
         os.chdir(basedir)
         cmd_line = './scripts/avocado run --sysinfo=off --json - --archive %s' % testname
@@ -478,9 +470,6 @@ class PluginsJSONTest(PluginsTest):
         n_errors = json_data['errors']
         self.assertEqual(n_errors, e_nerrors,
                          "Different number of expected tests")
-        n_not_found = json_data['not_found']
-        self.assertEqual(n_not_found, e_nnotfound,
-                         "Different number of not found tests")
         n_failures = json_data['failures']
         self.assertEqual(n_failures, e_nfailures,
                          "Different number of expected tests")
@@ -489,23 +478,16 @@ class PluginsJSONTest(PluginsTest):
                          "Different number of skipped tests")
 
     def test_json_plugin_passtest(self):
-        self.run_and_check('passtest', 0, 1, 0, 0, 0, 0)
+        self.run_and_check('passtest', 0, 1, 0, 0, 0)
 
     def test_json_plugin_failtest(self):
-        self.run_and_check('failtest', 1, 1, 0, 0, 1, 0)
+        self.run_and_check('failtest', 1, 1, 0, 1, 0)
 
     def test_json_plugin_skiptest(self):
-        self.run_and_check('skiptest', 0, 1, 0, 0, 0, 1)
+        self.run_and_check('skiptest', 0, 1, 0, 0, 1)
 
     def test_json_plugin_errortest(self):
-        self.run_and_check('errortest', 1, 1, 1, 0, 0, 0)
-
-    def test_json_plugin_notfoundtest(self):
-        self.run_and_check('sbrubles', 1, 1, 0, 1, 0, 0)
-
-    def test_json_plugin_mixedtest(self):
-        self.run_and_check('passtest failtest skiptest errortest sbrubles',
-                           1, 5, 1, 1, 1, 1)
+        self.run_and_check('errortest', 1, 1, 1, 0, 0)
 
 if __name__ == '__main__':
     unittest.main()
selftests/all/functional/avocado/loader_tests.py
@@ -73,11 +73,11 @@ class LoaderTestFunctional(unittest.TestCase):
         simple_test.save()
         cmd_line = './scripts/avocado run --sysinfo=off %s' % simple_test.path
         result = process.run(cmd_line, ignore_status=True)
-        expected_rc = 1
+        expected_rc = 2
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" % (expected_rc, result))
-        self.assertIn('NOT_A_TEST', result.stdout)
+        self.assertIn('is not an avocado test', result.stdout)
         simple_test.remove()
 
     def test_pass(self):
@@ -137,11 +137,11 @@ class LoaderTestFunctional(unittest.TestCase):
         avocado_not_a_test.save()
         cmd_line = './scripts/avocado run --sysinfo=off %s' % avocado_not_a_test.path
         result = process.run(cmd_line, ignore_status=True)
-        expected_rc = 1
+        expected_rc = 2
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" % (expected_rc, result))
-        self.assertIn('NOT_A_TEST', result.stdout)
+        self.assertIn('is not an avocado test', result.stdout)
         avocado_not_a_test.remove()
 
 if __name__ == '__main__':
selftests/all/unit/avocado/loader_unittest.py
@@ -73,8 +73,7 @@ class LoaderTest(unittest.TestCase):
                                             'avocado_loader_unittest')
         simple_test.save()
         test_class, test_parameters = (
-            self.loader.discover_test(params={'id': simple_test.path},
-                                      queue=self.queue))
+            self.loader.discover_test(params={'id': simple_test.path}))
         self.assertTrue(test_class == test.SimpleTest, test_class)
         tc = test_class(**test_parameters)
         tc.action()
@@ -86,8 +85,7 @@ class LoaderTest(unittest.TestCase):
                                             mode=0664)
         simple_test.save()
         test_class, test_parameters = (
-            self.loader.discover_test(params={'id': simple_test.path},
-                                      queue=self.queue))
+            self.loader.discover_test(params={'id': simple_test.path}))
         self.assertTrue(test_class == test.NotATest, test_class)
         tc = test_class(**test_parameters)
         self.assertRaises(exceptions.NotATestError, tc.action)
@@ -99,8 +97,7 @@ class LoaderTest(unittest.TestCase):
                                                   'avocado_loader_unittest')
         avocado_pass_test.save()
         test_class, test_parameters = (
-            self.loader.discover_test(params={'id': avocado_pass_test.path},
-                                      queue=self.queue))
+            self.loader.discover_test(params={'id': avocado_pass_test.path}))
         self.assertTrue(str(test_class) == "<class 'passtest.PassTest'>",
                         str(test_class))
         self.assertTrue(issubclass(test_class, test.Test))
@@ -114,8 +111,7 @@ class LoaderTest(unittest.TestCase):
                                                    'avocado_loader_unittest')
         avocado_buggy_test.save()
         test_class, test_parameters = (
-            self.loader.discover_test(params={'id': avocado_buggy_test.path},
-                                      queue=self.queue))
+            self.loader.discover_test(params={'id': avocado_buggy_test.path}))
         self.assertTrue(test_class == test.SimpleTest, test_class)
         tc = test_class(**test_parameters)
         self.assertRaises(exceptions.TestFail, tc.action)
@@ -128,8 +124,7 @@ class LoaderTest(unittest.TestCase):
                                                    mode=0664)
         avocado_buggy_test.save()
         test_class, test_parameters = (
-            self.loader.discover_test(params={'id': avocado_buggy_test.path},
-                                      queue=self.queue))
+            self.loader.discover_test(params={'id': avocado_buggy_test.path}))
         self.assertTrue(test_class == test.BuggyTest, test_class)
         tc = test_class(**test_parameters)
         self.assertRaises(ImportError, tc.action)
@@ -142,8 +137,7 @@ class LoaderTest(unittest.TestCase):
                                                     mode=0664)
         avocado_not_a_test.save()
         test_class, test_parameters = (
-            self.loader.discover_test(params={'id': avocado_not_a_test.path},
-                                      queue=self.queue))
+            self.loader.discover_test(params={'id': avocado_not_a_test.path}))
         self.assertTrue(test_class == test.NotATest, test_class)
         tc = test_class(**test_parameters)
         self.assertRaises(exceptions.NotATestError, tc.action)
@@ -154,8 +148,7 @@ class LoaderTest(unittest.TestCase):
                                                     'avocado_loader_unittest')
         avocado_not_a_test.save()
         test_class, test_parameters = (
-            self.loader.discover_test(params={'id': avocado_not_a_test.path},
-                                      queue=self.queue))
+            self.loader.discover_test(params={'id': avocado_not_a_test.path}))
         self.assertTrue(test_class == test.SimpleTest, test_class)
         tc = test_class(**test_parameters)
         # The test can't be executed (no shebang), raising an OSError
@@ -169,8 +162,7 @@ class LoaderTest(unittest.TestCase):
                                                     'avocado_loader_unittest')
         avocado_simple_test.save()
         test_class, test_parameters = (
-            self.loader.discover_test(params={'id': avocado_simple_test.path},
-                                      queue=self.queue))
+            self.loader.discover_test(params={'id': avocado_simple_test.path}))
         self.assertTrue(test_class == test.SimpleTest)
         tc = test_class(**test_parameters)
         tc.action()
@@ -183,8 +175,7 @@ class LoaderTest(unittest.TestCase):
                                                     mode=0664)
         avocado_simple_test.save()
         test_class, test_parameters = (
-            self.loader.discover_test(params={'id': avocado_simple_test.path},
-                                      queue=self.queue))
+            self.loader.discover_test(params={'id': avocado_simple_test.path}))
         self.assertTrue(test_class == test.NotATest)
         tc = test_class(**test_parameters)
         self.assertRaises(exceptions.NotATestError, tc.action)