Commit f95dc31b authored by Rudá Moura, committed by Lucas Meneghel Rodrigues

avocado: Add support for recursively finding tests in a directory

Add support for inspecting and finding tests inside a directory,
recursively, by using the method `discover_url`. It takes a URL (a
path) and returns the test parameters it discovers.

The loader object has the .discover() method, which can be used
to inspect the location, and the .validate() method, which reports
any problems with the user's input.

We are also changing the behavior of avocado to error out on
invalid inputs (missing paths or files that are not avocado
tests). Therefore, the concepts of MISSING and NOT_A_TEST both
disappear from the UI. The unit tests were updated to reflect the
new status quo.
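A usage sketch of the new loader API (minimal and illustrative; the
import path and the example directory are assumptions, not part of
this commit):

    from avocado import loader

    test_loader = loader.TestLoader()  # the job argument is now optional
    params_list = test_loader.discover_urls(['examples/tests'])
    test_suite = test_loader.discover(params_list)
    for error_msg in test_loader.validate_ui(test_suite):
        print(error_msg)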
Signed-off-by: Rudá Moura <rmoura@redhat.com>
Signed-off-by: Lucas Meneghel Rodrigues <lmr@redhat.com>
Parent: 6e67db5b
@@ -75,16 +75,6 @@ class TestError(TestBaseException):
status = "ERROR"
class TestNotFoundError(TestBaseException):
"""
Indicates that the test was not found.
Causes: non existing path or could not resolve alias.
"""
status = "NOT_FOUND"
class NotATestError(TestBaseException):
"""
......
@@ -137,7 +137,6 @@ class TermSupport(object):
self.SKIP = self.COLOR_YELLOW
self.FAIL = self.COLOR_RED
self.ERROR = self.COLOR_RED
self.NOT_FOUND = self.COLOR_YELLOW
self.WARN = self.COLOR_YELLOW
self.PARTIAL = self.COLOR_YELLOW
self.ENDC = self.CONTROL_END
@@ -157,7 +156,6 @@ class TermSupport(object):
self.SKIP = ''
self.FAIL = ''
self.ERROR = ''
self.NOT_FOUND = ''
self.WARN = ''
self.PARTIAL = ''
self.ENDC = ''
@@ -237,22 +235,6 @@ class TermSupport(object):
"""
return self.MOVE_BACK + self.ERROR + 'ERROR' + self.ENDC
def not_found_str(self):
"""
Print a warning NOT_FOUND string (yellow colored).
If the output does not support colors, just return the original string.
"""
return self.MOVE_BACK + self.NOT_FOUND + 'NOT_FOUND' + self.ENDC
def not_a_test_str(self):
"""
Print a warning NOT_A_TEST string (yellow colored).
If the output does not support colors, just return the original string.
"""
return self.MOVE_BACK + self.NOT_FOUND + 'NOT_A_TEST' + self.ENDC
def warn_str(self):
"""
Print a warning string (yellow colored).
@@ -403,8 +385,6 @@ class View(object):
def set_test_status(self, status, state):
mapping = {'PASS': self._log_ui_status_pass,
'ERROR': self._log_ui_status_error,
'NOT_FOUND': self._log_ui_status_not_found,
'NOT_A_TEST': self._log_ui_status_not_a_test,
'FAIL': self._log_ui_status_fail,
'SKIP': self._log_ui_status_skip,
'WARN': self._log_ui_status_warn}
@@ -523,24 +503,6 @@ class View(object):
normal_error_msg = term_support.error_str() + " (%.2f s)" % t_elapsed
self._log_ui_error_base(normal_error_msg)
def _log_ui_status_not_found(self, t_elapsed):
"""
Log a NOT_FOUND status message for a given operation.
:param t_elapsed: Time it took for the operation to complete.
"""
normal_error_msg = term_support.not_found_str() + " (%.2f s)" % t_elapsed
self._log_ui_error_base(normal_error_msg)
def _log_ui_status_not_a_test(self, t_elapsed):
"""
Log a NOT_A_TEST status message for a given operation.
:param t_elapsed: Time it took for the operation to complete.
"""
normal_error_msg = term_support.not_a_test_str() + " (%.2f s)" % t_elapsed
self._log_ui_error_base(normal_error_msg)
def _log_ui_status_fail(self, t_elapsed):
"""
Log a FAIL status message for a given operation.
......
@@ -19,8 +19,6 @@ a test or a job in avocado PASSed or FAILed.
mapping = {"TEST_NA": True,
"ABORT": False,
"ERROR": False,
"NOT_FOUND": False,
"NOT_A_TEST": False,
"FAIL": False,
"WARN": False,
"PASS": True,
......
@@ -195,11 +195,39 @@ class Job(object):
human_plugin = result.HumanTestResult(self.view, self.args)
self.result_proxy.add_output_plugin(human_plugin)
def _multiplex_params_list(self, params_list, multiplex_files):
for mux_file in multiplex_files:
if not os.path.exists(mux_file):
e_msg = "Multiplex file %s doesn't exist." % mux_file
raise exceptions.OptionValidationError(e_msg)
result = []
for params in params_list:
try:
variants = multiplexer.multiplex_yamls(multiplex_files,
self.args.filter_only,
self.args.filter_out)
except SyntaxError:
variants = None
if variants:
tag = 1
for variant in variants:
env = {}
for t in variant:
env.update(dict(t.environment))
env.update({'tag': tag})
env.update({'id': params['id']})
result.append(env)
tag += 1
else:
result.append(params)
return result
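For illustration, given params_list = [{'id': 'sleeptest'}] and a
multiplex file yielding two variants (the variant contents below are
made up), the helper above returns one entry per variant, tagged
sequentially and carrying the original id:

    [{'sleep_length': 0.5, 'tag': 1, 'id': 'sleeptest'},
     {'sleep_length': 1.0, 'tag': 2, 'id': 'sleeptest'}]

If the multiplexer raises a SyntaxError or produces no variants, the
original params dicts pass through unchanged.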
def _run(self, urls=None, multiplex_files=None):
"""
Unhandled job method. Runs a list of test URLs to completion.
:param urls: String with tests to run.
:param urls: String with tests to run, separated by whitespace.
Optionally, a list of tests (each test a string).
:param multiplex_files: File that multiplexes a given test url.
:return: Integer with overall job status. See
@@ -208,72 +236,55 @@ class Job(object):
:class:`avocado.core.exceptions.JobBaseException` errors,
that configure a job failure.
"""
params_list = []
if urls is None:
if self.args and self.args.url:
if self.args and self.args.url is not None:
urls = self.args.url
else:
if isinstance(urls, str):
urls = urls.split()
if urls is not None:
for url in urls:
if url.startswith(os.path.pardir):
url = os.path.abspath(url)
params_list.append({'id': url})
else:
if isinstance(urls, str):
urls = urls.split()
if not urls:
e_msg = "Empty test ID. A test path or alias must be provided"
raise exceptions.OptionValidationError(e_msg)
self._make_test_loader()
params_list = self.test_loader.discover_urls(urls)
if multiplex_files is None:
if self.args and self.args.multiplex_files is not None:
multiplex_files = self.args.multiplex_files
else:
multiplex_files = multiplex_files
if multiplex_files is not None:
for mux_file in multiplex_files:
if not os.path.exists(mux_file):
e_msg = "Multiplex file %s doesn't exist." % (mux_file)
raise exceptions.OptionValidationError(e_msg)
params_list = []
if urls is not None:
for url in urls:
try:
variants = multiplexer.multiplex_yamls(multiplex_files,
self.args.filter_only,
self.args.filter_out)
except SyntaxError:
variants = None
if variants:
tag = 1
for variant in variants:
env = {}
for t in variant:
env.update(dict(t.environment))
env.update({'tag': tag})
env.update({'id': url})
params_list.append(env)
tag += 1
else:
params_list.append({'id': url})
if not params_list:
e_msg = "Test(s) with empty parameter list or the number of variants is zero"
params_list = self._multiplex_params_list(params_list,
multiplex_files)
try:
test_suite = self.test_loader.discover(params_list)
error_msg_parts = self.test_loader.validate_ui(test_suite)
except KeyboardInterrupt:
raise exceptions.JobError('Command interrupted by user...')
if error_msg_parts:
e_msg = '\n'.join(error_msg_parts)
raise exceptions.OptionValidationError(e_msg)
if not test_suite:
e_msg = ("No tests found within the specified path(s) "
"(Possible reasons: File ownership, permissions, typos)")
raise exceptions.OptionValidationError(e_msg)
if self.args is not None:
self.args.test_result_total = len(params_list)
self.args.test_result_total = len(test_suite)
self._make_test_result()
self._make_test_runner()
self._make_test_loader()
self.view.start_file_logging(self.logfile,
self.loglevel,
self.unique_id)
self.view.logfile = self.logfile
failures = self.test_runner.run_suite(params_list)
failures = self.test_runner.run_suite(test_suite)
self.view.stop_file_logging()
# If it's all good so far, set job status to 'PASS'
if self.status == 'RUNNING':
@@ -308,7 +319,8 @@ class Job(object):
The test runner figures out which tests need to be run on an empty urls
list by assuming the first component of the shortname is the test url.
:param urls: String with tests to run.
:param urls: String with tests to run, separated by whitespace.
Optionally, a list of tests (each test a string).
:param multiplex_files: File that multiplexes a given test url.
:return: Integer with overall job status. See
......
@@ -28,13 +28,29 @@ from avocado.core import data_dir
from avocado.utils import path
class _DebugJob(object):
def __init__(self):
self.logdir = '.'
class BrokenSymlink(object):
pass
class AccessDeniedPath(object):
pass
class TestLoader(object):
"""
Test loader class.
"""
def __init__(self, job):
def __init__(self, job=None):
if job is None:
job = _DebugJob()
self.job = job
def _make_missing_test(self, test_name, params):
@@ -61,7 +77,7 @@ class TestLoader(object):
'job': self.job}
return test_class, test_parameters
def _make_test(self, test_name, test_path, params, queue):
def _make_test(self, test_name, test_path, params):
module_name = os.path.basename(test_path).split('.')[0]
test_module_dir = os.path.dirname(test_path)
sys.path.append(test_module_dir)
@@ -71,11 +87,10 @@ class TestLoader(object):
'params': params,
'job': self.job}
test_parameters_queue = {'name': test_name,
'base_logdir': self.job.logdir,
'params': params,
'job': self.job,
'runner_queue': queue}
test_parameters_name = {'name': test_name,
'base_logdir': self.job.logdir,
'params': params,
'job': self.job}
try:
f, p, d = imp.find_module(module_name, [test_module_dir])
test_module = imp.load_module(module_name, f, p, d)
@@ -84,10 +99,11 @@ class TestLoader(object):
if inspect.isclass(obj):
if issubclass(obj, test.Test):
test_class = obj
break
if test_class is not None:
# Module is importable and does have an avocado test class
# inside, let's proceed.
test_parameters = test_parameters_queue
test_parameters = test_parameters_name
else:
if os.access(test_path, os.X_OK):
# Module does not have an avocado test class inside but
@@ -98,7 +114,7 @@ class TestLoader(object):
# Module does not have an avocado test class inside, and
# it's not executable. Not a Test.
test_class = test.NotATest
test_parameters = test_parameters_queue
test_parameters = test_parameters_name
# Since a lot of things can happen here, the broad exception is
# justified. The user will get it unadulterated anyway, and avocado
@@ -127,30 +143,31 @@ class TestLoader(object):
params['exception'] = details
else:
test_class = test.NotATest
test_parameters = test_parameters_queue
test_parameters = test_parameters_name
sys.path.pop(sys.path.index(test_module_dir))
return test_class, test_parameters
def discover_test(self, params, queue):
def discover_test(self, params):
"""
Try to discover and resolve a test.
:param params: dictionary with test parameters.
:type params: dict
:param queue: a queue for communicating with the test runner.
:type queue: an instance of :class:`multiprocessing.Queue`
:return: a test factory (a pair of test class and test parameters)
or `None`.
"""
test_name = params.get('id')
test_path = os.path.abspath(test_name)
test_name = test_path = params.get('id')
if os.path.exists(test_path):
if os.access(test_path, os.R_OK) is False:
return (AccessDeniedPath,
{'params': {'id': test_path}})
path_analyzer = path.PathInspector(test_path)
if path_analyzer.is_python():
test_class, test_parameters = self._make_test(test_name,
test_path,
params, queue)
params)
else:
if os.access(test_path, os.X_OK):
test_class, test_parameters = self._make_simple_test(test_path,
@@ -159,34 +176,180 @@ class TestLoader(object):
test_class, test_parameters = self._make_not_a_test(test_path,
params)
else:
if os.path.islink(test_path):
try:
if not os.path.isfile(os.readlink(test_path)):
return BrokenSymlink, {'params': {'id': test_path}}
except OSError:
return AccessDeniedPath, {'params': {'id': test_path}}
# Try to resolve test ID (keep compatibility)
rel_path = '%s.py' % test_name
test_path = os.path.join(data_dir.get_test_dir(), rel_path)
if os.path.exists(test_path):
test_class, test_parameters = self._make_test(rel_path,
test_path,
params, queue)
params)
else:
test_class, test_parameters = self._make_missing_test(
test_name, params)
return test_class, test_parameters
def discover(self, params_list, queue):
def discover_url(self, url):
"""
Discover (possible) tests from a directory.
Recursively walk a directory and find test params.
The tests are returned in the order the walk finds them.
:param url: the directory path to inspect.
:type url: str
:return: a list of test params (each one a dictionary).
"""
ignore_suffix = ('.data', '.pyc', '.pyo', '__init__.py',
'__main__.py')
params_list = []
def onerror(exception):
norm_url = os.path.abspath(url)
norm_error_filename = os.path.abspath(exception.filename)
if os.path.isdir(norm_url) and norm_url != norm_error_filename:
omit_non_tests = True
else:
omit_non_tests = False
params_list.append({'id': exception.filename,
'omit_non_tests': omit_non_tests})
for dirpath, dirnames, filenames in os.walk(url, onerror=onerror):
# Prune hidden directories in place so os.walk does not descend into them
dirnames[:] = [d for d in dirnames if not d.startswith('.')]
for file_name in filenames:
if not file_name.startswith('.'):
ignore = False
for suffix in ignore_suffix:
if file_name.endswith(suffix):
ignore = True
if not ignore:
pth = os.path.join(dirpath, file_name)
params_list.append({'id': pth,
'omit_non_tests': True})
return params_list
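Note that os.walk reports unreadable or missing paths through its
onerror callback instead of raising, which is what lets discover_url
turn such paths into params entries for later validation. A minimal
standalone sketch of the mechanism (the path is illustrative):

    import os

    def walk_errors(url):
        """Collect files plus the paths os.walk could not read."""
        errors = []

        def onerror(exception):
            # os.walk swallows OSError by default; onerror hands us each one
            errors.append(exception.filename)

        files = []
        for dirpath, dirnames, filenames in os.walk(url, onerror=onerror):
            # Prune hidden directories in place, as discover_url does
            dirnames[:] = [d for d in dirnames if not d.startswith('.')]
            files.extend(os.path.join(dirpath, f)
                         for f in filenames if not f.startswith('.'))
        return files, errors

    print(walk_errors('no/such/dir'))  # -> ([], ['no/such/dir'])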
def discover_urls(self, urls):
"""
Discover (possible) tests from test urls.
:param urls: a list of test urls.
:type urls: list
:return: a list of test params (each one a dictionary).
"""
params_list = []
for url in urls:
if url == '':
continue
params_list.extend(self.discover_url(url))
return params_list
def discover(self, params_list):
"""
Discover tests for test suite.
:param params_list: a list of test parameters.
:type params_list: list
:return: a test suite (a list of test factories).
"""
test_suite = []
for params in params_list:
test_class, test_parameters = self.discover_test(params, queue)
test_suite.append((test_class, test_parameters))
test_factory = self.discover_test(params)
if test_factory is None:
continue
test_class, test_parameters = test_factory
if test_class in [test.NotATest, BrokenSymlink, AccessDeniedPath]:
if not params.get('omit_non_tests'):
test_suite.append((test_class, test_parameters))
else:
test_suite.append((test_class, test_parameters))
return test_suite
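The omit_non_tests flag distinguishes files a recursive walk merely
stumbled upon from files the user named explicitly. Reusing the
test_loader from the sketch in the commit message (the path is
illustrative, assuming tests/README exists and is not a test):

    # Swept up by a directory walk: silently dropped from the suite
    test_loader.discover([{'id': 'tests/README', 'omit_non_tests': True}])  # -> []
    # Named explicitly by the user: kept as a NotATest entry,
    # so validate_ui() can report it
    test_loader.discover([{'id': 'tests/README'}])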
@staticmethod
def validate(test_suite):
"""
Find missing files/non-tests provided by the user in the input.
Used mostly for user input validation.
:param test_suite: List with tuples (test_class, test_params)
:return: four lists: missing files, non-tests, broken symlinks and access-denied paths.
"""
missing = []
not_test = []
broken_symlink = []
access_denied = []
for suite in test_suite:
if suite[0] == test.MissingTest:
missing.append(suite[1]['params']['id'])
elif suite[0] == test.NotATest:
not_test.append(suite[1]['params']['id'])
elif suite[0] == BrokenSymlink:
broken_symlink.append(suite[1]['params']['id'])
elif suite[0] == AccessDeniedPath:
access_denied.append(suite[1]['params']['id'])
return missing, not_test, broken_symlink, access_denied
def validate_ui(self, test_suite, ignore_missing=False,
ignore_not_test=False, ignore_broken_symlinks=False,
ignore_access_denied=False):
"""
Validate test suite and deliver error messages to the UI
:param test_suite: List of tuples (test_class, test_params)
:type test_suite: list
:return: List with error messages
:rtype: list
"""
(missing, not_test, broken_symlink,
access_denied) = self.validate(test_suite)
broken_symlink_msg = ''
if (not ignore_broken_symlinks) and broken_symlink:
if len(broken_symlink) == 1:
broken_symlink_msg = ("Cannot access '%s': Broken symlink" %
", ".join(broken_symlink))
elif len(broken_symlink) > 1:
broken_symlink_msg = ("Cannot access '%s': Broken symlinks" %
", ".join(broken_symlink))
access_denied_msg = ''
if (not ignore_access_denied) and access_denied:
if len(access_denied) == 1:
access_denied_msg = ("Cannot access '%s': Access denied" %
", ".join(access_denied))
elif len(access_denied) > 1:
access_denied_msg = ("Cannot access '%s': Access denied" %
", ".join(access_denied))
missing_msg = ''
if (not ignore_missing) and missing:
if len(missing) == 1:
missing_msg = ("Cannot access '%s': File not found" %
", ".join(missing))
elif len(missing) > 1:
missing_msg = ("Cannot access '%s': Files not found" %
", ".join(missing))
not_test_msg = ''
if (not ignore_not_test) and not_test:
if len(not_test) == 1:
not_test_msg = ("File '%s' is not an avocado test" %
", ".join(not_test))
elif len(not_test) > 1:
not_test_msg = ("Files '%s' are not avocado tests" %
", ".join(not_test))
return [msg for msg in
[access_denied_msg, broken_symlink_msg, missing_msg,
not_test_msg] if msg]
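Callers that want the raw classification rather than formatted
messages can call the static validate() directly; a sketch, with
test_suite as returned by discover() above:

    missing, not_test, broken, denied = TestLoader.validate(test_suite)
    if missing:
        print('File(s) not found: %s' % ', '.join(missing))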
def load_test(self, test_factory):
"""
Load test from the test factory.
......
@@ -98,8 +98,6 @@ class ReportModel(object):
mapping = {"TEST_NA": "warning",
"ABORT": "danger",
"ERROR": "danger",
"NOT_FOUND": "warning",
"NOT_A_TEST": "warning",
"FAIL": "danger",
"WARN": "warning",
"PASS": "success",
@@ -204,7 +202,6 @@ class HTMLTestResult(TestResult):
'total': len(self.json['tests']),
'pass': len(self.passed),
'errors': len(self.errors),
'not_found': len(self.not_found),
'failures': len(self.failed),
'skip': len(self.skipped),
'time': self.total_time
......
@@ -77,7 +77,6 @@ class JSONTestResult(TestResult):
'total': self.tests_total,
'pass': len(self.passed),
'errors': len(self.errors),
'not_found': len(self.not_found),
'failures': len(self.failed),
'skip': len(self.skipped),
'time': self.total_time
......
@@ -39,7 +39,6 @@ class RemoteTestRunner(TestRunner):
:param urls: a string with test URLs.
:return: a dictionary with test results.
"""
urls = urls.split()
avocado_cmd = ('cd %s; avocado run --force-job-id %s --json - --archive %s' %
(self.remote_test_dir, self.result.stream.job_unique_id, " ".join(urls)))
result = self.result.remote.run(avocado_cmd, ignore_status=True)
@@ -62,10 +61,8 @@ class RemoteTestRunner(TestRunner):
:return: a list of test failures.
"""
failures = []
urls = [x['id'] for x in params_list]
self.result.urls = urls
self.result.setup()
results = self.run_test(' '.join(urls))
results = self.run_test(self.result.urls)
remote_log_dir = os.path.dirname(results['debuglog'])
self.result.start_tests()
for tst in results['tests']:
@@ -160,8 +157,6 @@ class RemoteTestResult(TestResult):
"""
self.stream.notify(event='message', msg="PASS : %d" % len(self.passed))
self.stream.notify(event='message', msg="ERROR : %d" % len(self.errors))
self.stream.notify(event='message', msg="NOT FOUND : %d" % len(self.not_found))
self.stream.notify(event='message', msg="NOT A TEST : %d" % len(self.not_a_test))
self.stream.notify(event='message', msg="FAIL : %d" % len(self.failed))
self.stream.notify(event='message', msg="SKIP : %d" % len(self.skipped))
self.stream.notify(event='message', msg="WARN : %d" % len(self.warned))
@@ -201,24 +196,6 @@ class RemoteTestResult(TestResult):
TestResult.add_error(self, test)
self.stream.set_test_status(status='ERROR', state=test)
def add_not_found(self, test):
"""
Called when a test path was not found.
:param test: :class:`avocado.test.Test` instance.
"""
TestResult.add_not_found(self, test)
self.stream.set_test_status(status='NOT_FOUND', state=test)
def add_not_a_test(self, test):
"""
Called when a file is not an avocado test.
:param test: :class:`avocado.test.Test` instance.
"""
TestResult.add_not_a_test(self, test)
self.stream.set_test_status(status='NOT_A_TEST', state=test)
def add_fail(self, test):
"""
Called when a test fails.
......
@@ -39,7 +39,6 @@ class VMTestRunner(TestRunner):
:param urls: a string with test URLs.
:return: a dictionary with test results.
"""
urls = urls.split()
avocado_cmd = ('cd %s; avocado run --force-job-id %s --json - --archive %s' %
(self.remote_test_dir, self.result.stream.job_unique_id, " ".join(urls)))
result = self.result.vm.remote.run(avocado_cmd, ignore_status=True)
@@ -61,10 +60,8 @@ class VMTestRunner(TestRunner):
:return: a list of test failures.
"""
failures = []
urls = [x['id'] for x in params_list]
self.result.urls = urls
self.result.setup()
results = self.run_test(' '.join(urls))
results = self.run_test(self.result.urls)
remote_log_dir = os.path.dirname(results['debuglog'])
self.result.start_tests()
for tst in results['tests']:
@@ -187,8 +184,6 @@ class VMTestResult(TestResult):
"""
self.stream.notify(event='message', msg="PASS : %d" % len(self.passed))
self.stream.notify(event='message', msg="ERROR : %d" % len(self.errors))
self.stream.notify(event='message', msg="NOT FOUND : %d" % len(self.not_found))
self.stream.notify(event='message', msg="NOT A TEST : %d" % len(self.not_a_test))
self.stream.notify(event='message', msg="FAIL : %d" % len(self.failed))
self.stream.notify(event='message', msg="SKIP : %d" % len(self.skipped))
self.stream.notify(event='message', msg="WARN : %d" % len(self.warned))
@@ -228,24 +223,6 @@ class VMTestResult(TestResult):
TestResult.add_error(self, test)
self.stream.set_test_status(status='ERROR', state=test)
def add_not_found(self, test):
"""
Called when a test path was not found.
:param test: :class:`avocado.test.Test` instance.
"""
TestResult.add_not_found(self, test)
self.stream.set_test_status(status='NOT_FOUND', state=test)
def add_not_a_test(self, test):
"""
Called when a file is not an avocado test.
:param test: :class:`avocado.test.Test` instance.
"""
TestResult.add_not_a_test(self, test)
self.stream.set_test_status(status='NOT_A_TEST', state=test)
def add_fail(self, test):
"""
Called when a test fails.
......
@@ -52,7 +52,7 @@ class XmlResult(object):
self.testsuite = '<testsuite name="avocado" tests="{tests}" errors="{errors}" failures="{failures}" skip="{skip}" time="{total_time}" timestamp="%s">' % timestamp
self.testcases = []
def end_testsuite(self, tests, errors, not_found, failures, skip, total_time):
def end_testsuite(self, tests, errors, failures, skip, total_time):
"""
End of testsuite node.
@@ -62,10 +62,8 @@ class XmlResult(object):
:param skip: Number of tests skipped.
:param total_time: The total time of test execution.
"""
errors += not_found # In XML count "not found tests" as error
values = {'tests': tests,
'errors': errors,
'not_found': not_found,
'failures': failures,
'skip': skip,
'total_time': total_time}
@@ -190,8 +188,6 @@ class xUnitTestResult(TestResult):
self.xml.add_skip(state)
elif state['status'] == 'FAIL':
self.xml.add_failure(state)
elif state['status'] == 'NOT_FOUND':
self.xml.add_error(state)
elif state['status'] == 'ERROR':
self.xml.add_error(state)
@@ -204,7 +200,6 @@ class xUnitTestResult(TestResult):
'errors': len(self.errors),
'failures': len(self.failed),
'skip': len(self.skipped),
'not_found': len(self.not_found),
'total_time': self.total_time}
self.xml.end_testsuite(**values)
contents = self.xml.get_contents()
......
@@ -81,10 +81,6 @@ class TestResultProxy(object):
for output_plugin in self.output_plugins:
output_plugin.add_error(state)
def add_not_found(self, state):
for output_plugin in self.output_plugins:
output_plugin.add_not_found(state)
def add_fail(self, state):
for output_plugin in self.output_plugins:
output_plugin.add_fail(state)
@@ -127,8 +123,6 @@ class TestResult(object):
self.total_time = 0.0
self.passed = []
self.errors = []
self.not_found = []
self.not_a_test = []
self.failed = []
self.skipped = []
self.warned = []
@@ -189,28 +183,6 @@ class TestResult(object):
"""
self.errors.append(state)
def add_not_found(self, state):
"""
Called when a test was not found.
Causes: non existing path or could not resolve alias.
:param state: result of :class:`avocado.test.Test.get_state`.
:type state: dict
"""
self.not_found.append(state)
def add_not_a_test(self, state):
"""
Called when a file is not an avocado test
Causes: Non python, non executable file or python file non executable with no avocado test class in it.
:param state: result of :class:`avocado.test.Test.get_state`.
:type state: dict
"""
self.not_a_test.append(state)
def add_fail(self, state):
"""
Called when a test fails.
@@ -245,8 +217,6 @@ class TestResult(object):
"""
status_map = {'PASS': self.add_pass,
'ERROR': self.add_error,
'NOT_FOUND': self.add_not_found,
'NOT_A_TEST': self.add_not_a_test,
'FAIL': self.add_fail,
'TEST_NA': self.add_skip,
'WARN': self.add_warn}
@@ -285,8 +255,6 @@ class HumanTestResult(TestResult):
self.stream.notify(event="message", msg="FAIL : %d" % len(self.failed))
self.stream.notify(event="message", msg="SKIP : %d" % len(self.skipped))
self.stream.notify(event="message", msg="WARN : %d" % len(self.warned))
self.stream.notify(event="message", msg="NOT FOUND : %d" % len(self.not_found))
self.stream.notify(event="message", msg="NOT A TEST : %d" % len(self.not_a_test))
self.stream.notify(event="message", msg="TIME : %.2f s" % self.total_time)
def start_test(self, state):
@@ -327,26 +295,6 @@ class HumanTestResult(TestResult):
TestResult.add_error(self, state)
self.stream.set_test_status(status='ERROR', state=state)
def add_not_found(self, state):
"""
Called when a test was not found.
:param state: result of :class:`avocado.test.Test.get_state`.
:type state: dict
"""
TestResult.add_not_found(self, state)
self.stream.set_test_status(status='NOT_FOUND', state=state)
def add_not_a_test(self, state):
"""
Called when a given file is not a test.
:param state: result of :class:`avocado.test.Test.get_state`.
:type state: dict
"""
TestResult.add_not_a_test(self, state)
self.stream.set_test_status(status='NOT_A_TEST', state=state)
def add_fail(self, state):
"""
Called when a test fails.
......
@@ -74,6 +74,8 @@ class TestRunner(object):
try:
instance = self.job.test_loader.load_test(test_factory)
if instance.runner_queue is None:
instance.runner_queue = queue
runtime.CURRENT_TEST = instance
early_state = instance.get_state()
queue.put(early_state)
@@ -110,11 +112,11 @@ class TestRunner(object):
test_state['text_output'] = log_file_obj.read()
return test_state
def run_suite(self, params_list):
def run_suite(self, test_suite):
"""
Run one or more tests and report with test result.
:param params_list: a list of param dicts.
:param test_suite: a list of tests to run.
:return: a list of test failures.
"""
@@ -123,7 +125,6 @@ class TestRunner(object):
self.job.sysinfo.start_job_hook()
self.result.start_tests()
q = queues.SimpleQueue()
test_suite = self.job.test_loader.discover(params_list, q)
for test_factory in test_suite:
p = multiprocessing.Process(target=self.run_test,
......
@@ -64,7 +64,6 @@ Once everything is verified and covered, you may run your test. Example::
(2/2) examples/tests/failtest.py: FAIL (0.00 s)
PASS : 1
ERROR : 0
NOT FOUND : 0
FAIL : 1
SKIP : 0
WARN : 0
......
@@ -71,7 +71,6 @@ Once everything is verified and covered, you may run your test. Example::
(2/2) examples/tests/failtest.py: FAIL (0.00 s)
PASS : 1
ERROR : 0
NOT FOUND : 0
FAIL : 1
SKIP : 0
WARN : 0
......
@@ -354,7 +354,6 @@ option --output-check-record all to the test runner::
FAIL : 0
SKIP : 0
WARN : 0
NOT FOUND : 0
TIME : 2.20 s
@@ -388,7 +387,6 @@ Let's record the output for this one::
FAIL : 0
SKIP : 0
WARN : 0
NOT FOUND : 0
TIME : 0.01 s
After this is done, you'll notice that the test data directory
@@ -418,7 +416,6 @@ happens if we change the ``stdout.expected`` file contents to ``Hello, avocado!``
FAIL : 1
SKIP : 0
WARN : 0
NOT FOUND : 0
TIME : 0.02 s
Verifying the failure reason::
......
@@ -84,7 +84,6 @@ directories. The output should be similar to::
FAIL : 0
SKIP : 0
WARN : 0
NOT FOUND : 0
TIME : 1.00 s
The test directories will vary depending on your system and
@@ -234,7 +233,6 @@ And the output should look like::
FAIL : 0
SKIP : 0
WARN : 0
NOT FOUND : 0
TIME : 16.53 s
The `multiplex` plugin and the test runner support two kinds of global
@@ -397,7 +395,6 @@ option --output-check-record all to the test runner::
FAIL : 0
SKIP : 0
WARN : 0
NOT FOUND : 0
TIME : 2.20 s
After the reference files are added, the check process is transparent, in the
@@ -433,7 +430,6 @@ Let's record the output (both stdout and stderr) for this one::
FAIL : 0
SKIP : 0
WARN : 0
NOT FOUND : 0
TIME : 0.01 s
After this is done, you'll notice that the test data directory
@@ -479,7 +475,6 @@ The output should look like::
(1/1) sleeptest.py: PASS (1.01 s)
PASS : 1
ERROR : 0
NOT FOUND : 0
FAIL : 0
SKIP : 0
WARN : 0
......
@@ -82,7 +82,7 @@ class RunnerOperationTest(unittest.TestCase):
os.chdir(basedir)
cmd_line = './scripts/avocado run --sysinfo=off bogustest'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
expected_rc = 2
unexpected_rc = 3
self.assertNotEqual(result.exit_status, unexpected_rc,
"Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
@@ -169,10 +169,9 @@ class RunnerOperationTest(unittest.TestCase):
os.chdir(basedir)
cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
expected_rc = 2
self.assertEqual(result.exit_status, expected_rc)
self.assertIn('NOT_FOUND', result.stdout)
self.assertIn('NOT FOUND : 1', result.stdout)
self.assertIn('File not found', result.stdout)
def test_invalid_unique_id(self):
cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar skiptest'
@@ -441,13 +440,6 @@ class PluginsXunitTest(PluginsTest):
def test_xunit_plugin_errortest(self):
self.run_and_check('errortest', 1, 1, 1, 0, 0, 0)
def test_xunit_plugin_notfoundtest(self):
self.run_and_check('sbrubles', 1, 1, 1, 0, 0, 0)
def test_xunit_plugin_mixedtest(self):
self.run_and_check('passtest failtest skiptest errortest sbrubles',
1, 5, 2, 0, 1, 1)
class ParseJSONError(Exception):
pass
@@ -455,7 +447,7 @@ class ParseJSONError(Exception):
class PluginsJSONTest(PluginsTest):
def run_and_check(self, testname, e_rc, e_ntests, e_nerrors, e_nnotfound,
def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
e_nfailures, e_nskip):
os.chdir(basedir)
cmd_line = './scripts/avocado run --sysinfo=off --json - --archive %s' % testname
@@ -478,9 +470,6 @@ class PluginsJSONTest(PluginsTest):
n_errors = json_data['errors']
self.assertEqual(n_errors, e_nerrors,
"Different number of expected tests")
n_not_found = json_data['not_found']
self.assertEqual(n_not_found, e_nnotfound,
"Different number of not found tests")
n_failures = json_data['failures']
self.assertEqual(n_failures, e_nfailures,
"Different number of expected tests")
@@ -489,23 +478,16 @@ class PluginsJSONTest(PluginsTest):
"Different number of skipped tests")
def test_json_plugin_passtest(self):
self.run_and_check('passtest', 0, 1, 0, 0, 0, 0)
self.run_and_check('passtest', 0, 1, 0, 0, 0)
def test_json_plugin_failtest(self):
self.run_and_check('failtest', 1, 1, 0, 0, 1, 0)
self.run_and_check('failtest', 1, 1, 0, 1, 0)
def test_json_plugin_skiptest(self):
self.run_and_check('skiptest', 0, 1, 0, 0, 0, 1)
self.run_and_check('skiptest', 0, 1, 0, 0, 1)
def test_json_plugin_errortest(self):
self.run_and_check('errortest', 1, 1, 1, 0, 0, 0)
def test_json_plugin_notfoundtest(self):
self.run_and_check('sbrubles', 1, 1, 0, 1, 0, 0)
def test_json_plugin_mixedtest(self):
self.run_and_check('passtest failtest skiptest errortest sbrubles',
1, 5, 1, 1, 1, 1)
self.run_and_check('errortest', 1, 1, 1, 0, 0)
if __name__ == '__main__':
unittest.main()
@@ -73,11 +73,11 @@ class LoaderTestFunctional(unittest.TestCase):
simple_test.save()
cmd_line = './scripts/avocado run --sysinfo=off %s' % simple_test.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
expected_rc = 2
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertIn('NOT_A_TEST', result.stdout)
self.assertIn('is not an avocado test', result.stdout)
simple_test.remove()
def test_pass(self):
@@ -137,11 +137,11 @@ class LoaderTestFunctional(unittest.TestCase):
avocado_not_a_test.save()
cmd_line = './scripts/avocado run --sysinfo=off %s' % avocado_not_a_test.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
expected_rc = 2
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertIn('NOT_A_TEST', result.stdout)
self.assertIn('is not an avocado test', result.stdout)
avocado_not_a_test.remove()
if __name__ == '__main__':
......
@@ -73,8 +73,7 @@ class LoaderTest(unittest.TestCase):
'avocado_loader_unittest')
simple_test.save()
test_class, test_parameters = (
self.loader.discover_test(params={'id': simple_test.path},
queue=self.queue))
self.loader.discover_test(params={'id': simple_test.path}))
self.assertTrue(test_class == test.SimpleTest, test_class)
tc = test_class(**test_parameters)
tc.action()
@@ -86,8 +85,7 @@ class LoaderTest(unittest.TestCase):
mode=0664)
simple_test.save()
test_class, test_parameters = (
self.loader.discover_test(params={'id': simple_test.path},
queue=self.queue))
self.loader.discover_test(params={'id': simple_test.path}))
self.assertTrue(test_class == test.NotATest, test_class)
tc = test_class(**test_parameters)
self.assertRaises(exceptions.NotATestError, tc.action)
@@ -99,8 +97,7 @@ class LoaderTest(unittest.TestCase):
'avocado_loader_unittest')
avocado_pass_test.save()
test_class, test_parameters = (
self.loader.discover_test(params={'id': avocado_pass_test.path},
queue=self.queue))
self.loader.discover_test(params={'id': avocado_pass_test.path}))
self.assertTrue(str(test_class) == "<class 'passtest.PassTest'>",
str(test_class))
self.assertTrue(issubclass(test_class, test.Test))
@@ -114,8 +111,7 @@ class LoaderTest(unittest.TestCase):
'avocado_loader_unittest')
avocado_buggy_test.save()
test_class, test_parameters = (
self.loader.discover_test(params={'id': avocado_buggy_test.path},
queue=self.queue))
self.loader.discover_test(params={'id': avocado_buggy_test.path}))
self.assertTrue(test_class == test.SimpleTest, test_class)
tc = test_class(**test_parameters)
self.assertRaises(exceptions.TestFail, tc.action)
@@ -128,8 +124,7 @@ class LoaderTest(unittest.TestCase):
mode=0664)
avocado_buggy_test.save()
test_class, test_parameters = (
self.loader.discover_test(params={'id': avocado_buggy_test.path},
queue=self.queue))
self.loader.discover_test(params={'id': avocado_buggy_test.path}))
self.assertTrue(test_class == test.BuggyTest, test_class)
tc = test_class(**test_parameters)
self.assertRaises(ImportError, tc.action)
@@ -142,8 +137,7 @@ class LoaderTest(unittest.TestCase):
mode=0664)
avocado_not_a_test.save()
test_class, test_parameters = (
self.loader.discover_test(params={'id': avocado_not_a_test.path},
queue=self.queue))
self.loader.discover_test(params={'id': avocado_not_a_test.path}))
self.assertTrue(test_class == test.NotATest, test_class)
tc = test_class(**test_parameters)
self.assertRaises(exceptions.NotATestError, tc.action)
@@ -154,8 +148,7 @@ class LoaderTest(unittest.TestCase):
'avocado_loader_unittest')
avocado_not_a_test.save()
test_class, test_parameters = (
self.loader.discover_test(params={'id': avocado_not_a_test.path},
queue=self.queue))
self.loader.discover_test(params={'id': avocado_not_a_test.path}))
self.assertTrue(test_class == test.SimpleTest, test_class)
tc = test_class(**test_parameters)
# The test can't be executed (no shebang), raising an OSError
@@ -169,8 +162,7 @@ class LoaderTest(unittest.TestCase):
'avocado_loader_unittest')
avocado_simple_test.save()
test_class, test_parameters = (
self.loader.discover_test(params={'id': avocado_simple_test.path},
queue=self.queue))
self.loader.discover_test(params={'id': avocado_simple_test.path}))
self.assertTrue(test_class == test.SimpleTest)
tc = test_class(**test_parameters)
tc.action()
@@ -183,8 +175,7 @@ class LoaderTest(unittest.TestCase):
mode=0664)
avocado_simple_test.save()
test_class, test_parameters = (
self.loader.discover_test(params={'id': avocado_simple_test.path},
queue=self.queue))
self.loader.discover_test(params={'id': avocado_simple_test.path}))
self.assertTrue(test_class == test.NotATest)
tc = test_class(**test_parameters)
self.assertRaises(exceptions.NotATestError, tc.action)
......