Commit 9b077d2c authored by Lucas Meneghel Rodrigues

Merge pull request #225 from lmr/logging-refactor

Logging refactor - Sprint 14 deliverables
......@@ -12,6 +12,7 @@ all:
@echo "make build-deb-all - Generate both source and binary debian packages"
@echo "make build-rpm-all - Generate both source and binary RPMs"
@echo "make man - Generate the avocado man page"
@echo "make check - Runs tree static check, unittests and functional tests"
@echo "make clean - Get rid of scratch and byte files"
source:
......
......@@ -35,6 +35,7 @@ DEFAULT_LOGGING = {
'level': 'INFO',
'class': 'avocado.core.output.ProgressStreamHandler',
'formatter': 'brief',
'stream': 'ext://sys.stdout',
},
},
'loggers': {
......@@ -51,6 +52,16 @@ DEFAULT_LOGGING = {
'level': 'DEBUG',
'propagate': False,
},
'avocado.test.stdout': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': False,
},
'avocado.test.stderr': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': False,
},
}
}
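
For reference, here is a minimal, self-contained sketch (not part of this commit) of a dictConfig in the same shape as DEFAULT_LOGGING above: a console handler bound to stdout via 'ext://sys.stdout', and null handlers that silence the new per-test stream loggers until file handlers are attached at runtime. The ProgressStreamHandler is replaced by the stock logging.StreamHandler so the sketch stands alone::

    import logging
    import logging.config

    SKETCH_LOGGING = {
        'version': 1,
        'formatters': {'brief': {'format': '%(message)s'}},
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',  # stand-in for ProgressStreamHandler
                'level': 'INFO',
                'formatter': 'brief',
                'stream': 'ext://sys.stdout',
            },
            'null': {'class': 'logging.NullHandler'},
        },
        'loggers': {
            'avocado.app': {'handlers': ['console'], 'level': 'INFO',
                            'propagate': False},
            'avocado.test.stdout': {'handlers': ['null'], 'level': 'DEBUG',
                                    'propagate': False},
            'avocado.test.stderr': {'handlers': ['null'], 'level': 'DEBUG',
                                    'propagate': False},
        },
    }

    logging.config.dictConfig(SKETCH_LOGGING)
    logging.getLogger('avocado.app').info('shown on stdout')
    logging.getLogger('avocado.test.stdout').debug('discarded by the null handler')
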
......
......@@ -45,6 +45,10 @@ class ProgressStreamHandler(logging.StreamHandler):
self.handleError(record)
class PagerNotFoundError(Exception):
pass
class Paginator(object):
"""
......@@ -54,8 +58,17 @@ class Paginator(object):
"""
def __init__(self):
less_cmd = process.find_command('less')
self.pipe = os.popen('%s -FRSX' % less_cmd, 'w')
try:
paginator = "%s -FRSX" % process.find_command('less')
except process.CmdNotFoundError:
paginator = None
paginator = os.environ.get('PAGER', paginator)
if paginator is None:
self.pipe = sys.stdout
else:
self.pipe = os.popen(paginator, 'w')
def __del__(self):
try:
......@@ -72,16 +85,13 @@ class Paginator(object):
def get_paginator():
"""
Get a paginator. If we can't do that, return stdout.
Get a paginator.
The paginator is 'less'. The paginator is a useful feature inspired in
programs such as git, since it lets you scroll up and down large buffers
of text, increasing the program's usability.
The paginator is whatever the user sets as $PAGER, or 'less', or, if all else fails, sys.stdout.
It is a useful feature inspired by programs such as git, since it lets you scroll up and down
large buffers of text, increasing the program's usability.
"""
try:
return Paginator()
except process.CmdNotFoundError:
return sys.stdout
return Paginator()
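
The pager selection above boils down to: honor $PAGER if set, otherwise try 'less -FRSX', otherwise fall back to plain stdout. A rough standalone illustration of that order (the helper name and the use of distutils.spawn.find_executable in place of process.find_command are assumptions made for self-containment)::

    import os
    import sys
    from distutils.spawn import find_executable  # stand-in for process.find_command

    def pick_pager():
        less_cmd = find_executable('less')
        paginator = '%s -FRSX' % less_cmd if less_cmd else None
        paginator = os.environ.get('PAGER', paginator)
        if paginator is None:
            return sys.stdout
        return os.popen(paginator, 'w')

    pipe = pick_pager()
    pipe.write('a very long listing...\n')
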
def add_console_handler(logger):
......@@ -162,6 +172,14 @@ class TermSupport(object):
"""
return self.FAIL + msg + self.ENDC
def warn_header_str(self, msg):
"""
Print a warning header string (yellow colored).
If the output does not support colors, just return the original string.
"""
return self.FAIL + msg + self.ENDC
def healthy_str(self, msg):
"""
Print a healthy string (green colored).
......@@ -230,187 +248,339 @@ class TermSupport(object):
term_support = TermSupport()
class OutputManager(object):
class LoggingFile(object):
"""
Takes care of both disk logs and stdout/err logs.
File-like object that will receive messages and pass them to logging.
"""
THROBBER_STEPS = ['-', '\\', '|', '/']
THROBBER_MOVES = [term_support.MOVE_BACK + THROBBER_STEPS[0],
term_support.MOVE_BACK + THROBBER_STEPS[1],
term_support.MOVE_BACK + THROBBER_STEPS[2],
term_support.MOVE_BACK + THROBBER_STEPS[3]]
def __init__(self, prefix='', level=logging.DEBUG,
logger=logging.getLogger()):
"""
Constructor. Sets prefixes and which logger is going to be used.
:param prefix: The prefix for each line logged by this object.
"""
self._prefix = prefix
self._level = level
self._buffer = []
self._logger = logger
def write(self, data):
""""
Writes data only if it constitutes a whole line. If it's not the case,
store it in a buffer and wait until we have a complete line.
:param data - Raw data (a string) that will be processed.
"""
# splitlines() discards a trailing blank line, so use split() instead
data_lines = data.split('\n')
if len(data_lines) > 1:
self._buffer.append(data_lines[0])
self._flush_buffer()
for line in data_lines[1:-1]:
self._log_line(line)
if data_lines[-1]:
self._buffer.append(data_lines[-1])
def __init__(self, logger_name='avocado.app'):
self.console_log = logging.getLogger('avocado.app')
self.throbber_pos = 0
def writelines(self, lines):
""""
Writes itertable of lines
def throbber_progress(self, progress_from_test=False):
:param lines: An iterable of strings that will be processed.
"""
Give an interactive indicator of the test progress
for data in lines:
self.write(data)
:param progress_from_test: if indication of progress came explicitly
from the test. If false, it means the test
process is running, but not communicating
test specific progress.
:type progress_from_test: bool
:rtype: None
def _log_line(self, line):
"""
if progress_from_test:
self.log_healthy(self.THROBBER_MOVES[self.throbber_pos], True)
Passes lines of output to the logging module.
"""
self._logger.log(self._level, self._prefix + line)
def _flush_buffer(self):
if self._buffer:
self._log_line(''.join(self._buffer))
self._buffer = []
def flush(self):
self._flush_buffer()
def isatty(self):
return False
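
A hedged usage sketch for the LoggingFile defined above: redirect sys.stdout so that anything printed by test code is buffered per line and forwarded to the 'avocado.test.stdout' logger. The runner further down does essentially this, but with file handlers rather than the demo StreamHandler::

    import logging
    import sys

    stdout_logger = logging.getLogger('avocado.test.stdout')
    stdout_logger.addHandler(logging.StreamHandler())  # defaults to stderr, so no recursion
    stdout_logger.setLevel(logging.DEBUG)

    original_stdout = sys.stdout
    sys.stdout = LoggingFile(prefix='[stdout] ', logger=stdout_logger)
    try:
        print('hello from a test')  # buffered until the newline, then logged line by line
    finally:
        sys.stdout.flush()
        sys.stdout = original_stdout
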
class Throbber(object):
"""
Produces a spinner used to notify progress in the application UI.
"""
STEPS = ['-', '\\', '|', '/']
MOVES = [term_support.MOVE_BACK + STEPS[0],
term_support.MOVE_BACK + STEPS[1],
term_support.MOVE_BACK + STEPS[2],
term_support.MOVE_BACK + STEPS[3]]
def __init__(self):
self.position = 0
def _update_position(self):
if self.position == (len(self.MOVES) - 1):
self.position = 0
else:
self.log_partial(self.THROBBER_MOVES[self.throbber_pos], True)
self.position += 1
def render(self):
result = self.MOVES[self.position]
self._update_position()
return result
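
Purely for illustration, the spinner above is driven by calling render() repeatedly; each call returns MOVE_BACK plus the next of the four frames, wrapping around after '/'::

    import sys
    import time

    throbber = Throbber()  # the class defined just above
    for _ in range(8):
        sys.stdout.write(throbber.render())
        sys.stdout.flush()
        time.sleep(0.1)
    sys.stdout.write('\n')
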
class View(object):
"""
Takes care of both disk logs and stdout/err logs.
"""
def __init__(self, app_args=None, console_logger='avocado.app', use_paginator=False):
"""
Set up the console logger and the paginator mode.
if self.throbber_pos == (len(self.THROBBER_MOVES) - 1):
self.throbber_pos = 0
:param console_logger: logging.Logger identifier for the main app logger.
:type console_logger: str
:param use_paginator: Whether to use paginator mode. Set it to True if
the program is supposed to output a large list of
lines to the user and you want the user to be able
to scroll through them at will (think git log).
"""
self.app_args = app_args
self.use_paginator = use_paginator
self.console_log = logging.getLogger(console_logger)
if self.use_paginator:
self.paginator = get_paginator()
else:
self.throbber_pos += 1
self.paginator = None
self.throbber = Throbber()
self.tests_info = {}
def notify(self, event='message', msg=None):
mapping = {'message': self._log_ui_header,
'minor': self._log_ui_minor,
'error': self._log_ui_error,
'warning': self._log_ui_warning,
'partial': self._log_ui_partial}
if msg is not None:
mapping[event](msg)
def notify_progress(self, progress):
self._log_ui_throbber_progress(progress)
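
An assumed usage sketch of the event API introduced by View.notify() above; event names are taken from the mapping in notify(), and the actual coloring depends on TermSupport::

    view = View()                                         # no app_args, no paginator
    view.notify(event='message', msg='JOB ID : ...')      # header line
    view.notify(event='minor', msg='some plain detail')
    view.notify(event='warning', msg='something suspicious')
    view.notify(event='error', msg='something went wrong')
    view.notify_progress(True)                            # advance the throbber
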
def add_test(self, state):
self._log(msg=self._get_test_tag(state['tagged_name']),
skip_newline=True)
def set_test_status(self, status, state):
mapping = {'PASS': self._log_ui_status_pass,
'ERROR': self._log_ui_status_error,
'NOT_FOUND': self._log_ui_status_not_found,
'FAIL': self._log_ui_status_fail,
'SKIP': self._log_ui_status_skip,
'WARN': self._log_ui_status_warn}
mapping[status](state['time_elapsed'])
def set_tests_info(self, info):
self.tests_info.update(info)
def _get_test_tag(self, test_name):
return ('(%s/%s) %s: ' %
(self.tests_info['tests_run'],
self.tests_info['tests_total'], test_name))
def _log(self, msg, level=logging.INFO, skip_newline=False):
"""
Write a message to the avocado.app logger.
Write a message to the avocado.app logger or the paginator.
:param msg: Message to write
:type msg: string
"""
extra = {'skip_newline': skip_newline}
self.console_log.log(level=level, msg=msg, extra=extra)
silent = False
show_job_log = False
if self.app_args is not None:
if hasattr(self.app_args, 'silent'):
silent = self.app_args.silent
if hasattr(self.app_args, 'show_job_log'):
show_job_log = self.app_args.show_job_log
if not (silent or show_job_log):
if self.use_paginator:
if not skip_newline:
msg += '\n'
self.paginator.write(msg)
else:
extra = {'skip_newline': skip_newline}
self.console_log.log(level=level, msg=msg, extra=extra)
def _log_ui_info(self, msg, skip_newline=False):
"""
Log a :mod:`logging.INFO` message to the UI.
def start_file_logging(self, logfile, loglevel, unique_id):
:param msg: Message to write.
"""
Start the main file logging.
self._log(msg, level=logging.INFO, skip_newline=skip_newline)
:param logfile: Path to file that will receive logging.
:param loglevel: Level of the logger. Example: :mod:`logging.DEBUG`.
:param unique_id: job.Job() unique id attribute.
def _log_ui_error_base(self, msg, skip_newline=False):
"""
self.job_unique_id = unique_id
self.debuglog = logfile
self.file_handler = logging.FileHandler(filename=logfile)
self.file_handler.setLevel(loglevel)
fmt = '%(asctime)s %(module)-10.10s L%(lineno)-.4d %(levelname)-5.5s| %(message)s'
formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')
self.file_handler.setFormatter(formatter)
test_logger = logging.getLogger('avocado.test')
utils_logger = logging.getLogger('avocado.utils')
linux_logger = logging.getLogger('avocado.linux')
test_logger.addHandler(self.file_handler)
utils_logger.addHandler(self.file_handler)
linux_logger.addHandler(self.file_handler)
Log a :mod:`logging.ERROR` message to the UI.
def stop_file_logging(self):
"""
Simple helper for removing a handler from the current logger.
:param msg: Message to write.
"""
test_logger = logging.getLogger('avocado.test')
utils_logger = logging.getLogger('avocado.utils')
linux_logger = logging.getLogger('avocado.linux')
test_logger.removeHandler(self.file_handler)
utils_logger.removeHandler(self.file_handler)
linux_logger.removeHandler(self.file_handler)
self.file_handler.close()
self._log(msg, level=logging.ERROR, skip_newline=skip_newline)
def info(self, msg, skip_newline=False):
def _log_ui_healthy(self, msg, skip_newline=False):
"""
Log a :mod:`logging.INFO` message.
Log a message that indicates that things are going as expected.
:param msg: Message to write.
"""
self._log(msg, level=logging.INFO, skip_newline=skip_newline)
self._log_ui_info(term_support.healthy_str(msg), skip_newline)
def error(self, msg):
def _log_ui_partial(self, msg, skip_newline=False):
"""
Log a :mod:`logging.INFO` message.
Log a message that indicates something (at least) partially OK
:param msg: Message to write.
"""
self._log(msg, level=logging.ERROR)
self._log_ui_info(term_support.partial_str(msg), skip_newline)
def log_healthy(self, msg, skip_newline=False):
def _log_ui_header(self, msg):
"""
Log a message that indicates something healthy is going on
Log a header message.
:param msg: Message to write.
"""
self.info(term_support.healthy_str(msg), skip_newline)
self._log_ui_info(term_support.header_str(msg))
def log_partial(self, msg, skip_newline=False):
def _log_ui_minor(self, msg):
"""
Log a message that indicates something (at least) partially OK
Log a minor message.
:param msg: Message to write.
"""
self.info(term_support.partial_str(msg), skip_newline)
self._log_ui_info(msg)
def log_header(self, msg):
def _log_ui_error(self, msg):
"""
Log a header message.
Log an error message (useful for critical errors).
:param msg: Message to write.
"""
self.info(term_support.header_str(msg))
self._log_ui_error_base(term_support.fail_header_str(msg))
def log_fail_header(self, msg):
def _log_ui_warning(self, msg):
"""
Log a fail header message (red, for critical errors).
Log a warning message (useful for non-fatal issues).
:param msg: Message to write.
"""
self.info(term_support.fail_header_str(msg))
self._log_ui_info(term_support.warn_header_str(msg))
def log_pass(self, t_elapsed):
def _log_ui_status_pass(self, t_elapsed):
"""
Log a PASS message.
Log a PASS status message for a given operation.
:param t_elapsed: Time it took for the operation to complete.
"""
normal_pass_msg = term_support.pass_str() + " (%.2f s)" % t_elapsed
self.info(normal_pass_msg)
self._log_ui_info(normal_pass_msg)
def log_error(self, t_elapsed):
def _log_ui_status_error(self, t_elapsed):
"""
Log an ERROR message.
Log an ERROR status message for a given operation.
:param t_elapsed: Time it took for the operation to complete.
"""
normal_error_msg = term_support.error_str() + " (%.2f s)" % t_elapsed
self.error(normal_error_msg)
self._log_ui_error_base(normal_error_msg)
def log_not_found(self, t_elapsed):
def _log_ui_status_not_found(self, t_elapsed):
"""
Log a NOT_FOUND message.
Log a NOT_FOUND status message for a given operation.
:param t_elapsed: Time it took for the operation to complete.
"""
normal_error_msg = term_support.not_found_str() + " (%.2f s)" % t_elapsed
self.error(normal_error_msg)
self._log_ui_error_base(normal_error_msg)
def log_fail(self, t_elapsed):
def _log_ui_status_fail(self, t_elapsed):
"""
Log a FAIL message.
Log a FAIL status message for a given operation.
:param t_elapsed: Time it took for the operation to complete.
"""
normal_fail_msg = term_support.fail_str() + " (%.2f s)" % t_elapsed
self.error(normal_fail_msg)
self._log_ui_error_base(normal_fail_msg)
def log_skip(self, t_elapsed):
def _log_ui_status_skip(self, t_elapsed):
"""
Log a SKIP message.
Log a SKIP status message for a given operation.
:param t_elapsed: Time it took for the operation to complete.
"""
normal_skip_msg = term_support.skip_str()
self.info(normal_skip_msg)
self._log_ui_info(normal_skip_msg)
def log_warn(self, t_elapsed):
def _log_ui_status_warn(self, t_elapsed):
"""
Log a WARN message.
Log a WARN status message for a given operation.
:param t_elapsed: Time it took for the operation to complete.
"""
normal_warn_msg = term_support.warn_str() + " (%.2f s)" % t_elapsed
self.error(normal_warn_msg)
self._log_ui_error_base(normal_warn_msg)
def _log_ui_throbber_progress(self, progress_from_test=False):
"""
Give an interactive indicator of the test progress
:param progress_from_test: if indication of progress came explicitly
from the test. If false, it means the test
process is running, but not communicating
test specific progress.
:type progress_from_test: bool
:rtype: None
"""
if progress_from_test:
self._log_ui_healthy(self.throbber.render(), True)
else:
self._log_ui_partial(self.throbber.render(), True)
def start_file_logging(self, logfile, loglevel, unique_id):
"""
Start the main file logging.
:param logfile: Path to file that will receive logging.
:param loglevel: Level of the logger. Example: :mod:`logging.DEBUG`.
:param unique_id: job.Job() unique id attribute.
"""
self.job_unique_id = unique_id
self.debuglog = logfile
self.file_handler = logging.FileHandler(filename=logfile)
self.file_handler.setLevel(loglevel)
fmt = '%(asctime)s %(module)-10.10s L%(lineno)-.4d %(levelname)-5.5s| %(message)s'
formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')
self.file_handler.setFormatter(formatter)
test_logger = logging.getLogger('avocado.test')
linux_logger = logging.getLogger('avocado.linux')
test_logger.addHandler(self.file_handler)
linux_logger.addHandler(self.file_handler)
def stop_file_logging(self):
"""
Simple helper for removing a handler from the current logger.
"""
test_logger = logging.getLogger('avocado.test')
linux_logger = logging.getLogger('avocado.linux')
test_logger.removeHandler(self.file_handler)
linux_logger.removeHandler(self.file_handler)
self.file_handler.close()
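
An illustrative (assumed) driving sequence for the file logging above, as a job would use it; the log path and the unique id are hypothetical placeholders::

    import logging

    view = View()
    view.start_file_logging(logfile='/tmp/avocado-job.log',  # hypothetical path
                            loglevel=logging.DEBUG,
                            unique_id='0' * 40)              # hypothetical 40-char job id
    logging.getLogger('avocado.test').info('recorded in the job log file')
    view.stop_file_logging()
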
......@@ -128,6 +128,8 @@ class TestRunner(object):
:param queue: Multiprocess queue.
:type queue: :class`multiprocessing.Queue` instance.
"""
sys.stdout = output.LoggingFile(logger=logging.getLogger('avocado.test.stdout'))
sys.stderr = output.LoggingFile(logger=logging.getLogger('avocado.test.stderr'))
instance = self.load_test(params, queue)
runtime.CURRENT_TEST = instance
early_state = instance.get_state()
......@@ -214,16 +216,16 @@ class TestRunner(object):
if not test_state['running']:
break
else:
self.job.result_proxy.throbber_progress(True)
self.job.result_proxy.notify_progress(True)
if test_state['paused']:
msg = test_state['paused_msg']
if msg:
self.job.output_manager.log_partial(msg)
self.job.view.notify(event='partial', msg=msg)
except Queue.Empty:
if p.is_alive():
if ctrl_c_count == 0:
self.job.result_proxy.throbber_progress()
self.job.result_proxy.notify_progress()
else:
break
......@@ -239,17 +241,16 @@ class TestRunner(object):
ignore_window)
k_msg_3 = ("A new Ctrl+C sent after that will send a "
"SIGKILL to them")
self.job.output_manager.log_header("\n")
self.job.output_manager.log_header(k_msg_1)
self.job.output_manager.log_header(k_msg_2)
self.job.output_manager.log_header(k_msg_3)
self.job.view.notify(event='message', msg=k_msg_1)
self.job.view.notify(event='message', msg=k_msg_2)
self.job.view.notify(event='message', msg=k_msg_3)
stage_1_msg_displayed = True
ignore_time_started = time.time()
if (ctrl_c_count > 2) and (time_elapsed > ignore_window):
if not stage_2_msg_displayed:
k_msg_3 = ("Ctrl+C received after the ignore window. "
"Killing all active tests")
self.job.output_manager.log_header(k_msg_3)
self.job.view.notify(event='message', msg=k_msg_3)
stage_2_msg_displayed = True
os.kill(p.pid, signal.SIGKILL)
......@@ -267,7 +268,7 @@ class TestRunner(object):
# don't process other tests from the list
if ctrl_c_count > 0:
self.job.output_manager.log_header("")
self.job.view.notify(event='minor', msg='')
break
self.result.check_test(test_state)
......@@ -308,16 +309,23 @@ class Job(object):
if self.args is not None:
self.loglevel = args.log_level or logging.DEBUG
self.multiplex_file = args.multiplex_file
self.show_job_log = args.show_job_log
self.silent = args.silent
else:
self.loglevel = logging.DEBUG
self.multiplex_file = None
self.show_job_log = False
self.silent = False
if self.show_job_log:
if not self.silent:
output.add_console_handler(logging.getLogger('avocado.test'))
self.test_dir = data_dir.get_test_dir()
self.test_index = 1
self.status = "RUNNING"
self.result_proxy = result.TestResultProxy()
self.sysinfo_dir = path.init_dir(self.logdir, 'sysinfo')
self.sysinfo_logger = sysinfo.SysInfo(basedir=self.sysinfo_dir)
self.output_manager = output.OutputManager()
self.view = output.View(app_args=self.args)
def _make_test_runner(self):
if hasattr(self.args, 'test_runner'):
......@@ -329,25 +337,12 @@ class Job(object):
test_result=self.result_proxy)
def _set_output_plugins(self):
plugin_using_stdout = None
e_msg = ("Avocado could not set %s and %s both to output to stdout. ")
e_msg_2 = ("Please set the output flag of one of them to a file "
"to avoid conflicts.")
for key in self.args.__dict__:
if key.endswith('_result'):
result_class = getattr(self.args, key)
if issubclass(result_class, result.TestResult):
result_plugin = result_class(self.output_manager,
result_plugin = result_class(self.view,
self.args)
if result_plugin.output == '-':
if plugin_using_stdout is not None:
e_msg %= (plugin_using_stdout.command_line_arg_name,
result_plugin.command_line_arg_name)
self.output_manager.log_fail_header(e_msg)
self.output_manager.log_fail_header(e_msg_2)
sys.exit(error_codes.numeric_status['AVOCADO_JOB_FAIL'])
else:
plugin_using_stdout = result_plugin
self.result_proxy.add_output_plugin(result_plugin)
def _make_test_result(self):
......@@ -370,24 +365,28 @@ class Job(object):
xunit_file = os.path.join(self.logdir, 'results.xml')
args = argparse.Namespace()
args.xunit_output = xunit_file
xunit_plugin = xunit.xUnitTestResult(self.output_manager, args)
xunit_plugin = xunit.xUnitTestResult(self.view, args)
self.result_proxy.add_output_plugin(xunit_plugin)
# Setup the json plugin to output to the debug directory
json_file = os.path.join(self.logdir, 'results.json')
args = argparse.Namespace()
args.json_output = json_file
json_plugin = jsonresult.JSONTestResult(self.output_manager, args)
json_plugin = jsonresult.JSONTestResult(self.view, args)
self.result_proxy.add_output_plugin(json_plugin)
# If there are no active output plugins besides xunit and json,
# and the option --silent is not present, then set up the human output.
if hasattr(self.args, 'silent') and self.args.silent:
human = False
else:
human = True
if len(self.result_proxy.output_plugins) == 2 and human:
human_plugin = result.HumanTestResult(self.output_manager, self.args)
op_set_stdout = self.result_proxy.output_plugins_using_stdout()
if len(op_set_stdout) > 1:
msg = ('Options %s are trying to use stdout simultaneously' %
" ".join(op_set_stdout))
self.view.notify(event='error', msg=msg)
msg = ('Please set at least one of them to a file to avoid '
'conflicts')
self.view.notify(event='error', msg=msg)
sys.exit(error_codes.numeric_status['AVOCADO_JOB_FAIL'])
if not op_set_stdout:
human_plugin = result.HumanTestResult(self.view, self.args)
self.result_proxy.add_output_plugin(human_plugin)
def _run(self, urls=None, multiplex_file=None):
......@@ -454,12 +453,12 @@ class Job(object):
self._make_test_result()
self._make_test_runner()
self.output_manager.start_file_logging(self.logfile,
self.loglevel,
self.unique_id)
self.output_manager.logfile = self.logfile
self.view.start_file_logging(self.logfile,
self.loglevel,
self.unique_id)
self.view.logfile = self.logfile
failures = self.test_runner.run(params_list)
self.output_manager.stop_file_logging()
self.view.stop_file_logging()
self.sysinfo_logger.end_job_hook()
# If it's all good so far, set job status to 'PASS'
if self.status == 'RUNNING':
......@@ -505,11 +504,11 @@ class Job(object):
except exceptions.JobBaseException, details:
self.status = details.status
fail_class = details.__class__.__name__
self.output_manager.log_fail_header('Avocado job failed: %s: %s' %
(fail_class, details))
self.view.notify(event='error', msg=('Avocado job failed: %s: %s' %
(fail_class, details)))
return error_codes.numeric_status['AVOCADO_JOB_FAIL']
except exceptions.OptionValidationError, details:
self.output_manager.log_fail_header(str(details))
self.view.notify(event='error', msg=str(details))
return error_codes.numeric_status['AVOCADO_JOB_FAIL']
except Exception, details:
......@@ -518,15 +517,15 @@ class Job(object):
tb_info = traceback.format_exception(exc_type, exc_value,
exc_traceback.tb_next)
fail_class = details.__class__.__name__
self.output_manager.log_fail_header('Avocado crashed: %s: %s' %
(fail_class, details))
self.view.notify(event='error', msg=('Avocado crashed: %s: %s' %
(fail_class, details)))
for line in tb_info:
self.output_manager.error(line)
self.output_manager.log_fail_header('Please include the traceback '
'info and command line used on '
'your bug report')
self.output_manager.log_fail_header('Report bugs visiting %s' %
_NEW_ISSUE_LINK)
self.view.notify(event='minor', msg=line)
self.view.notify(event='error', msg=('Please include the traceback '
'info and command line used on '
'your bug report'))
self.view.notify(event='error', msg=('Report bugs visiting %s' %
_NEW_ISSUE_LINK))
return error_codes.numeric_status['AVOCADO_CRASH']
......
......@@ -33,11 +33,10 @@ class DataDirList(plugin.Plugin):
super(DataDirList, self).configure(self.parser)
def run(self, args):
bcolors = output.term_support
pipe = output.get_paginator()
pipe.write(bcolors.header_str('Avocado Data Directories:'))
pipe.write('\n base dir: ' + data_dir.get_base_dir())
pipe.write('\n tests dir: ' + data_dir.get_test_dir())
pipe.write('\n data dir: ' + data_dir.get_data_dir())
pipe.write('\n logs dir: ' + data_dir.get_logs_dir())
pipe.write('\n tmp dir: ' + data_dir.get_tmp_dir())
view = output.View()
view.notify(event="message", msg='Avocado Data Directories:')
view.notify(event="message", msg=' base dir ' + data_dir.get_base_dir())
view.notify(event="message", msg=' tests dir ' + data_dir.get_test_dir())
view.notify(event="message", msg=' data dir ' + data_dir.get_data_dir())
view.notify(event="message", msg=' logs dir ' + data_dir.get_logs_dir())
view.notify(event="message", msg=' tmp dir ' + data_dir.get_tmp_dir())
......@@ -48,7 +48,7 @@ class TestResultJournal(TestResult):
"""
Creates an instance of TestResultJournal.
:param stream: an instance of :class:`avocado.core.output.OutputManager`.
:param stream: an instance of :class:`avocado.core.output.View`.
:param args: an instance of :class:`argparse.Namespace`.
"""
TestResult.__init__(self, stream, args)
......
......@@ -18,6 +18,7 @@ JSON output module.
import json
from avocado.core import output
from avocado.plugins import plugin
from avocado.result import TestResult
......@@ -33,6 +34,7 @@ class JSONTestResult(TestResult):
def __init__(self, stream=None, args=None):
TestResult.__init__(self, stream, args)
self.output = getattr(self.args, 'json_output', '-')
self.view = output.View(app_args=args)
def start_tests(self):
"""
......@@ -80,7 +82,7 @@ class JSONTestResult(TestResult):
})
self.json = json.dumps(self.json)
if self.args.json_output == '-':
print self.json
self.view.notify(event='minor', msg=self.json)
else:
self._save_json()
......
......@@ -33,23 +33,19 @@ class PluginsList(plugin.Plugin):
super(PluginsList, self).configure(self.parser)
def run(self, args):
bcolors = output.term_support
pipe = output.get_paginator()
view = output.View(app_args=args, use_paginator=True)
pm = get_plugin_manager()
pipe.write(bcolors.header_str('Plugins loaded:'))
pipe.write('\n')
view.notify(event='message', msg='Plugins loaded:')
blength = 0
for plug in pm.plugins:
clength = len(plug.name)
if clength > blength:
blength = clength
format_str = " %-" + str(blength) + "s - %s (%s)\n"
format_str = " %-" + str(blength) + "s - %s %s"
for plug in sorted(pm.plugins):
if plug.enabled:
status = bcolors.healthy_str("Enabled")
status = "(Enabled)"
else:
status = bcolors.fail_header_str("Disabled")
pipe.write(format_str % (bcolors.header_str(plug.name),
plug.description,
status))
status = "(Disabled)"
view.notify(event='minor', msg=format_str % (plug.name, plug.description, status))
......@@ -53,41 +53,38 @@ class Multiplexer(plugin.Plugin):
super(Multiplexer, self).configure(self.parser)
def run(self, args):
bcolors = output.term_support
pipe = output.get_paginator()
view = output.View(app_args=args)
if not args.multiplex_file:
pipe.write(bcolors.fail_header_str('A multiplex file is required, aborting...'))
view.notify(event='error', msg='A multiplex file is required, aborting...')
sys.exit(error_codes.numeric_status['AVOCADO_JOB_FAIL'])
multiplex_file = os.path.abspath(args.multiplex_file)
if not os.path.isfile(multiplex_file):
pipe.write(bcolors.fail_header_str('Invalid multiplex file %s' % multiplex_file))
view.notify(event='error', msg='Invalid multiplex file %s' % multiplex_file)
sys.exit(error_codes.numeric_status['AVOCADO_JOB_FAIL'])
if args.tree:
pipe.write(bcolors.header_str('Config file tree structure:'))
pipe.write('\n')
view.notify(event='message', msg='Config file tree structure:')
data = tree.read_ordered_yaml(open(multiplex_file))
t = tree.create_from_ordered_data(data)
pipe.write(t.get_ascii())
view.notify(event='minor', msg=t.get_ascii())
sys.exit(error_codes.numeric_status['AVOCADO_ALL_OK'])
variants = multiplexer.create_variants_from_yaml(open(multiplex_file),
args.filter_only,
args.filter_out)
pipe.write(bcolors.header_str('Variants generated:'))
pipe.write('\n')
view.notify(event='message', msg='Variants generated:')
for (index, tpl) in enumerate(variants):
paths = ', '.join([x.path for x in tpl])
pipe.write('Variant %s: %s\n' % (index+1, paths))
view.notify(event='message', msg='Variant %s: %s' % (index+1, paths))
if args.contents:
env = collections.OrderedDict()
for node in tpl:
env.update(node.environment)
for k in sorted(env.keys()):
pipe.write(' %s: %s\n' % (k, env[k]))
view.notify(event='message', msg=' %s: %s' % (k, env[k]))
sys.exit(error_codes.numeric_status['AVOCADO_ALL_OK'])
......@@ -53,8 +53,7 @@ class TestLister(plugin.Plugin):
:param args: Command line args received from the list subparser.
"""
bcolors = output.term_support
pipe = output.get_paginator()
view = output.View(app_args=args, use_paginator=True)
base_test_dir = data_dir.get_test_dir()
test_files = os.listdir(base_test_dir)
test_dirs = []
......@@ -66,15 +65,14 @@ class TestLister(plugin.Plugin):
if clength > blength:
blength = clength
test_dirs.append((t.split('.')[0], os.path.join(base_test_dir, t)))
format_string = " %-" + str(blength) + "s %s\n"
pipe.write(bcolors.header_str('Tests dir: %s\n' % base_test_dir))
format_string = " %-" + str(blength) + "s %s"
view.notify(event='message', msg='Tests dir: %s' % base_test_dir)
if len(test_dirs) > 0:
pipe.write(bcolors.header_str(format_string % ('Alias', 'Path')))
view.notify(event='message', msg=format_string % ('Alias', 'Path'))
for test_dir in test_dirs:
pipe.write(format_string % test_dir)
view.notify(event='minor', msg=format_string % test_dir)
else:
pipe.write(bcolors.header_str('No tests were found on current '
'tests dir'))
view.notify(event='error', msg='No tests were found on current tests dir')
class TestRunner(plugin.Plugin):
......@@ -113,8 +111,35 @@ class TestRunner(plugin.Plugin):
'server. You should not use this option '
'unless you know exactly what you\'re doing'))
self.parser.add_argument('-s', '--silent', action='store_true', default=False,
help='Silent output, do not display results.')
out = self.parser.add_argument_group('output related arguments')
out.add_argument('-s', '--silent', action='store_true', default=False,
help='Silence stdout')
out.add_argument('--show-job-log', action='store_true', default=False,
help=('Display only the job log on stdout. Useful '
'for test debugging purposes. No output will '
'be displayed if you also specify --silent'))
out_check = self.parser.add_argument_group('output check arguments')
out_check.add_argument('--output-check-record', type=str,
default='none',
help=('Record output streams of your tests '
'to reference files (valid options: '
'none (do not record output streams), '
'all (record both stdout and stderr), '
'stdout (record only stdout), '
'stderr (record only stderr). '
'Default: none'))
out_check.add_argument('--disable-output-check', action='store_true',
default=False,
help=('Disable test output (stdout/stderr) check. '
'If this option is selected, no output will '
'be checked, even if there are reference files '
'present for the test. '
'Default: False (output check enabled)'))
mux = self.parser.add_argument_group('multiplex arguments')
mux.add_argument('-m', '--multiplex-file', type=str, default=None,
......
......@@ -128,7 +128,7 @@ class VMTestResult(TestResult):
"""
Creates an instance of VMTestResult.
:param stream: an instance of :class:`avocado.core.output.OutputManager`.
:param stream: an instance of :class:`avocado.core.output.View`.
:param args: an instance of :class:`argparse.Namespace`.
"""
TestResult.__init__(self, stream, args)
......@@ -152,42 +152,39 @@ class VMTestResult(TestResult):
if self.args.vm_domain is None:
e_msg = ('Please set Virtual Machine Domain with option '
'--vm-domain.')
self.stream.error(e_msg)
self.stream.notify(event='error', msg=e_msg)
raise exceptions.TestSetupFail(e_msg)
if self.args.vm_hostname is None:
e_msg = ('Please set Virtual Machine hostname with option '
'--vm-hostname.')
self.stream.error(e_msg)
self.stream.notify(event='error', msg=e_msg)
raise exceptions.TestSetupFail(e_msg)
self.stream.log_header("VM DOMAIN : %s" %
self.args.vm_domain)
self.stream.log_header("VM LOGIN : %s@%s" %
(self.args.vm_username, self.args.vm_hostname))
self.stream.notify(event='message', msg="VM DOMAIN : %s" % self.args.vm_domain)
self.stream.notify(event='message', msg="VM LOGIN : %s@%s" % (self.args.vm_username, self.args.vm_hostname))
self.vm = virt.vm_connect(self.args.vm_domain,
self.args.vm_hypervisor_uri)
if self.vm is None:
self.stream.error("Could not connect to VM '%s'" % self.args.vm_domain)
self.stream.notify(event='error', msg="Could not connect to VM '%s'" % self.args.vm_domain)
raise exceptions.TestSetupFail()
if self.vm.start() is False:
self.stream.error("Could not start VM '%s'" % self.args.vm_domain)
self.stream.notify(event='error', msg="Could not start VM '%s'" % self.args.vm_domain)
raise exceptions.TestSetupFail()
assert self.vm.domain.isActive() is not False
if self.args.vm_cleanup is True:
self.vm.create_snapshot()
if self.vm.snapshot is None:
self.stream.error("Could not create snapshot on VM '%s'" % self.args.vm_domain)
self.stream.notify(event='error', msg="Could not create snapshot on VM '%s'" % self.args.vm_domain)
raise exceptions.TestSetupFail()
try:
self.vm.setup_login(self.args.vm_hostname,
self.args.vm_username,
self.args.vm_password)
except Exception as err:
self.stream.error("Could not login on VM '%s': %s" % (self.args.vm_hostname,
err))
self.stream.notify(event='error', msg="Could not login on VM '%s': %s" % (self.args.vm_hostname, err))
self.tear_down()
raise exceptions.TestSetupFail()
if self.vm.logged is False or self.vm.remote.uptime() is '':
self.stream.error("Could not login on VM '%s'" % self.args.vm_hostname)
self.stream.notify(event='error', msg="Could not login on VM '%s'" % self.args.vm_hostname)
self.tear_down()
raise exceptions.TestSetupFail()
self._copy_tests()
......@@ -201,21 +198,22 @@ class VMTestResult(TestResult):
Called once before any tests are executed.
"""
TestResult.start_tests(self)
self.stream.log_header("JOB ID : %s" % self.stream.job_unique_id)
self.stream.log_header("JOB LOG : %s" % self.stream.logfile)
self.stream.log_header("TESTS : %s" % self.tests_total)
self.stream.notify(event='message', msg="JOB ID : %s" % self.stream.job_unique_id)
self.stream.notify(event='message', msg="JOB LOG : %s" % self.stream.logfile)
self.stream.notify(event='message', msg="TESTS : %s" % self.tests_total)
self.stream.set_tests_info({'tests_total': self.tests_total})
def end_tests(self):
"""
Called once after all tests are executed.
"""
self.stream.log_header("PASS : %d" % len(self.passed))
self.stream.log_header("ERROR : %d" % len(self.errors))
self.stream.log_header("NOT FOUND : %d" % len(self.not_found))
self.stream.log_header("FAIL : %d" % len(self.failed))
self.stream.log_header("SKIP : %d" % len(self.skipped))
self.stream.log_header("WARN : %d" % len(self.warned))
self.stream.log_header("TIME : %.2f s" % self.total_time)
self.stream.notify(event='message', msg="PASS : %d" % len(self.passed))
self.stream.notify(event='message', msg="ERROR : %d" % len(self.errors))
self.stream.notify(event='message', msg="NOT FOUND : %d" % len(self.not_found))
self.stream.notify(event='message', msg="FAIL : %d" % len(self.failed))
self.stream.notify(event='message', msg="SKIP : %d" % len(self.skipped))
self.stream.notify(event='message', msg="WARN : %d" % len(self.warned))
self.stream.notify(event='message', msg="TIME : %.2f s" % self.total_time)
def start_test(self, test):
"""
......@@ -223,10 +221,7 @@ class VMTestResult(TestResult):
:param test: :class:`avocado.test.Test` instance.
"""
self.test_label = '(%s/%s) %s: ' % (self.tests_run,
self.tests_total,
test['tagged_name'])
self.stream.info(msg=self.test_label, skip_newline=True)
self.stream.add_test(test)
def end_test(self, test):
"""
......@@ -243,7 +238,7 @@ class VMTestResult(TestResult):
:param test: :class:`avocado.test.Test` instance.
"""
TestResult.add_pass(self, test)
self.stream.log_pass(test['time_elapsed'])
self.stream.set_test_status(status='PASS', state=test)
def add_error(self, test):
"""
......@@ -252,7 +247,7 @@ class VMTestResult(TestResult):
:param test: :class:`avocado.test.Test` instance.
"""
TestResult.add_error(self, test)
self.stream.log_error(test['time_elapsed'])
self.stream.set_test_status(status='ERROR', state=test)
def add_not_found(self, test):
"""
......@@ -261,7 +256,7 @@ class VMTestResult(TestResult):
:param test: :class:`avocado.test.Test` instance.
"""
TestResult.add_not_found(self, test)
self.stream.log_not_found(test['time_elapsed'])
self.stream.set_test_status(status='NOT_FOUND', state=test)
def add_fail(self, test):
"""
......@@ -270,7 +265,7 @@ class VMTestResult(TestResult):
:param test: :class:`avocado.test.Test` instance.
"""
TestResult.add_fail(self, test)
self.stream.log_fail(test['time_elapsed'])
self.stream.set_test_status(status='FAIL', state=test)
def add_skip(self, test):
"""
......@@ -279,7 +274,7 @@ class VMTestResult(TestResult):
:param test: :class:`avocado.test.Test` instance.
"""
TestResult.add_skip(self, test)
self.stream.log_skip(test['time_elapsed'])
self.stream.set_test_status(status='SKIP', state=test)
def add_warn(self, test):
"""
......@@ -288,7 +283,7 @@ class VMTestResult(TestResult):
:param test: :class:`avocado.test.Test` instance.
"""
TestResult.add_warn(self, test)
self.stream.log_warn(test['time_elapsed'])
self.stream.set_test_status(status='WARN', state=test)
class RunVM(plugin.Plugin):
......
......@@ -18,6 +18,7 @@ import sys
import datetime
from xml.sax.saxutils import quoteattr
from avocado.core import output
from avocado.plugins import plugin
from avocado.result import TestResult
......@@ -31,9 +32,8 @@ class XmlResult(object):
Handles the XML details for xUnit output.
"""
def __init__(self, output):
def __init__(self):
self.xml = ['<?xml version="1.0" encoding="UTF-8"?>']
self.output = output
def _escape_attr(self, attrib):
return quoteattr(attrib)
......@@ -41,20 +41,8 @@ class XmlResult(object):
def _escape_cdata(self, cdata):
return cdata.replace(']]>', ']]>]]&gt;<![CDATA[')
def save(self, filename=None):
"""
Save the XML document to a file or standard output.
:param filename: File name to save. Use '-' for standard output.
"""
if filename is None:
filename = self.output
xml = '\n'.join(self.xml) + '\n'
if filename == '-':
sys.stdout.write(xml)
else:
with open(filename, 'w') as fresult:
fresult.write(xml)
def get_contents(self):
return '\n'.join(self.xml)
def start_testsuite(self, timestamp):
"""
......@@ -168,12 +156,13 @@ class xUnitTestResult(TestResult):
"""
Creates an instance of xUnitTestResult.
:param stream: an instance of :class:`avocado.core.output.OutputManager`.
:param stream: an instance of :class:`avocado.core.output.View`.
:param args: an instance of :class:`argparse.Namespace`.
"""
TestResult.__init__(self, stream, args)
self.output = getattr(self.args, 'xunit_output', '-')
self.xml = XmlResult(self.output)
self.stream = output.View(app_args=args)
self.xml = XmlResult()
def start_tests(self):
"""
......@@ -219,7 +208,12 @@ class xUnitTestResult(TestResult):
'not_found': len(self.not_found),
'total_time': self.total_time}
self.xml.end_testsuite(**values)
self.xml.save()
contents = self.xml.get_contents()
if self.output == '-':
self.stream.notify(event='minor', msg=contents)
else:
with open(self.output, 'w') as xunit_output:
xunit_output.write(contents)
class XUnit(plugin.Plugin):
......
......@@ -37,10 +37,10 @@ class TestResultProxy(object):
else:
return None
def throbber_progress(self, progress_from_test=False):
def notify_progress(self, progress_from_test=False):
for output_plugin in self.output_plugins:
if hasattr(output_plugin, 'throbber_progress'):
output_plugin.throbber_progress(progress_from_test)
if hasattr(output_plugin, 'notify_progress'):
output_plugin.notify_progress(progress_from_test)
def add_output_plugin(self, plugin):
if not isinstance(plugin, TestResult):
......@@ -48,6 +48,13 @@ class TestResultProxy(object):
"TestResult" % plugin)
self.output_plugins.append(plugin)
def output_plugins_using_stdout(self):
using_stdout = []
for op in self.output_plugins:
if op.output == '-':
using_stdout.append(op.command_line_arg_name)
return using_stdout
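
The helper above just collects the command line names of plugins whose output target is '-'. A rough sketch of the check the job performs with it; the plugin objects here are stand-ins, since real ones must subclass TestResult::

    class FakePlugin(object):
        """Stand-in exposing only the attributes the check reads."""
        def __init__(self, name, output):
            self.command_line_arg_name = name
            self.output = output

    plugins = [FakePlugin('--xunit', '-'), FakePlugin('--json', '-')]
    using_stdout = [p.command_line_arg_name for p in plugins if p.output == '-']
    if len(using_stdout) > 1:
        raise SystemExit('Options %s are trying to use stdout simultaneously'
                         % ' '.join(using_stdout))
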
def start_tests(self):
for output_plugin in self.output_plugins:
output_plugin.start_tests()
......@@ -108,7 +115,7 @@ class TestResult(object):
"""
Creates an instance of TestResult.
:param stream: an instance of :class:`avocado.core.output.OutputManager`.
:param stream: an instance of :class:`avocado.core.output.View`.
:param args: an instance of :class:`argparse.Namespace`.
"""
self.stream = stream
......@@ -133,6 +140,7 @@ class TestResult(object):
Called once before any tests are executed.
"""
self.tests_run += 1
self.stream.set_tests_info({'tests_run': self.tests_run})
def end_tests(self):
"""
......@@ -158,6 +166,7 @@ class TestResult(object):
"""
self.tests_run += 1
self.total_time += state['time_elapsed']
self.stream.set_tests_info({'tests_run': self.tests_run})
def add_pass(self, state):
"""
......@@ -242,21 +251,22 @@ class HumanTestResult(TestResult):
Called once before any tests are executed.
"""
TestResult.start_tests(self)
self.stream.log_header("JOB ID : %s" % self.stream.job_unique_id)
self.stream.log_header("JOB LOG : %s" % self.stream.logfile)
self.stream.log_header("TESTS : %s" % self.tests_total)
self.stream.notify(event="message", msg="JOB ID : %s" % self.stream.job_unique_id)
self.stream.notify(event="message", msg="JOB LOG : %s" % self.stream.logfile)
self.stream.notify(event="message", msg="TESTS : %s" % self.tests_total)
self.stream.set_tests_info({'tests_total': self.tests_total})
def end_tests(self):
"""
Called once after all tests are executed.
"""
self.stream.log_header("PASS : %d" % len(self.passed))
self.stream.log_header("ERROR : %d" % len(self.errors))
self.stream.log_header("FAIL : %d" % len(self.failed))
self.stream.log_header("SKIP : %d" % len(self.skipped))
self.stream.log_header("WARN : %d" % len(self.warned))
self.stream.log_header("NOT FOUND : %d" % len(self.not_found))
self.stream.log_header("TIME : %.2f s" % self.total_time)
self.stream.notify(event="message", msg="PASS : %d" % len(self.passed))
self.stream.notify(event="message", msg="ERROR : %d" % len(self.errors))
self.stream.notify(event="message", msg="FAIL : %d" % len(self.failed))
self.stream.notify(event="message", msg="SKIP : %d" % len(self.skipped))
self.stream.notify(event="message", msg="WARN : %d" % len(self.warned))
self.stream.notify(event="message", msg="NOT FOUND : %d" % len(self.not_found))
self.stream.notify(event="message", msg="TIME : %.2f s" % self.total_time)
def start_test(self, state):
"""
......@@ -265,10 +275,7 @@ class HumanTestResult(TestResult):
:param state: result of :class:`avocado.test.Test.get_state`.
:type state: dict
"""
self.test_label = '(%s/%s) %s: ' % (self.tests_run,
self.tests_total,
state['tagged_name'])
self.stream.info(msg=self.test_label, skip_newline=True)
self.stream.add_test(state)
def end_test(self, state):
"""
......@@ -287,7 +294,7 @@ class HumanTestResult(TestResult):
:type state: dict
"""
TestResult.add_pass(self, state)
self.stream.log_pass(state['time_elapsed'])
self.stream.set_test_status(status='PASS', state=state)
def add_error(self, state):
"""
......@@ -297,7 +304,7 @@ class HumanTestResult(TestResult):
:type state: dict
"""
TestResult.add_error(self, state)
self.stream.log_error(state['time_elapsed'])
self.stream.set_test_status(status='ERROR', state=state)
def add_not_found(self, state):
"""
......@@ -307,7 +314,7 @@ class HumanTestResult(TestResult):
:type state: dict
"""
TestResult.add_not_found(self, state)
self.stream.log_not_found(state['time_elapsed'])
self.stream.set_test_status(status='NOT_FOUND', state=state)
def add_fail(self, state):
"""
......@@ -317,7 +324,7 @@ class HumanTestResult(TestResult):
:type state: dict
"""
TestResult.add_fail(self, state)
self.stream.log_fail(state['time_elapsed'])
self.stream.set_test_status(status='FAIL', state=state)
def add_skip(self, state):
"""
......@@ -327,7 +334,7 @@ class HumanTestResult(TestResult):
:type state: dict
"""
TestResult.add_skip(self, state)
self.stream.log_skip(state['time_elapsed'])
self.stream.set_test_status(status='SKIP', state=state)
def add_warn(self, state):
"""
......@@ -337,7 +344,7 @@ class HumanTestResult(TestResult):
:type state: dict
"""
TestResult.add_warn(self, state)
self.stream.log_warn(state['time_elapsed'])
self.stream.set_test_status(status='WARN', state=state)
def throbber_progress(self, progress_from_test=False):
self.stream.throbber_progress(progress_from_test)
def notify_progress(self, progress_from_test=False):
self.stream.notify_progress(progress_from_test)
......@@ -20,6 +20,7 @@ framework tests.
import inspect
import logging
import os
import shutil
import sys
import time
import traceback
......@@ -130,6 +131,12 @@ class Test(unittest.TestCase):
self.basedir = os.path.dirname(inspect.getfile(self.__class__))
self.datadir = os.path.join(self.basedir, '%s.data' % basename)
self.expected_stdout_file = os.path.join(self.datadir,
'stdout.expected')
self.expected_stderr_file = os.path.join(self.datadir,
'stderr.expected')
self.workdir = path.init_dir(tmpdir, basename)
self.srcdir = path.init_dir(self.workdir, 'src')
if base_logdir is None:
......@@ -140,12 +147,19 @@ class Test(unittest.TestCase):
self.logdir = path.init_dir(base_logdir, self.tagged_name)
io.set_log_file_dir(self.logdir)
self.logfile = os.path.join(self.logdir, 'debug.log')
self.stdout_file = os.path.join(self.logdir, 'stdout')
self.stderr_file = os.path.join(self.logdir, 'stderr')
self.outputdir = path.init_dir(self.logdir, 'data')
self.sysinfodir = path.init_dir(self.logdir, 'sysinfo')
self.sysinfo_logger = sysinfo.SysInfo(basedir=self.sysinfodir)
self.log = logging.getLogger("avocado.test")
self.stdout_log = logging.getLogger("avocado.test.stdout")
self.stderr_log = logging.getLogger("avocado.test.stderr")
self.log.info('START %s', self.tagged_name)
self.log.debug('')
self.log.debug('Test instance parameters:')
......@@ -269,6 +283,14 @@ class Test(unittest.TestCase):
"""
return os.path.join(self.datadir, basename)
def _register_log_file_handler(self, logger, formatter, filename,
log_level=logging.DEBUG):
file_handler = logging.FileHandler(filename=filename)
file_handler.setLevel(log_level)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return file_handler
def start_logging(self):
"""
Simple helper for adding a file logger to the root logger.
......@@ -282,6 +304,14 @@ class Test(unittest.TestCase):
self.file_handler.setFormatter(formatter)
self.log.addHandler(self.file_handler)
stream_fmt = '%(message)s'
stream_formatter = logging.Formatter(fmt=stream_fmt)
self.stdout_file_handler = self._register_log_file_handler(self.stdout_log, stream_formatter,
self.stdout_file)
self.stderr_file_handler = self._register_log_file_handler(self.stderr_log, stream_formatter,
self.stderr_file)
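
A minimal standalone illustration (names assumed) of the handler-registration pattern used above: once a FileHandler is attached to 'avocado.test.stdout', every record sent to that logger lands in the per-test stdout file::

    import logging
    import os
    import tempfile

    def register_file_handler(logger, formatter, filename, level=logging.DEBUG):
        # mirrors _register_log_file_handler above
        handler = logging.FileHandler(filename=filename)
        handler.setLevel(level)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        return handler

    fd, stdout_path = tempfile.mkstemp()                # stand-in for self.stdout_file
    os.close(fd)
    stream_logger = logging.getLogger('avocado.test.stdout')
    stream_logger.setLevel(logging.DEBUG)
    handler = register_file_handler(stream_logger, logging.Formatter('%(message)s'),
                                    stdout_path)
    stream_logger.debug('captured test stdout line')
    handler.close()
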
def stop_logging(self):
"""
Stop the logging activity of the test by cleaning the logger handlers.
......@@ -351,6 +381,32 @@ class Test(unittest.TestCase):
"""
pass
def record_reference_stdout(self):
if not os.path.isdir(self.datadir):
os.makedirs(self.datadir)
shutil.copyfile(self.stdout_file, self.expected_stdout_file)
def record_reference_stderr(self):
if not os.path.isdir(self.datadir):
os.makedirs(self.datadir)
shutil.copyfile(self.stderr_file, self.expected_stderr_file)
def check_reference_stdout(self):
if os.path.isfile(self.expected_stdout_file):
expected = io.read_file(self.expected_stdout_file)
actual = io.read_file(self.stdout_file)
msg = ('Actual test stdout differs from expected one:\n'
'Actual:\n%s\nExpected:\n%s' % (actual, expected))
self.assertEqual(expected, actual, msg)
def check_reference_stderr(self):
if os.path.isfile(self.expected_stderr_file):
expected = io.read_file(self.expected_stderr_file)
actual = io.read_file(self.stderr_file)
msg = ('Actual test stderr differs from expected one:\n'
'Actual:\n%s\nExpected:\n%s' % (actual, expected))
self.assertEqual(expected, actual, msg)
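
Stripped of the Test plumbing, the record-then-compare flow added above reduces to something like the following; the paths and helper names are illustrative only::

    import os
    import shutil

    def record_reference(actual_path, expected_path):
        """First run, with --output-check-record: store the actual output as reference."""
        datadir = os.path.dirname(expected_path)
        if not os.path.isdir(datadir):
            os.makedirs(datadir)
        shutil.copyfile(actual_path, expected_path)

    def check_reference(actual_path, expected_path):
        """Later runs: compare the actual output against the recorded reference."""
        if not os.path.isfile(expected_path):
            return True                      # nothing recorded, nothing to check
        with open(expected_path) as expected, open(actual_path) as actual:
            return expected.read() == actual.read()
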
def runTest(self, result=None):
"""
Run test method, for compatibility with unittest.TestCase.
......@@ -361,6 +417,8 @@ class Test(unittest.TestCase):
self.sysinfo_logger.start_test_hook()
action_exception = None
cleanup_exception = None
stdout_check_exception = None
stderr_check_exception = None
try:
self.setup()
except Exception, details:
......@@ -377,11 +435,42 @@ class Test(unittest.TestCase):
except Exception, details:
log_exc_info(sys.exc_info())
cleanup_exception = details
if self.job is not None:
job_standalone = self.job.args is None
no_record_mode = (not job_standalone and
self.job.args.output_check_record == 'none')
disable_output_check = (not job_standalone and
self.job.args.disable_output_check)
if job_standalone or no_record_mode:
if not disable_output_check:
try:
self.check_reference_stdout()
except Exception, details:
log_exc_info(sys.exc_info())
stdout_check_exception = details
try:
self.check_reference_stderr()
except Exception, details:
log_exc_info(sys.exc_info())
stderr_check_exception = details
elif not job_standalone:
if self.job.args.output_check_record in ['all', 'stdout']:
self.record_reference_stdout()
if self.job.args.output_check_record in ['all', 'stderr']:
self.record_reference_stderr()
# pylint: disable=E0702
if action_exception is not None:
raise action_exception
elif cleanup_exception is not None:
raise exceptions.TestSetupFail(cleanup_exception)
elif stdout_check_exception is not None:
raise stdout_check_exception
elif stderr_check_exception is not None:
raise stderr_check_exception
self.status = 'PASS'
self.sysinfo_logger.end_test_hook()
......@@ -462,6 +551,14 @@ class DropinTest(Test):
self.path = os.path.abspath(path)
super(DropinTest, self).__init__(name=path, base_logdir=base_logdir,
params=params, tag=tag, job=job)
basedir = os.path.dirname(self.path)
basename = os.path.basename(self.path)
datadirname = basename + '.data'
self.datadir = os.path.join(basedir, datadirname)
self.expected_stdout_file = os.path.join(self.datadir,
'stdout.expected')
self.expected_stderr_file = os.path.join(self.datadir,
'stderr.expected')
def _log_detailed_cmd_info(self, result):
"""
......
......@@ -16,13 +16,25 @@ import os
import process
def make(path, make='make', env='', extra_args='', ignore_status=False):
def make(path, make='make', env='', extra_args='', ignore_status=False, allow_output_check='none'):
"""
Run make, adding MAKEOPTS to the list of options.
:param env: environment variables to be set before calling make
(e.g.: CFLAGS).
:param extra_args: extra command line arguments to pass to make.
:param allow_output_check: Whether to log the command stream outputs
(stdout and stderr) of the make process in
the test stream files. Valid values: 'stdout',
for allowing only standard output, 'stderr',
to allow only standard error, 'all',
to allow both standard output and error,
and 'none', to allow none to be
recorded (default). The default here is
'none', because usually we don't want
to use the compilation output as a reference
in tests.
:type allow_output_check: str
"""
cwd = os.getcwd()
os.chdir(path)
......@@ -34,6 +46,6 @@ def make(path, make='make', env='', extra_args='', ignore_status=False):
cmd += ' %s' % makeopts
if extra_args:
cmd += ' %s' % extra_args
make_process = process.system(cmd, ignore_status=ignore_status)
make_process = process.system(cmd, ignore_status=ignore_status, allow_output_check=allow_output_check)
os.chdir(cwd)
return make_process
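
A hedged example of calling the helper above from a test, keeping the compilation noise out of the recorded test streams; the import path and the source directory are assumptions::

    from avocado.utils import build   # assumed module path

    # stdout/stderr of 'make' stay out of the test's stdout/stderr reference
    # files because allow_output_check='none' (the default documented above).
    build.make('/tmp/synctest-src',   # hypothetical, previously extracted sources
               extra_args='-j2',
               allow_output_check='none')
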
......@@ -32,6 +32,8 @@ from avocado import runtime
from avocado.core import exceptions
log = logging.getLogger('avocado.test')
stdout_log = logging.getLogger('avocado.test.stdout')
stderr_log = logging.getLogger('avocado.test.stderr')
class CmdNotFoundError(Exception):
......@@ -220,7 +222,7 @@ class SubProcess(object):
Run a subprocess in the background, collecting stdout/stderr streams.
"""
def __init__(self, cmd, verbose=True):
def __init__(self, cmd, verbose=True, allow_output_check='all'):
"""
Creates the subprocess object, stdout/err, reader threads and locks.
......@@ -228,6 +230,15 @@ class SubProcess(object):
:type cmd: str
:param verbose: Whether to log the command run and stdout/stderr.
:type verbose: bool
:param allow_output_check: Whether to log the command stream outputs
(stdout and stderr) in the test stream
files. Valid values: 'stdout', for
allowing only standard output, 'stderr',
to allow only standard error, 'all',
to allow both standard output and error
(default), and 'none', to allow
none to be recorded.
:type allow_output_check: str
"""
self.cmd = cmd
self.verbose = verbose
......@@ -237,6 +248,7 @@ class SubProcess(object):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
self.allow_output_check = allow_output_check
self.start_time = time.time()
self.result = CmdResult(cmd)
self.stdout_file = StringIO.StringIO()
......@@ -273,12 +285,21 @@ class SubProcess(object):
:param input_pipe: File like object to the stream.
"""
stream_prefix = "%s"
if input_pipe == self.sp.stdout:
prefix = '[stdout] %s'
if self.allow_output_check in ['none', 'stderr']:
stream_logger = None
else:
stream_logger = stdout_log
output_file = self.stdout_file
lock = self.stdout_lock
elif input_pipe == self.sp.stderr:
prefix = '[stderr] %s'
if self.allow_output_check in ['none', 'stdout']:
stream_logger = None
else:
stream_logger = stderr_log
output_file = self.stderr_file
lock = self.stderr_lock
......@@ -297,6 +318,8 @@ class SubProcess(object):
if tmp.endswith('\n'):
for l in bfr.splitlines():
log.debug(prefix, l)
if stream_logger is not None:
stream_logger.debug(stream_prefix, l)
bfr = ''
finally:
lock.release()
......@@ -429,7 +452,7 @@ class GDBSubProcess(object):
Runs a subprocess inside the GNU Debugger
'''
def __init__(self, cmd, verbose=True):
def __init__(self, cmd, verbose=True, record_stream_files=False):
self.cmd = cmd
self.args = shlex.split(cmd)
......@@ -693,7 +716,7 @@ def get_sub_process_klass(cmd):
return SubProcess
def run(cmd, timeout=None, verbose=True, ignore_status=False):
def run(cmd, timeout=None, verbose=True, ignore_status=False, allow_output_check='all'):
"""
Run a subprocess, returning a CmdResult object.
......@@ -709,11 +732,22 @@ def run(cmd, timeout=None, verbose=True, ignore_status=False):
:param ignore_status: Whether to raise an exception when command returns
!= 0 (False), or not (True).
:type ignore_status: bool
:param allow_output_check: Whether to log the command stream outputs
(stdout and stderr) in the test stream
files. Valid values: 'stdout', for
allowing only standard output, 'stderr',
to allow only standard error, 'all',
to allow both standard output and error
(default), and 'none', to allow
none to be recorded.
:type allow_output_check: str
:return: An :class:`avocado.utils.process.CmdResult` object.
:raise: :class:`avocado.core.exceptions.CmdError`, if ``ignore_status=False``.
"""
klass = get_sub_process_klass(cmd)
sp = klass(cmd=cmd, verbose=verbose)
sp = klass(cmd=cmd, verbose=verbose,
allow_output_check=allow_output_check)
cmd_result = sp.run(timeout=timeout)
fail_condition = cmd_result.exit_status != 0 or cmd_result.interrupted
if fail_condition and not ignore_status:
......@@ -721,7 +755,8 @@ def run(cmd, timeout=None, verbose=True, ignore_status=False):
return cmd_result
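
An assumed usage sketch of the new parameter on run() above; the command is arbitrary::

    from avocado.utils import process

    # Only standard output is forwarded to the 'avocado.test.stdout' stream logger;
    # stderr is still captured in the CmdResult, just not recorded as a stream file.
    result = process.run('uname -a', allow_output_check='stdout')
    print(result.exit_status)
    print(result.stdout)
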
def system(cmd, timeout=None, verbose=True, ignore_status=False):
def system(cmd, timeout=None, verbose=True, ignore_status=False,
allow_output_check='all'):
"""
Run a subprocess, returning its exit code.
......@@ -737,16 +772,25 @@ def system(cmd, timeout=None, verbose=True, ignore_status=False):
:param ignore_status: Whether to raise an exception when command returns
!= 0 (False), or not (True).
:type ignore_status: bool
:param allow_output_check: Whether to log the command stream outputs
(stdout and stderr) in the test stream
files. Valid values: 'stdout', for
allowing only standard output, 'stderr',
to allow only standard error, 'all',
to allow both standard output and error
(default), and 'none', to allow
none to be recorded.
:type allow_output_check: str
:return: Exit code.
:rtype: int
:raise: :class:`avocado.core.exceptions.CmdError`, if ``ignore_status=False``.
"""
cmd_result = run(cmd=cmd, timeout=timeout, verbose=verbose,
ignore_status=ignore_status)
cmd_result = run(cmd=cmd, timeout=timeout, verbose=verbose, ignore_status=ignore_status,
allow_output_check=allow_output_check)
return cmd_result.exit_status
def system_output(cmd, timeout=None, verbose=True, ignore_status=False):
def system_output(cmd, timeout=None, verbose=True, ignore_status=False, allow_output_check='all'):
"""
Run a subprocess, returning its output.
......@@ -761,10 +805,19 @@ def system_output(cmd, timeout=None, verbose=True, ignore_status=False):
:type verbose: bool
:param ignore_status: Whether to raise an exception when command returns
!= 0 (False), or not (True).
:param allow_output_check: Whether to log the command stream outputs
(stdout and stderr) in the test stream
files. Valid values: 'stdout', for
allowing only standard output, 'stderr',
to allow only standard error, 'all',
to allow both standard output and error
(default), and 'none', to allow
none to be recorded.
:type allow_output_check: str
:return: Command output.
:rtype: str
:raise: :class:`avocado.core.exceptions.CmdError`, if ``ignore_status=False``.
"""
cmd_result = run(cmd=cmd, timeout=timeout, verbose=verbose,
ignore_status=ignore_status)
cmd_result = run(cmd=cmd, timeout=timeout, verbose=verbose, ignore_status=ignore_status,
allow_output_check=allow_output_check)
return cmd_result.stdout
......@@ -16,7 +16,7 @@ Avocado is primarily being developed on Fedora boxes, but we are making
reasonable efforts to ensure that Ubuntu users can use and develop avocado as well.
Installing avocado - Fedora
---------------------------
===========================
You can install the rpm package by performing the following commands::
......@@ -25,7 +25,7 @@ You can install the rpm package by performing the following commands::
sudo yum install avocado
Installing avocado - Ubuntu
---------------------------
===========================
You need to add the following lines::
......@@ -37,8 +37,8 @@ performing the following commands::
sudo apt-get update
sudo apt-get install avocado
Running the avocado test runner
-------------------------------
Using the avocado test runner
=============================
The test runner is designed to conveniently run tests on your laptop. The tests
you can run are:
......@@ -48,6 +48,9 @@ you can run are:
code of the executable. If it returns 0, the test PASSed; if it returns
!= 0, it FAILed. We'll call those tests `dropin`.
Native tests
------------
Avocado looks for avocado "native" tests in some locations; the main one is defined in
the config file ``/etc/avocado/settings.ini``, section ``runner``, ``test_dir``
key. You can list tests by::
......@@ -72,6 +75,9 @@ You can run them using the subcommand ``run``::
WARN : 0
TIME : 1.01 s
Job ID
------
The Job ID is a SHA1 string that has some information encoded:
* Hostname
......@@ -82,6 +88,9 @@ The idea is to have a unique identifier that can be used for job data, for
the purposes of joining, in a single database, results obtained by jobs run
on different systems.
Drop-In tests
-------------
You can run any number of tests in an arbitrary order, as well as mix and match
native tests and dropin tests::
......@@ -105,5 +114,32 @@ native tests and dropin tests::
WARN : 0
TIME : 1.04 s
Some more involved functionalities for the avocado runner are discussed as appropriate, during
the introduction of important concepts.
Debugging tests
---------------
When developing new tests, you frequently want to look at the raw
output of the job log on stdout, without having to tail the job log file.
In order to do that, you can pass --show-job-log to the avocado test runner::
$ scripts/avocado run examples/tests/sleeptest.py --show-job-log
Not logging /proc/slabinfo (lack of permissions)
START examples/tests/sleeptest.py
Test instance parameters:
id = examples/tests/sleeptest.py
Default parameters:
sleep_length = 1.0
Test instance params override defaults whenever available
Sleeping for 1.00 seconds
Not logging /var/log/messages (lack of permissions)
PASS examples/tests/sleeptest.py
Not logging /proc/slabinfo (lack of permissions)
As you can see, the UI output is suppressed and only the job log goes to
stdout, making this a useful feature for test development/debugging. Some more
involved functionalities for the avocado runner will be discussed as
appropriate, during the introduction of important concepts.
......@@ -4,10 +4,10 @@
Writing Avocado Tests
=====================
Avocado tests closely resemble autotest tests: All you need to do is to create a
test module, which is a python file with a class that inherits from
:class:`avocado.test.Test`. This class only really needs to implement a method
called `action`, which represents the actual test payload.
To write an avocado test, all you need to do is to create a test module, which
is a python file with a class that inherits from :class:`avocado.test.Test`.
This class only really needs to implement a method called `action`, which
represents the actual test operations.
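As a minimal sketch (the class name, docstring and log message below are purely
illustrative), such a test module could look like::

    from avocado import test

    class sleepless(test.Test):

        """
        Bare minimum example: the test payload lives in the action() method.
        """

        def action(self):
            self.log.info('My test payload goes here')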
Simple example
==============
......@@ -274,9 +274,208 @@ location of the test suite code (tarball) through
decompress the suite tarball, followed by ``build.make``, which will build the
suite.
The ``action`` method just gets into the base directory of the compiled suite
and executes the ``./synctest`` command, with appropriate parameters, using
:func:`avocado.utils.process.system`.
In this example, the ``action`` method just gets into the base directory of
the compiled suite and executes the ``./synctest`` command, with appropriate
parameters, using :func:`avocado.utils.process.system`.
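A rough sketch of what such an ``action`` method could look like, assuming
``os`` and ``avocado.utils.process`` are imported and that the suite takes two
numeric parameters (the parameter names below are illustrative)::

    def action(self):
        os.chdir(self.srcdir)
        # sync_length and sync_loop are illustrative parameter names
        cmd = './synctest %s %s' % (self.params.sync_length,
                                    self.params.sync_loop)
        process.system(cmd)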
Test Output Check and Output Record Mode
========================================
On many occasions, you want to go simpler: just check whether the output of a
given application matches an expected output. In order to help with this common
use case, we offer the option ``--output-check-record [mode]`` to the test runner::
--output-check-record OUTPUT_CHECK_RECORD
Record output streams of your tests to reference files
(valid options: none (do not record output streams),
all (record both stdout and stderr), stdout (record
only stdout), stderr (record only stderr). Default:
none
If this option is used, it will store the stdout or stderr of the process being
executed (or both, if you specified ``all``) to reference files: ``stdout.expected``
and ``stderr.expected``. Those files will be recorded in the test data dir. The
data dir is in the same directory as the test source file, named
``[source_file_name.data]``. Let's take as an example the test ``synctest.py``. In a
fresh checkout of avocado, you can see::
examples/tests/synctest.py.data/stderr.expected
examples/tests/synctest.py.data/stdout.expected
From those 2 files, only stdout.expected is non-empty::
$ cat examples/tests/synctest.py.data/stdout.expected
PAR : waiting
PASS : sync interrupted
The output files were originally obtained by running the test runner with the
option ``--output-check-record all``::
$ scripts/avocado run --output-check-record all synctest
JOB ID : bcd05e4fd33e068b159045652da9eb7448802be5
JOB LOG : /home/lmr/avocado/job-results/job-2014-09-25T20.20-bcd05e4/job.log
TESTS : 1
(1/1) synctest.py: PASS (2.20 s)
PASS : 1
ERROR : 0
FAIL : 0
SKIP : 0
WARN : 0
NOT FOUND : 0
TIME : 2.20 s
After the reference files are added, the check process is transparent, in the sense
that you do not need to provide special flags to the test runner.
Now, every time the test is executed, after it is done running, it will check
if the outputs are exactly right before considering the test as PASSed. If you want to override the default
behavior and skip output check entirely, you may provide the flag ``--disable-output-check`` to the test runner.
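For instance, to run the ``synctest.py`` example while skipping the output
check step, something like this would do::

    $ scripts/avocado run examples/tests/synctest.py --disable-output-check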
The :mod:`avocado.utils.process` APIs have a parameter ``allow_output_check`` (defaults to ``all``), so that you
can select which process outputs will go to the reference files, should you choose to record them. You may choose
``all``, for both stdout and stderr, ``stdout``, for the stdout only, ``stderr``, for the stderr only, or ``none``,
to allow neither of them to be recorded and checked.
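As an illustrative sketch (the class name and commands below are placeholders),
a test that wants only its main command's output in the reference files could
run auxiliary commands with ``allow_output_check='none'``::

    from avocado import test
    from avocado.utils import process

    class output_select(test.Test):

        def action(self):
            # auxiliary command: keep its streams out of the reference files
            process.system('uname -a', allow_output_check='none')
            # main command: only its stdout goes to the reference files
            process.system('echo Hello', allow_output_check='stdout')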
This process also works fine with dropin tests (random programs/shell scripts
that return 0 (PASSed) or != 0 (FAILed)). Let's consider our bogus example::
$ cat output_record.sh
#!/bin/bash
echo "Hello, world!"
Let's record the output for this one::
$ scripts/avocado run output_record.sh --output-check-record all
JOB ID : 25c4244dda71d0570b7f849319cd71fe1722be8b
JOB LOG : /home/lmr/avocado/job-results/job-2014-09-25T20.49-25c4244/job.log
TESTS : 1
(1/1) home/lmr/Code/avocado.lmr/output_record.sh: PASS (0.01 s)
PASS : 1
ERROR : 0
FAIL : 0
SKIP : 0
WARN : 0
NOT FOUND : 0
TIME : 0.01 s
After this is done, you'll notice that the test data directory
appeared at the same level as our shell script, containing 2 files::
$ ls output_record.sh.data/
stderr.expected stdout.expected
Let's look at what's in each of them::
$ cat output_record.sh.data/stdout.expected
Hello, world!
$ cat output_record.sh.data/stderr.expected
$
Now, every time this test runs, it'll take into account the expected files that
were recorded; there is no need to do anything else but run the test. Let's see what
happens if we change the ``stdout.expected`` file contents to ``Hello, avocado!``::
$ scripts/avocado run output_record.sh
JOB ID : f0521e524face93019d7cb99c5765aedd933cb2e
JOB LOG : /home/lmr/avocado/job-results/job-2014-09-25T20.52-f0521e5/job.log
TESTS : 1
(1/1) home/lmr/Code/avocado.lmr/output_record.sh: FAIL (0.02 s)
PASS : 0
ERROR : 0
FAIL : 1
SKIP : 0
WARN : 0
NOT FOUND : 0
TIME : 0.02 s
Verifying the failure reason::
$ cat /home/lmr/avocado/job-results/job-2014-09-25T20.52-f0521e5/job.log
20:52:38 test L0163 INFO | START home/lmr/Code/avocado.lmr/output_record.sh
20:52:38 test L0164 DEBUG|
20:52:38 test L0165 DEBUG| Test instance parameters:
20:52:38 test L0173 DEBUG|
20:52:38 test L0176 DEBUG| Default parameters:
20:52:38 test L0180 DEBUG|
20:52:38 test L0181 DEBUG| Test instance params override defaults whenever available
20:52:38 test L0182 DEBUG|
20:52:38 process L0242 INFO | Running '/home/lmr/Code/avocado.lmr/output_record.sh'
20:52:38 process L0310 DEBUG| [stdout] Hello, world!
20:52:38 test L0565 INFO | Command: /home/lmr/Code/avocado.lmr/output_record.sh
20:52:38 test L0565 INFO | Exit status: 0
20:52:38 test L0565 INFO | Duration: 0.00313782691956
20:52:38 test L0565 INFO | Stdout:
20:52:38 test L0565 INFO | Hello, world!
20:52:38 test L0565 INFO |
20:52:38 test L0565 INFO | Stderr:
20:52:38 test L0565 INFO |
20:52:38 test L0060 ERROR|
20:52:38 test L0063 ERROR| Traceback (most recent call last):
20:52:38 test L0063 ERROR| File "/home/lmr/Code/avocado.lmr/avocado/test.py", line 397, in check_reference_stdout
20:52:38 test L0063 ERROR| self.assertEqual(expected, actual, msg)
20:52:38 test L0063 ERROR| File "/usr/lib64/python2.7/unittest/case.py", line 551, in assertEqual
20:52:38 test L0063 ERROR| assertion_func(first, second, msg=msg)
20:52:38 test L0063 ERROR| File "/usr/lib64/python2.7/unittest/case.py", line 544, in _baseAssertEqual
20:52:38 test L0063 ERROR| raise self.failureException(msg)
20:52:38 test L0063 ERROR| AssertionError: Actual test sdtout differs from expected one:
20:52:38 test L0063 ERROR| Actual:
20:52:38 test L0063 ERROR| Hello, world!
20:52:38 test L0063 ERROR|
20:52:38 test L0063 ERROR| Expected:
20:52:38 test L0063 ERROR| Hello, avocado!
20:52:38 test L0063 ERROR|
20:52:38 test L0064 ERROR|
20:52:38 test L0529 ERROR| FAIL home/lmr/Code/avocado.lmr/output_record.sh -> AssertionError: Actual test sdtout differs from expected one:
Actual:
Hello, world!
Expected:
Hello, avocado!
20:52:38 test L0516 INFO |
As expected, the test failed because we changed its expectations.
Test log, stdout and stderr in native avocado modules
=====================================================
If needed, you can write directly to the expected stdout and stderr files
from the native test scope. It is important to make the distinction between
the following entities:
* The test logs
* The test expected stdout
* The test expected stderr
The first one is used for debugging and informational purposes. The framework
machinery uses logs to give you more detailed info about your test, so they
are not the most reliable source for stdout/stderr comparisons. You may log
something into the test logs using the methods of the :mod:`avocado.test.Test.log`
class attribute. Consider the example::
class output_test(test.Test):
def action(self):
self.log.info('This goes to the log and it is only informational')
If you need to write directly to the test stdout and stderr streams, there
are 2 other class attributes for that, :mod:`avocado.test.Test.stdout_log`
and :mod:`avocado.test.Test.stderr_log`, which have exactly the same methods
as the log object. So if you want to add content to your expected stdout and
stderr streams, you can do something like::
class output_test(test.Test):
def action(self):
self.log.info('This goes to the log and it is only informational')
self.stdout_log.info('This goes to the test stdout (will be recorded)')
self.stderr_log.info('This goes to the test stderr (will be recorded)')
Each of the last 2 statements will go to ``stdout.expected`` and
``stderr.expected`` respectively, should you choose ``--output-check-record all``,
and will be written to the files ``stdout`` and ``stderr`` of the job results dir
every time that test is executed.
Avocado Tests run on a separate process
=======================================
......@@ -439,6 +638,7 @@ This accomplishes a similar effect to the multiplex setup defined in there.
15:54:31 test L0400 ERROR| ERROR timeouttest.1 -> TestTimeoutError: Timeout reached waiting for timeouttest to end
15:54:31 test L0387 INFO |
Environment Variables for Dropin Tests
======================================
......@@ -473,8 +673,10 @@ Here are the current variables that Avocado exports to the tests:
Wrap Up
=======
While there are certainly other resources that can be used to build your tests,
we recommend you take a look at the example tests present in the ``tests``
directory, that contains a few samples to take some inspiration. It is also
recommended that you take a look at the :doc:`API documentation <api/modules>`
for more possibilities.
We recommend you take a look at the example tests present in the
``examples/tests`` directory, that contains a few samples to take some
inspiration from. That directory, besides containing examples, is also used by
the avocado self test suite to do functional testing of avocado itself.
It is also recommended that you take a look at the
:doc:`API documentation <api/modules>` for more possibilities.
#!/bin/sh
# This test demonstrates that shell scripts have access to avocado runtime
# information, exported through environment variables.
echo "Avocado Version: $AVOCADO_VERSION"
echo "Avocado Test basedir: $AVOCADO_TEST_BASEDIR"
echo "Avocado Test datadir: $AVOCADO_TEST_DATADIR"
echo "Avocado Test workdir: $AVOCADO_TEST_WORKDIR"
echo "Avocado Test srcdir: $AVOCADO_TEST_SRCDIR"
echo "Avocado Test logdir: $AVOCADO_TEST_LOGDIR"
echo "Avocado Test logfile: $AVOCADO_TEST_LOGFILE"
echo "Avocado Test outputdir: $AVOCADO_TEST_OUTPUTDIR"
echo "Avocado Test sysinfodir: $AVOCADO_TEST_SYSINFODIR"
test -d "$AVOCADO_TEST_BASEDIR" -a \
-d "$AVOCADO_TEST_WORKDIR" -a \
-d "$AVOCADO_TEST_SRCDIR" -a \
-d "$AVOCADO_TEST_LOGDIR" -a \
-f "$AVOCADO_TEST_LOGFILE" -a \
-d "$AVOCADO_TEST_OUTPUTDIR" -a \
-d "$AVOCADO_TEST_SYSINFODIR"
#!/bin/sh
# if you execute this script with avocado, avocado will check its stdout
# against the file stdout.expected and its stderr against the file stderr.expected,
# both located in output_check.sh.data, in the same directory as this source
# file.
# The expected files were generated using the option --output-check-record all
# of the avocado runner:
# avocado run output_check.sh --output-check-record all
echo "Hello, world!"
......@@ -42,9 +42,7 @@ class synctest(test.Test):
archive.extract(tarball_path, self.srcdir)
self.srcdir = os.path.join(self.srcdir, 'synctest')
if self.params.debug_symbols:
build.make(self.srcdir,
env='CFLAGS="-g -O0"',
extra_args='synctest')
build.make(self.srcdir, env='CFLAGS="-g -O0"', extra_args='synctest')
else:
build.make(self.srcdir)
......
PAR : waiting
PASS : sync interrupted
......@@ -82,6 +82,34 @@ The test directories will vary depending on your system and
installation method used. Still, it's pretty easy to find that out as shown
in the next section.
DEBUGGING TESTS
===============
When you are developing new tests, you frequently want to look at the raw
output of the job log on stdout, without having to tail the job log file.
In order to do that, you can pass --show-job-log to the avocado test runner::
$ scripts/avocado run examples/tests/sleeptest.py --show-job-log
Not logging /proc/slabinfo (lack of permissions)
START examples/tests/sleeptest.py
Test instance parameters:
id = examples/tests/sleeptest.py
Default parameters:
sleep_length = 1.0
Test instance params override defaults whenever available
Sleeping for 1.00 seconds
Not logging /var/log/messages (lack of permissions)
PASS examples/tests/sleeptest.py
Not logging /proc/slabinfo (lack of permissions)
As you can see, the UI output is suppressed and only the job log goes to
stdout, making this a useful feature for test development/debugging.
LISTING TESTS
=============
......@@ -200,6 +228,98 @@ while you are debugging it, avocado has no way to know about its status.
Avocado will automatically send a `continue` command to the debugger
when you disconnect from and exit gdb.
RECORDING TEST REFERENCE OUTPUT
===============================
As a tester, you may want to check if the output of a given application matches
an expected output. In order to help with this common use case, we offer the
option ``--output-check-record [mode]`` to the test runner. If this option is
used, it will store the stdout or stderr of the process being executed (or
both, if you specified ``all``) to reference files: ``stdout.expected`` and
``stderr.expected``.
Those files will be recorded in the test data dir. The data dir is in the same
directory as the test source file, named ``[source_file_name.data]``. Let's
take as an example the test ``synctest.py``. In a fresh checkout of avocado,
you can see::
examples/tests/synctest.py.data/stderr.expected
examples/tests/synctest.py.data/stdout.expected
From those 2 files, only stdout.expected is non-empty::
$ cat examples/tests/synctest.py.data/stdout.expected
PAR : waiting
PASS : sync interrupted
The output files were originally obtained by running the test runner with the
option ``--output-check-record all``::
$ avocado run --output-check-record all examples/tests/synctest.py
JOB ID : <id>
JOB LOG : /home/<user>/avocado/job-results/job-<date>-<shortid>/job.log
TESTS : 1
(1/1) examples/tests/synctest.py: PASS (2.20 s)
PASS : 1
ERROR : 0
FAIL : 0
SKIP : 0
WARN : 0
NOT FOUND : 0
TIME : 2.20 s
After the reference files are added, the check process is transparent, in the
sense that you do not need to provide special flags to the test runner.
Now, every time the test is executed, after it is done running, it will check
if the outputs are exactly right before considering the test as PASSed. If you
want to override the default behavior and skip output check entirely, you may
provide the flag ``--disable-output-check`` to the test runner.
The ``avocado.utils.process`` APIs have a parameter ``allow_output_check``
(defaults to ``all``), so that you can select which process outputs will go to
the reference files, should you choose to record them. You may choose ``all``,
for both stdout and stderr, ``stdout``, for the stdout only, ``stderr``, for
the stderr only, or ``none``, to allow neither of them to be recorded and
checked.
This process also works fine with dropin tests (random executables that
return 0 (PASSed) or != 0 (FAILed)). Let's consider our bogus example::
$ cat output_record.sh
#!/bin/bash
echo "Hello, world!"
Let's record the output (both stdout and stderr) for this one::
$ avocado run output_record.sh --output-check-record all
JOB ID : <id>
JOB LOG : /home/<user>/avocado/job-results/job-<date>-<shortid>/job.log
TESTS : 1
(1/1) home/lmr/Code/avocado.lmr/output_record.sh: PASS (0.01 s)
PASS : 1
ERROR : 0
FAIL : 0
SKIP : 0
WARN : 0
NOT FOUND : 0
TIME : 0.01 s
After this is done, you'll notice that the test data directory
appeared at the same level as our shell script, containing 2 files::
$ ls output_record.sh.data/
stderr.expected stdout.expected
Let's look at what's in each of them::
$ cat output_record.sh.data/stdout.expected
Hello, world!
$ cat output_record.sh.data/stderr.expected
$
Now, every time this test runs, it'll take into account the expected files that
were recorded; there is no need to do anything else but run the test.
FILES
=====
......
......@@ -174,7 +174,7 @@ class RunnerOperationTest(unittest.TestCase):
expected_output = 'Empty test ID. A test path or alias must be provided'
expected_output_2 = 'usage:'
self.assertEqual(result.exit_status, expected_rc)
self.assertIn(expected_output, result.stderr)
self.assertIn(expected_output, result.stdout)
self.assertIn(expected_output_2, result.stdout)
def test_not_found(self):
......@@ -183,8 +183,8 @@ class RunnerOperationTest(unittest.TestCase):
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
self.assertEqual(result.exit_status, expected_rc)
self.assertIn('NOT_FOUND', result.stderr)
self.assertIn('NOT FOUND : 1', result.stderr)
self.assertIn('NOT_FOUND', result.stdout)
self.assertIn('NOT FOUND : 1', result.stdout)
def test_invalid_unique_id(self):
cmd_line = './scripts/avocado run --force-job-id foobar skiptest'
......@@ -197,8 +197,8 @@ class RunnerOperationTest(unittest.TestCase):
cmd_line = './scripts/avocado run --force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 skiptest'
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(0, result.exit_status)
self.assertNotIn('needs to be a 40 digit hex', result.stderr)
self.assertIn('SKIP', result.stderr)
self.assertNotIn('needs to be a 40 digit hex', result.stdout)
self.assertIn('SKIP', result.stdout)
def test_automatic_unique_id(self):
cmd_line = './scripts/avocado run skiptest --json -'
......
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import os
import shutil
import sys
import tempfile
import unittest
# simple magic for using scripts within a source tree
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
basedir = os.path.abspath(basedir)
if os.path.isdir(os.path.join(basedir, 'avocado')):
sys.path.append(basedir)
from avocado.utils import process
OUTPUT_SCRIPT_CONTENTS = """#!/bin/sh
echo "Hello, avocado!"
"""
class RunnerDropinTest(unittest.TestCase):
def setUp(self):
self.base_logdir = tempfile.mkdtemp(prefix='avocado_output_check_functional')
self.output_script = os.path.join(self.base_logdir, 'output_check.sh')
with open(self.output_script, 'w') as output_script_obj:
output_script_obj.write(OUTPUT_SCRIPT_CONTENTS)
os.chmod(self.output_script, 0775)
def test_output_record_none(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run %s --output-check-record none' % self.output_script
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
stdout_file = os.path.join("%s.data/stdout.expected" % self.output_script)
stderr_file = os.path.join("%s.data/stderr.expected" % self.output_script)
self.assertFalse(os.path.isfile(stdout_file))
self.assertFalse(os.path.isfile(stderr_file))
def test_output_record_stdout(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run %s --output-check-record stdout' % self.output_script
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
stdout_file = os.path.join("%s.data/stdout.expected" % self.output_script)
stderr_file = os.path.join("%s.data/stderr.expected" % self.output_script)
self.assertTrue(os.path.isfile(stdout_file))
self.assertFalse(os.path.isfile(stderr_file))
def test_output_record_all(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run %s --output-check-record all' % self.output_script
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
stdout_file = os.path.join("%s.data/stdout.expected" % self.output_script)
stderr_file = os.path.join("%s.data/stderr.expected" % self.output_script)
self.assertTrue(os.path.isfile(stdout_file))
self.assertTrue(os.path.isfile(stderr_file))
def test_output_record_and_check(self):
self.test_output_record_all()
cmd_line = './scripts/avocado run %s' % self.output_script
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
def test_output_tamper_stdout(self):
self.test_output_record_all()
tampered_msg = "I PITY THE FOOL THAT STANDS ON MY WAY!"
stdout_file = os.path.join("%s.data/stdout.expected" % self.output_script)
with open(stdout_file, 'w') as stdout_file_obj:
stdout_file_obj.write(tampered_msg)
cmd_line = './scripts/avocado run %s --xunit -' % self.output_script
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertIn(tampered_msg, result.stdout)
def test_disable_output_check(self):
self.test_output_record_all()
tampered_msg = "I PITY THE FOOL THAT STANDS ON MY WAY!"
stdout_file = os.path.join("%s.data/stdout.expected" % self.output_script)
with open(stdout_file, 'w') as stdout_file_obj:
stdout_file_obj.write(tampered_msg)
cmd_line = './scripts/avocado run %s --disable-output-check --xunit -' % self.output_script
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertNotIn(tampered_msg, result.stdout)
def tearDown(self):
if os.path.isdir(self.base_logdir):
shutil.rmtree(self.base_logdir, ignore_errors=True)
if __name__ == '__main__':
unittest.main()
......@@ -68,9 +68,9 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
error_excerpt = "Avocado could not set --json and --xunit both to output to stdout."
error_excerpt = "Options --json --xunit are trying to use stdout simultaneously"
self.assertIn(error_excerpt, output,
"Missing excepted error message from output:\n%s" % output)
"Missing excerpt error message from output:\n%s" % output)
def test_output_incompatible_setup_2(self):
os.chdir(basedir)
......@@ -81,9 +81,9 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
error_excerpt = "Avocado could not set --json and --vm both to output to stdout."
error_excerpt = "Options --json --vm are trying to use stdout simultaneously"
self.assertIn(error_excerpt, output,
"Missing excepted error message from output:\n%s" % output)
"Missing excerpt error message from output:\n%s" % output)
def test_output_compatible_setup(self):
tmpfile = tempfile.mktemp()
......@@ -128,7 +128,7 @@ class OutputPluginTest(unittest.TestCase):
except OSError:
pass
def test_output_compatible_setup_nooutput(self):
def test_output_compatible_setup_3(self):
tmpfile = tempfile.mktemp()
tmpfile2 = tempfile.mktemp()
os.chdir(basedir)
......@@ -140,8 +140,33 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, "",
"Output is not empty as expected:\n%s" % output)
self.assertNotEqual(output, "", "Output is empty")
# Check if we are producing valid outputs
with open(tmpfile2, 'r') as fp:
json_results = json.load(fp)
debug_log = json_results['debuglog']
self.check_output_files(debug_log)
minidom.parse(tmpfile)
finally:
try:
os.remove(tmpfile)
os.remove(tmpfile2)
except OSError:
pass
def test_output_compatible_setup_nooutput(self):
tmpfile = tempfile.mktemp()
tmpfile2 = tempfile.mktemp()
os.chdir(basedir)
cmd_line = './scripts/avocado run --silent --xunit %s --json %s sleeptest' % (tmpfile, tmpfile2)
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = 0
try:
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, "", "Output is not empty:\n%s" % output)
# Check if we are producing valid outputs
with open(tmpfile2, 'r') as fp:
json_results = json.load(fp)
......@@ -155,6 +180,26 @@ class OutputPluginTest(unittest.TestCase):
except OSError:
pass
def test_show_job_log(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run sleeptest --show-job-log'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
def test_silent_trumps_show_job_log(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run sleeptest --show-job-log --silent'
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, "")
def test_default_enabled_plugins(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run sleeptest'
......
......@@ -37,10 +37,19 @@ class _Stream(object):
def start_file_logging(self, param1, param2):
pass
def log_header(self, param):
def stop_file_logging(self):
pass
def stop_file_logging(self):
def set_tests_info(self, info):
pass
def notify(self, event, msg):
pass
def add_test(self, state):
pass
def set_test_status(self, status, state):
pass
......
......@@ -37,16 +37,19 @@ class _Stream(object):
def start_file_logging(self, param1, param2):
pass
def info(self, msg, skip_newline=False):
def stop_file_logging(self):
pass
def log_header(self, param):
def set_tests_info(self, info):
pass
def stop_file_logging(self):
def notify(self, event, msg):
pass
def add_test(self, state):
pass
def log_pass(self, param1):
def set_test_status(self, status, state):
pass
......
......@@ -36,13 +36,34 @@ class ParseXMLError(Exception):
pass
class _Stream(object):
def start_file_logging(self, param1, param2):
pass
def stop_file_logging(self):
pass
def set_tests_info(self, info):
pass
def notify(self, event, msg):
pass
def add_test(self, state):
pass
def set_test_status(self, status, state):
pass
class xUnitSucceedTest(unittest.TestCase):
def setUp(self):
self.tmpfile = mkstemp()
args = argparse.Namespace()
args.xunit_output = self.tmpfile[1]
self.test_result = xunit.xUnitTestResult(args=args)
self.test_result = xunit.xUnitTestResult(stream=_Stream(), args=args)
self.test_result.start_tests()
self.test1 = test.Test(job=job.Job())
self.test1.status = 'PASS'
......