提交 2105f912 编写于 作者: L Lukáš Doktor 提交者: Cleber Rosa

avocado: Remove the "View" concept

The "View" concept was developed to abstract the messages and events.
The problem is it was only abused to pass the status to other places, to
colorize the messages and to allow creating paginated view.

There already is a nice class hooked to all important events, the
"TestResult".

For (not only human readable) messages this commit uses the standard
python logging as it's pretty well known, widely used and very scalable.
The colored output is handled by already existing
"ProgressStreamHandler", which maps: DEBUG,INFO,WARNING and >=ERROR
messages to previously existing: minor, message, warning and error event
types.

The paginator was unified and is initialized during logging reconfigure.
During reconfigure all previously logged messages are re-logged into the
output so one does not lose those messages. Another great difference is
that the Paginator is cleaned at exit by avocado and does not require
complex handling to avoid broken console. To use paginator one just
enables it in "args" and writes to any available stream/stdout/stderr.
Signed-off-by: Lukáš Doktor <ldoktor@redhat.com>
上级 0a6cd8ce
......@@ -16,6 +16,7 @@
The core Avocado application.
"""
import logging
import os
import signal
......@@ -68,7 +69,7 @@ class AvocadoApp(object):
failures = (self.cli_dispatcher.load_failures +
self.cli_cmd_dispatcher.load_failures)
if failures:
view = output.View(self.parser.args)
log = logging.getLogger("avocado.app")
msg_fmt = 'Failed to load plugin from module "%s": %s'
silenced = settings.get_value('plugins',
'skip_broken_plugin_notification',
......@@ -76,14 +77,19 @@ class AvocadoApp(object):
for failure in failures:
if failure[0].module_name in silenced:
continue
msg = msg_fmt % (failure[0].module_name,
failure[1].__repr__())
view.notify(event='error', msg=msg)
log.error(msg_fmt, failure[0].module_name,
failure[1].__repr__())
def run(self):
try:
extension = self.cli_cmd_dispatcher[self.parser.args.subcommand]
except KeyError:
return
method = extension.obj.run
return method(self.parser.args)
try:
subcmd = self.parser.args.subcommand
extension = self.cli_cmd_dispatcher[subcmd]
except KeyError:
return
method = extension.obj.run
return method(self.parser.args)
finally:
# This makes sure we cleanup the console (stty echo). The only way
# to avoid cleaning it is to kill the less (paginator) directly
output.stop_logging()
......@@ -205,7 +205,7 @@ class HTMLTestResult(TestResult):
Called once before any tests are executed.
"""
TestResult.start_tests(self)
self.json = {'debuglog': self.stream.logfile,
self.json = {'debuglog': self.logfile,
'job_id': runtime.CURRENT_JOB.unique_id,
'tests': []}
......
......@@ -83,6 +83,7 @@ class Job(object):
args = argparse.Namespace()
self.args = args
self.urls = getattr(args, "url", [])
self.log = logging.getLogger("avocado.app")
self.standalone = getattr(self.args, 'standalone', False)
if getattr(self.args, "dry_run", False): # Modify args for dry-run
if not self.args.unique_job_id:
......@@ -95,7 +96,6 @@ class Job(object):
if unique_id is None:
unique_id = job_id.create_unique_job_id()
self.unique_id = unique_id
self.view = output.View(app_args=self.args)
self.logdir = None
raw_log_level = settings.get_value('job.output', 'loglevel',
default='debug')
......@@ -115,9 +115,12 @@ class Job(object):
self.result_proxy = result.TestResultProxy()
self.sysinfo = None
self.timeout = getattr(self.args, 'job_timeout', 0)
self.__logging_file_handler = None
self.__logging_stream_handler = None
self.funcatexit = data_structures.CallbackRegister("JobExit %s"
% self.unique_id,
_TEST_LOGGER)
self.stdout_stderr = None
self.replay_sourcejob = getattr(self.args, 'replay_sourcejob', None)
def _setup_job_results(self):
......@@ -141,6 +144,47 @@ class Job(object):
with open(self.idfile, 'w') as id_file_obj:
id_file_obj.write("%s\n" % self.unique_id)
def __start_job_logging(self):
    """
    Enable the job's file logging and, optionally, console logging.

    Attaches a :class:`logging.FileHandler` writing to ``self.logfile``
    to both the 'avocado.test' logger and the root logger.  When the
    user requested live test output ('test' in ``--show`` without
    'early'), also redirects sys.stdout/sys.stderr and attaches a
    StreamHandler to the same loggers.

    Undone by :meth:`__stop_job_logging`.
    """
    # Enable file loggers
    self.__logging_file_handler = logging.FileHandler(filename=self.logfile)
    self.__logging_file_handler.setLevel(self.loglevel)
    fmt = ('%(asctime)s %(module)-16.16s L%(lineno)-.4d %('
           'levelname)-5.5s| %(message)s')
    formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')
    self.__logging_file_handler.setFormatter(formatter)
    test_logger = logging.getLogger('avocado.test')
    test_logger.addHandler(self.__logging_file_handler)
    test_logger.setLevel(self.loglevel)
    root_logger = logging.getLogger()
    root_logger.addHandler(self.__logging_file_handler)
    root_logger.setLevel(self.loglevel)
    # Enable console loggers, but only when 'early' output is not already
    # streaming everything to the console
    enabled_logs = getattr(self.args, "show", [])
    if ('test' in enabled_logs and
            'early' not in enabled_logs):
        # Remember the original streams so __stop_job_logging can
        # restore them later
        self.stdout_stderr = sys.stdout, sys.stderr
        sys.stdout = output.STDOUT
        sys.stderr = output.STDERR
        self.__logging_stream_handler = logging.StreamHandler()
        test_logger.addHandler(self.__logging_stream_handler)
        root_logger.addHandler(self.__logging_stream_handler)
def __stop_job_logging(self):
    """
    Undo :meth:`__start_job_logging`: restore the original
    stdout/stderr streams and detach the handlers added there.
    """
    # Restore the original streams if they were redirected
    if self.stdout_stderr:
        sys.stdout, sys.stderr = self.stdout_stderr
    test_logger = logging.getLogger('avocado.test')
    root_logger = logging.getLogger()
    # File loggers
    if self.__logging_file_handler:
        test_logger.removeHandler(self.__logging_file_handler)
        root_logger.removeHandler(self.__logging_file_handler)
        self.__logging_file_handler.close()
    # Console loggers
    if self.__logging_stream_handler:
        test_logger.removeHandler(self.__logging_stream_handler)
        root_logger.removeHandler(self.__logging_stream_handler)
def _update_latest_link(self):
"""
Update the latest job result symbolic link [avocado-logs-dir]/latest.
......@@ -232,12 +276,10 @@ class Job(object):
op_set_stdout = self.result_proxy.output_plugins_using_stdout()
if len(op_set_stdout) > 1:
msg = ('Options %s are trying to use stdout simultaneously' %
" ".join(op_set_stdout))
self.view.notify(event='error', msg=msg)
msg = ('Please set at least one of them to a file to avoid '
'conflicts')
self.view.notify(event='error', msg=msg)
self.log.error('Options %s are trying to use stdout '
'simultaneously', " ".join(op_set_stdout))
self.log.error('Please set at least one of them to a file to '
'avoid conflicts')
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
if not op_set_stdout and not self.standalone:
......@@ -401,10 +443,8 @@ class Job(object):
that configure a job failure.
"""
self._setup_job_results()
self.view.start_job_logging(self.logfile,
self.loglevel,
self.unique_id,
self.replay_sourcejob)
self.__start_job_logging()
try:
test_suite = self._make_test_suite(self.urls)
except loader.LoaderError as details:
......@@ -435,13 +475,11 @@ class Job(object):
self._log_job_debug_info(mux)
replay.record(self.args, self.logdir, mux, self.urls)
self.view.logfile = self.logfile
replay_map = getattr(self.args, 'replay_map', None)
failures = self.test_runner.run_suite(test_suite, mux,
timeout=self.timeout,
replay_map=replay_map)
self.view.stop_job_logging()
self.__stop_job_logging()
# If it's all good so far, set job status to 'PASS'
if self.status == 'RUNNING':
self.status = 'PASS'
......@@ -482,11 +520,10 @@ class Job(object):
except exceptions.JobBaseException as details:
self.status = details.status
fail_class = details.__class__.__name__
self.view.notify(event='error', msg=('\nAvocado job failed: %s: %s'
% (fail_class, details)))
self.log.error('\nAvocado job failed: %s: %s', fail_class, details)
return exit_codes.AVOCADO_JOB_FAIL
except exceptions.OptionValidationError as details:
self.view.notify(event='error', msg='\n' + str(details))
self.log.error('\n' + str(details))
return exit_codes.AVOCADO_JOB_FAIL
except Exception as details:
......@@ -495,15 +532,12 @@ class Job(object):
tb_info = traceback.format_exception(exc_type, exc_value,
exc_traceback.tb_next)
fail_class = details.__class__.__name__
self.view.notify(event='error', msg=('\nAvocado crashed: %s: %s' %
(fail_class, details)))
self.log.error('\nAvocado crashed: %s: %s', fail_class, details)
for line in tb_info:
self.view.notify(event='minor', msg=line)
self.view.notify(event='error', msg=('Please include the traceback '
'info and command line used on '
'your bug report'))
self.view.notify(event='error', msg=('Report bugs visiting %s' %
_NEW_ISSUE_LINK))
self.log.debug(line)
self.log.error("Please include the traceback info and command line"
" used on your bug report")
self.log.error('Report bugs visiting %s', _NEW_ISSUE_LINK)
return exit_codes.AVOCADO_FAIL
finally:
if not settings.get_value('runner.behavior', 'keep_tmp_files',
......
......@@ -17,8 +17,8 @@ JSON output module.
"""
import json
import logging
from . import output
from .result import TestResult
......@@ -40,15 +40,15 @@ class JSONTestResult(TestResult):
self.output = force_json_file
else:
self.output = getattr(self.args, 'json_output', '-')
self.view = output.View(app_args=self.args)
self.json = None
self.log = logging.getLogger("avocado.app")
def start_tests(self):
"""
Called once before any tests are executed.
"""
TestResult.start_tests(self)
self.json = {'debuglog': self.stream.logfile,
self.json = {'debuglog': self.logfile,
'tests': []}
def end_test(self, state):
......@@ -93,6 +93,6 @@ class JSONTestResult(TestResult):
})
self.json = json.dumps(self.json)
if self.output == '-':
self.view.notify(event='minor', msg=self.json)
self.log.debug(self.json)
else:
self._save_json()
......@@ -36,8 +36,8 @@ else:
NULL_HANDLER = logutils.NullHandler
STDOUT = sys.stdout
STDERR = sys.stderr
STDOUT = _STDOUT = sys.stdout
STDERR = _STDERR = sys.stderr
BUILTIN_STREAMS = {'app': 'application output',
'test': 'test output',
......@@ -82,6 +82,11 @@ def reconfigure(args):
Adjust logging handlers accordingly to app args and re-log messages.
"""
# Reconfigure stream loggers
global STDOUT
global STDERR
if getattr(args, "paginator", False) == "on" and is_colored_term():
STDOUT = Paginator()
STDERR = STDOUT
enabled = getattr(args, "show", None)
if not isinstance(enabled, list):
enabled = ["app"]
......@@ -104,7 +109,7 @@ def reconfigure(args):
app_logger.addHandler(app_handler)
app_logger.propagate = False
app_logger.level = logging.DEBUG
app_err_handler = logging.StreamHandler()
app_err_handler = ProgressStreamHandler()
app_err_handler.setFormatter(logging.Formatter("%(message)s"))
app_err_handler.addFilter(FilterWarnAndMore())
app_err_handler.stream = STDERR
......@@ -140,7 +145,6 @@ def reconfigure(args):
else:
disable_log_handler("avocado.app.debug")
enable_stderr()
# Add custom loggers
for name in [_ for _ in enabled if _ not in BUILTIN_STREAMS.iterkeys()]:
stream_level = re.split(r'(?<!\\):', name, maxsplit=1)
......@@ -166,6 +170,13 @@ def reconfigure(args):
logging.getLogger(record.name).handle(record)
def stop_logging():
    """
    Restore sys.stdout/sys.stderr and close the paginator, if one is active.

    reconfigure() replaces the module-level STDOUT/STDERR with a Paginator
    when paginator mode is enabled; closing it here cleans up the console
    (e.g. terminal echo left disabled by the pager).
    """
    if isinstance(STDOUT, Paginator):
        sys.stdout = _STDOUT
        sys.stderr = _STDERR
        STDOUT.close()
class FilterWarnAndMore(logging.Filter):
def filter(self, record):
......@@ -187,6 +198,14 @@ class ProgressStreamHandler(logging.StreamHandler):
def emit(self, record):
try:
msg = self.format(record)
if record.levelno < logging.INFO: # Most messages are INFO
pass
elif record.levelno < logging.WARNING:
msg = term_support.header_str(msg)
elif record.levelno < logging.ERROR:
msg = term_support.warn_header_str(msg)
else:
msg = term_support.fail_header_str(msg)
stream = self.stream
skip_newline = False
if hasattr(record, 'skip_newline'):
......@@ -260,18 +279,6 @@ class Paginator(object):
pass
def get_paginator():
"""
Get a paginator.
The paginator is whatever the user sets as $PAGER, or 'less', or if all
else fails, sys.stdout. It is a useful feature inspired in programs such
as git, since it lets you scroll up and down large buffers of text,
increasing the program's usability.
"""
return Paginator()
def add_log_handler(logger, klass=logging.StreamHandler, stream=sys.stdout,
level=logging.INFO, fmt='%(name)s: %(message)s'):
"""
......@@ -557,221 +564,3 @@ class Throbber(object):
result = self.MOVES[self.position]
self._update_position()
return result
class View(object):
"""
Takes care of both disk logs and stdout/err logs.
"""
def __init__(self, app_args=None, console_logger='avocado.app',
use_paginator=False):
"""
Set up the console logger and the paginator mode.
:param console_logger: logging.Logger identifier for the main app
logger.
:type console_logger: str
:param use_paginator: Whether to use paginator mode. Set it to True if
the program is supposed to output a large list of
lines to the user and you want the user to be able
to scroll through them at will (think git log).
"""
self.app_args = app_args
self.use_paginator = use_paginator
self.console_log = logging.getLogger(console_logger)
if self.use_paginator:
self.paginator = get_paginator()
else:
self.paginator = None
self.throbber = Throbber()
self.tests_info = {}
self.file_handler = None
self.stream_handler = None
def cleanup(self):
if self.use_paginator:
self.paginator.close()
def notify(self, event='message', msg=None, skip_newline=False):
mapping = {'message': self._log_ui_header,
'minor': self._log_ui_minor,
'error': self._log_ui_error,
'warning': self._log_ui_warning,
'partial': self._log_ui_partial}
if msg is not None:
mapping[event](msg=msg, skip_newline=skip_newline)
def notify_progress(self, progress):
"""
Give an interactive indicator of the test progress
:param progress: if indication of progress came explicitly from the
test. If false, it means the test process is running,
but not communicating test specific progress.
:type progress: bool
:rtype: None
"""
if progress:
self._log_ui_healthy(self.throbber.render(), True)
else:
self._log_ui_partial(self.throbber.render(), True)
def add_test(self, state):
self._log(msg=self._get_test_tag(state['tagged_name']),
skip_newline=True)
def set_test_status(self, status, state):
"""
Log a test status message
:param status: the test status
:param state: test state (used to get 'time_elapsed')
"""
mapping = {'PASS': term_support.pass_str,
'ERROR': term_support.error_str,
'FAIL': term_support.fail_str,
'SKIP': term_support.skip_str,
'WARN': term_support.warn_str,
'INTERRUPTED': term_support.interrupt_str}
if status == 'SKIP':
msg = mapping[status]()
else:
msg = mapping[status]() + " (%.2f s)" % state['time_elapsed']
self._log_ui_info(msg)
def set_tests_info(self, info):
self.tests_info.update(info)
def _get_test_tag(self, test_name):
return (' (%s/%s) %s: ' %
(self.tests_info['tests_run'],
self.tests_info['tests_total'], test_name))
def _log(self, msg, level=logging.INFO, skip_newline=False):
"""
Write a message to the avocado.app logger or the paginator.
:param msg: Message to write
:type msg: string
"""
enabled = getattr(self.app_args, "log", [])
if "app" in enabled and self.use_paginator and level < logging.ERROR:
if not skip_newline:
msg += '\n'
self.paginator.write(msg)
else:
extra = {'skip_newline': skip_newline}
self.console_log.log(level=level, msg=msg, extra=extra)
def _log_ui_info(self, msg, skip_newline=False):
"""
Log a :mod:`logging.INFO` message to the UI.
:param msg: Message to write.
"""
self._log(msg, level=logging.INFO, skip_newline=skip_newline)
def _log_ui_error_base(self, msg, skip_newline=False):
"""
Log a :mod:`logging.ERROR` message to the UI.
:param msg: Message to write.
"""
self._log(msg, level=logging.ERROR, skip_newline=skip_newline)
def _log_ui_healthy(self, msg, skip_newline=False):
"""
Log a message that indicates that things are going as expected.
:param msg: Message to write.
"""
self._log_ui_info(term_support.healthy_str(msg), skip_newline)
def _log_ui_partial(self, msg, skip_newline=False):
"""
Log a message that indicates something (at least) partially OK
:param msg: Message to write.
"""
self._log_ui_info(term_support.partial_str(msg), skip_newline)
def _log_ui_header(self, msg, skip_newline=False):
"""
Log a header message.
:param msg: Message to write.
"""
self._log_ui_info(term_support.header_str(msg), skip_newline)
def _log_ui_minor(self, msg, skip_newline=False):
"""
Log a minor message.
:param msg: Message to write.
"""
self._log_ui_info(msg, skip_newline)
def _log_ui_error(self, msg, skip_newline=False):
"""
Log an error message (useful for critical errors).
:param msg: Message to write.
"""
self._log_ui_error_base(term_support.fail_header_str(msg), skip_newline)
def _log_ui_warning(self, msg, skip_newline=False):
"""
Log a warning message (useful for warning messages).
:param msg: Message to write.
"""
self._log_ui_info(term_support.warn_header_str(msg), skip_newline)
def start_job_logging(self, logfile, loglevel, unique_id, sourcejob=None):
"""
Start the main file logging.
:param logfile: Path to file that will receive logging.
:param loglevel: Level of the logger. Example: :mod:`logging.DEBUG`.
:param unique_id: job.Job() unique id attribute.
"""
self.job_unique_id = unique_id
self.debuglog = logfile
# File loggers
self.file_handler = logging.FileHandler(filename=logfile)
self.file_handler.setLevel(loglevel)
fmt = ('%(asctime)s %(module)-16.16s L%(lineno)-.4d %('
'levelname)-5.5s| %(message)s')
formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')
self.file_handler.setFormatter(formatter)
test_logger = logging.getLogger('avocado.test')
test_logger.addHandler(self.file_handler)
test_logger.setLevel(loglevel)
root_logger = logging.getLogger()
root_logger.addHandler(self.file_handler)
root_logger.setLevel(loglevel)
# Console loggers
if ('test' in self.app_args.show and
'early' not in self.app_args.show):
self.stream_handler = ProgressStreamHandler()
test_logger.addHandler(self.stream_handler)
root_logger.addHandler(self.stream_handler)
self.replay_sourcejob = sourcejob
def stop_job_logging(self):
"""
Simple helper for removing a handler from the current logger.
"""
# File loggers
test_logger = logging.getLogger('avocado.test')
root_logger = logging.getLogger()
test_logger.removeHandler(self.file_handler)
root_logger.removeHandler(self.file_handler)
self.file_handler.close()
# Console loggers
if self.stream_handler:
test_logger.removeHandler(self.stream_handler)
root_logger.removeHandler(self.stream_handler)
......@@ -78,12 +78,11 @@ class RemoteTestRunner(TestRunner):
def setup(self):
""" Setup remote environment and copy test directories """
self.job.view.notify(event='message',
msg=("LOGIN : %s@%s:%d (TIMEOUT: %s seconds)"
% (self.job.args.remote_username,
self.job.args.remote_hostname,
self.job.args.remote_port,
self.job.args.remote_timeout)))
self.job.log.info("LOGIN : %s@%s:%d (TIMEOUT: %s seconds)",
self.job.args.remote_username,
self.job.args.remote_hostname,
self.job.args.remote_port,
self.job.args.remote_timeout)
self.remote = remoter.Remote(self.job.args.remote_hostname,
self.job.args.remote_username,
self.job.args.remote_password,
......@@ -265,8 +264,7 @@ class VMTestRunner(RemoteTestRunner):
def setup(self):
# Super called after VM is found and initialized
self.job.view.notify(event='message', msg="DOMAIN : %s"
% self.job.args.vm_domain)
self.job.log.info("DOMAIN : %s", self.job.args.vm_domain)
self.vm = virt.vm_connect(self.job.args.vm_domain,
self.job.args.vm_hypervisor_uri)
if self.vm is None:
......
......@@ -20,7 +20,6 @@ import pickle
import sys
from . import exit_codes
from . import output
from .test import ReplaySkipTest
from .settings import settings
......@@ -101,7 +100,6 @@ def retrieve_replay_map(resultsdir, replay_filter):
def get_resultsdir(logdir, jobid):
view = output.View()
matches = 0
short_jobid = jobid[:7]
if len(short_jobid) < 7:
......@@ -112,8 +110,9 @@ def get_resultsdir(logdir, jobid):
match_file = id_file
matches += 1
if matches > 1:
msg = "hash '%s' is not unique enough" % jobid
view.notify(event='error', msg=(msg))
from logging import getLogger
getLogger("avocado.app").error("hash '%s' is not unique "
"enough", jobid)
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
if matches == 1:
......
......@@ -6,18 +6,22 @@ Module that implements the actions for the CLI App when the job toplevel
command is used
"""
import logging
from . import base
from ... import connection
log = logging.getLogger("avocado.app")
@base.action
def status(app):
"""
Shows the server status
"""
data = app.connection.request("version/")
app.view.notify(event="message",
msg="Server version: %s" % data.get('version'))
log.info("Server version: %s", data.get('version'))
@base.action
......@@ -29,12 +33,9 @@ def list_brief(app):
data = app.connection.get_api_list()
except connection.UnexpectedHttpStatusCode as e:
if e.received == 403:
app.view.notify(event="error",
msg="Error: Access Forbidden")
log.error("Error: Access Forbidden")
return False
app.view.notify(event="message",
msg="Available APIs:")
log.info("Available APIs:")
for name in data:
app.view.notify(event="message",
msg=" * %s" % name)
log.info(" * %s", name)
......@@ -16,12 +16,12 @@ This is the main entry point for the rest client cli application
"""
import importlib
import logging
import sys
import types
from . import parser
from .. import connection
from ... import output
from ... import exit_codes
......@@ -48,7 +48,7 @@ class App(object):
self.connection = None
self.parser = parser.Parser()
self.parser.add_arguments_on_all_modules()
self.view = output.View()
self.log = logging.getLogger("avocado.app")
def initialize_connection(self):
"""
......@@ -61,16 +61,13 @@ class App(object):
username=self.args.username,
password=self.args.password)
except connection.InvalidConnectionError:
self.view.notify(event="error",
msg="Error: could not connect to the server")
self.log.error("Error: could not connect to the server")
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
except connection.InvalidServerVersionError:
self.view.notify(event="error",
msg=("REST server version is higher than "
"than this client can support."))
self.view.notify(event="error",
msg=("Please use a more recent version "
"of the REST client application."))
self.log.error("REST server version is higher than "
"than this client can support.")
self.log.error("Please use a more recent version "
"of the REST client application.")
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
def dispatch_action(self):
......@@ -108,8 +105,7 @@ class App(object):
self.initialize_connection()
return kallable(self)
else:
self.view.notify(event="error",
msg="Action specified is not implemented")
self.log.error("Action specified is not implemented")
def run(self):
"""
......
......@@ -21,6 +21,9 @@ used by the test runner.
"""
import os
import logging
from . import output
class InvalidOutputPlugin(Exception):
......@@ -124,8 +127,9 @@ class TestResult(object):
:param job: an instance of :class:`avocado.core.job.Job`.
"""
self.job_unique_id = getattr(job, "unique_id", None)
self.logfile = getattr(job, "logfile", None)
self.args = getattr(job, "args", None)
self.stream = getattr(job, "view", None)
self.tests_total = getattr(self.args, 'test_result_total', 1)
self.tests_run = 0
self.total_time = 0.0
......@@ -161,7 +165,6 @@ class TestResult(object):
Called once before any tests are executed.
"""
self.tests_run += 1
self.stream.set_tests_info({'tests_run': self.tests_run})
def end_tests(self):
"""
......@@ -187,7 +190,6 @@ class TestResult(object):
"""
self.tests_run += 1
self.total_time += state['time_elapsed']
self.stream.set_tests_info({'tests_run': self.tests_run})
def add_pass(self, state):
"""
......@@ -265,106 +267,62 @@ class HumanTestResult(TestResult):
Human output Test result class.
"""
def __init__(self, job):
    """
    Initialize the human-readable test result.

    :param job: an instance of :class:`avocado.core.job.Job`.
    """
    super(HumanTestResult, self).__init__(job)
    # All human-readable output is emitted through the application logger
    self.log = logging.getLogger("avocado.app")
    # Renders the interactive progress indicator (see notify_progress)
    self.__throbber = output.Throbber()
def start_tests(self):
"""
Called once before any tests are executed.
"""
TestResult.start_tests(self)
self.stream.notify(event="message", msg="JOB ID : %s" % self.stream.job_unique_id)
if self.stream.replay_sourcejob is not None:
self.stream.notify(event="message", msg="SRC JOB ID : %s" %
self.stream.replay_sourcejob)
self.stream.notify(event="message", msg="JOB LOG : %s" % self.stream.logfile)
self.stream.notify(event="message", msg="TESTS : %s" % self.tests_total)
self.stream.set_tests_info({'tests_total': self.tests_total})
super(HumanTestResult, self).start_tests()
self.log.info("JOB ID : %s", self.job_unique_id)
if getattr(self.args, "replay_sourcejob", None):
self.log.info("SRC JOB ID : %s", self.args.replay_sourcejob)
self.log.info("JOB LOG : %s", self.logfile)
self.log.info("TESTS : %s", self.tests_total)
def end_tests(self):
"""
Called once after all tests are executed.
"""
super(HumanTestResult, self).end_tests()
self._reconcile()
self.stream.notify(event="message",
msg="RESULTS : PASS %d | ERROR %d | FAIL %d | "
"SKIP %d | WARN %d | INTERRUPT %s" %
(len(self.passed), len(self.errors),
len(self.failed), len(self.skipped),
len(self.warned), len(self.interrupted)))
self.log.info("RESULTS : PASS %d | ERROR %d | FAIL %d | SKIP %d | "
"WARN %d | INTERRUPT %s", len(self.passed),
len(self.errors), len(self.failed), len(self.skipped),
len(self.warned), len(self.interrupted))
if self.args is not None:
if 'html_output' in self.args:
logdir = os.path.dirname(self.stream.logfile)
logdir = os.path.dirname(self.logfile)
html_file = os.path.join(logdir, 'html', 'results.html')
self.stream.notify(event="message", msg=("JOB HTML : %s" %
html_file))
self.stream.notify(event="message",
msg="TIME : %.2f s" % self.total_time)
self.log.info("JOB HTML : %s", html_file)
self.log.info("TIME : %.2f s", self.total_time)
def start_test(self, state):
"""
Called when the given test is about to run.
:param state: result of :class:`avocado.core.test.Test.get_state`.
:type state: dict
"""
self.stream.add_test(state)
super(HumanTestResult, self).start_test(state)
self.log.debug(' (%s/%s) %s: ', self.tests_run, self.tests_total,
state["tagged_name"], extra={"skip_newline": True})
def end_test(self, state):
"""
Called when the given test has been run.
:param state: result of :class:`avocado.core.test.Test.get_state`.
:type state: dict
"""
TestResult.end_test(self, state)
def add_pass(self, state):
"""
Called when a test succeeded.
:param state: result of :class:`avocado.core.test.Test.get_state`.
:type state: dict
"""
TestResult.add_pass(self, state)
self.stream.set_test_status(status='PASS', state=state)
def add_error(self, state):
"""
Called when a test had a setup error.
:param state: result of :class:`avocado.core.test.Test.get_state`.
:type state: dict
"""
TestResult.add_error(self, state)
self.stream.set_test_status(status='ERROR', state=state)
def add_fail(self, state):
"""
Called when a test fails.
:param state: result of :class:`avocado.core.test.Test.get_state`.
:type state: dict
"""
TestResult.add_fail(self, state)
self.stream.set_test_status(status='FAIL', state=state)
def add_skip(self, state):
"""
Called when a test is skipped.
:param state: result of :class:`avocado.core.test.Test.get_state`.
:type state: dict
"""
TestResult.add_skip(self, state)
self.stream.set_test_status(status='SKIP', state=state)
def add_warn(self, state):
"""
Called when a test had a warning.
:param state: result of :class:`avocado.core.test.Test.get_state`.
:type state: dict
"""
TestResult.add_warn(self, state)
self.stream.set_test_status(status='WARN', state=state)
def notify_progress(self, progress_from_test=False):
self.stream.notify_progress(progress_from_test)
super(HumanTestResult, self).end_test(state)
status = state["status"]
if status == "TEST_NA":
status = "SKIP"
mapping = {'PASS': output.term_support.PASS,
'ERROR': output.term_support.ERROR,
'FAIL': output.term_support.FAIL,
'SKIP': output.term_support.SKIP,
'WARN': output.term_support.WARN,
'INTERRUPTED': output.term_support.INTERRUPT}
self.log.debug(output.term_support.MOVE_BACK + mapping[status] +
status + output.term_support.ENDC)
def notify_progress(self, progress=False):
    """
    Display one tick of the interactive test-progress indicator.

    :param progress: whether the test itself reported progress (True, shown
                     in the "pass" color) or the test process is merely
                     known to be running (False, shown in the "partial"
                     color).
    """
    tint = (output.term_support.PASS if progress
            else output.term_support.PARTIAL)
    frame = self.__throbber.render()
    self.log.debug(tint + frame + output.term_support.ENDC,
                   extra={"skip_newline": True})
......@@ -65,10 +65,9 @@ class TestStatus(object):
# Let's catch all exceptions, since errors here mean a
# crash in avocado.
except Exception as details:
e_msg = ("\nError receiving message from test: %s -> %s" %
(details.__class__, details))
self.job.view.notify(event="error",
msg=e_msg)
log = logging.getLogger("avocado.app")
log.error("\nError receiving message from test: %s -> %s",
details.__class__, details)
stacktrace.log_exc_info(sys.exc_info(),
'avocado.app.tracebacks')
return None
......@@ -148,7 +147,7 @@ class TestStatus(object):
if msg['paused']:
reason = msg['paused_msg']
if reason:
self.job.view.notify(event='partial', msg=reason)
self.job.log.warning(reason)
else: # test_status
self.status = msg
......@@ -371,17 +370,16 @@ class TestRunner(object):
ctrl_c_count += 1
if ctrl_c_count == 1:
if not stage_1_msg_displayed:
k_msg_1 = ('\nInterrupt requested. Waiting %d seconds '
'for test to finish '
'(ignoring new Ctrl+C until then)' %
ignore_window)
self.job.view.notify(event='message', msg=k_msg_1)
self.job.log.debug("\nInterrupt requested. Waiting %d "
"seconds for test to finish "
"(ignoring new Ctrl+C until then)",
ignore_window)
stage_1_msg_displayed = True
ignore_time_started = time.time()
if (ctrl_c_count > 1) and (time_elapsed > ignore_window):
if not stage_2_msg_displayed:
k_msg_2 = "Killing test subprocess %s" % proc.pid
self.job.view.notify(event='message', msg=k_msg_2)
self.job.log.debug("Killing test subprocess %s",
proc.pid)
stage_2_msg_displayed = True
os.kill(proc.pid, signal.SIGKILL)
......@@ -394,7 +392,7 @@ class TestRunner(object):
# don't process other tests from the list
if ctrl_c_count > 0:
self.job.view.notify(event='minor', msg='')
self.job.log.debug('')
self.result.check_test(test_state)
if not status.mapping[test_state['status']]:
......
......@@ -15,9 +15,9 @@
"""xUnit module."""
import datetime
import logging
from xml.sax.saxutils import quoteattr
from . import output
from .result import TestResult
......@@ -166,7 +166,7 @@ class xUnitTestResult(TestResult):
self.output = force_xunit_file
else:
self.output = getattr(self.args, 'xunit_output', '-')
self.stream = output.View(app_args=self.args)
self.log = logging.getLogger("avocado.app")
self.xml = XmlResult()
def start_tests(self):
......@@ -212,7 +212,7 @@ class xUnitTestResult(TestResult):
self.xml.end_testsuite(**values)
contents = self.xml.get_contents()
if self.output == '-':
self.stream.notify(event='minor', msg=contents)
self.log.debug(contents)
else:
with open(self.output, 'w') as xunit_output:
xunit_output.write(contents)
......@@ -12,7 +12,8 @@
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
from avocado.core import output
import logging
from avocado.core import data_dir
from avocado.core.settings import settings
......@@ -38,41 +39,37 @@ class Config(CLICmd):
'Current: %(default)s')
def run(self, args):
view = output.View(use_paginator=(args.paginator == 'on'))
try:
view.notify(event="message", msg='Config files read (in order):')
for cfg_path in settings.config_paths:
view.notify(event="message", msg=' %s' % cfg_path)
if settings.config_paths_failed:
view.notify(event="minor", msg='')
view.notify(event="error", msg='Config files that failed to read:')
for cfg_path in settings.config_paths_failed:
view.notify(event="error", msg=' %s' % cfg_path)
view.notify(event="minor", msg='')
if not args.datadir:
blength = 0
for section in settings.config.sections():
for value in settings.config.items(section):
clength = len('%s.%s' % (section, value[0]))
if clength > blength:
blength = clength
log = logging.getLogger("avocado.app")
log.info('Config files read (in order):')
for cfg_path in settings.config_paths:
log.debug(' %s' % cfg_path)
if settings.config_paths_failed:
log.error('\nConfig files that failed to read:')
for cfg_path in settings.config_paths_failed:
log.error(' %s' % cfg_path)
log.debug("")
if not args.datadir:
blength = 0
for section in settings.config.sections():
for value in settings.config.items(section):
clength = len('%s.%s' % (section, value[0]))
if clength > blength:
blength = clength
format_str = " %-" + str(blength) + "s %s"
format_str = " %-" + str(blength) + "s %s"
view.notify(event="minor", msg=format_str % ('Section.Key', 'Value'))
for section in settings.config.sections():
for value in settings.config.items(section):
config_key = ".".join((section, value[0]))
view.notify(event="minor", msg=format_str % (config_key, value[1]))
else:
view.notify(event="minor", msg="Avocado replaces config dirs that can't be accessed")
view.notify(event="minor", msg="with sensible defaults. Please edit your local config")
view.notify(event="minor", msg="file to customize values")
view.notify(event="message", msg='')
view.notify(event="message", msg='Avocado Data Directories:')
view.notify(event="minor", msg=' base ' + data_dir.get_base_dir())
view.notify(event="minor", msg=' tests ' + data_dir.get_test_dir())
view.notify(event="minor", msg=' data ' + data_dir.get_data_dir())
view.notify(event="minor", msg=' logs ' + data_dir.get_logs_dir())
finally:
view.cleanup()
log.debug(format_str, 'Section.Key', 'Value')
for section in settings.config.sections():
for value in settings.config.items(section):
config_key = ".".join((section, value[0]))
log.debug(format_str, config_key, value[1])
else:
log.debug("Avocado replaces config dirs that can't be accessed")
log.debug("with sensible defaults. Please edit your local config")
log.debug("file to customize values")
log.debug('')
log.info('Avocado Data Directories:')
log.debug(' base ' + data_dir.get_base_dir())
log.debug(' tests ' + data_dir.get_test_dir())
log.debug(' data ' + data_dir.get_data_dir())
log.debug(' logs ' + data_dir.get_logs_dir())
......@@ -14,10 +14,10 @@
import bz2
import json
import logging
import os
import sys
from avocado.core import output
from avocado.core import exit_codes
from avocado.utils import distro as utils_distro
from avocado.utils import path as utils_path
......@@ -341,25 +341,23 @@ class Distro(CLICmd):
args.distro_def_arch)
def run(self, args):
view = output.View()
log = logging.getLogger("avocado.app")
if args.distro_def_create:
if not (args.distro_def_name and args.distro_def_version and
args.distro_def_arch and args.distro_def_type and
args.distro_def_path):
error_msg = ('Required arguments: name, version, arch, type '
'and path')
view.notify(event="error", msg=error_msg)
log.error('Required arguments: name, version, arch, type '
'and path')
sys.exit(exit_codes.AVOCADO_FAIL)
output_file_name = self.get_output_file_name(args)
if os.path.exists(output_file_name):
error_msg = ('Output file "%s" already exists, will not '
'overwrite it' % output_file_name)
view.notify(event="error", msg=error_msg)
'overwrite it', output_file_name)
log.error(error_msg)
else:
view.notify(event="message",
msg=("Loading distro information from tree... "
"Please wait..."))
log.debug("Loading distro information from tree... "
"Please wait...")
distro = load_from_tree(args.distro_def_name,
args.distro_def_version,
args.distro_def_release,
......@@ -367,14 +365,10 @@ class Distro(CLICmd):
args.distro_def_type,
args.distro_def_path)
save_distro(distro, output_file_name)
view.notify(event="message",
msg=('Distro information saved '
'to "%s"' % output_file_name))
log.debug('Distro information saved to "%s"',
output_file_name)
else:
detected = utils_distro.detect()
msg = 'Detected distribution: %s (%s) version %s release %s' % (
detected.name,
detected.arch,
detected.version,
detected.release)
view.notify(event="message", msg=msg)
log.debug('Detected distribution: %s (%s) version %s release %s',
detected.name, detected.arch, detected.version,
detected.release)
......@@ -14,10 +14,11 @@
Libexec PATHs modifier
"""
import logging
import os
import sys
from avocado.core import exit_codes, output
from avocado.core import exit_codes
from .base import CLICmd
......@@ -37,24 +38,21 @@ class ExecPath(CLICmd):
:param args: Command line args received from the run subparser.
"""
self.view = output.View(app_args=args, use_paginator=False)
log = logging.getLogger("avocado.app")
if 'VIRTUAL_ENV' in os.environ:
self.view.notify(event='minor', msg='libexec')
log.debug('libexec')
elif os.path.exists('/usr/libexec/avocado'):
self.view.notify(event='minor', msg='/usr/libexec/avocado')
log.debug('/usr/libexec/avocado')
elif os.path.exists('/usr/lib/avocado'):
self.view.notify(event='minor', msg='/usr/lib/avocado')
log.debug('/usr/lib/avocado')
else:
for path in os.environ.get('PATH').split(':'):
if (os.path.exists(os.path.join(path, 'avocado')) and
os.path.exists(os.path.join(os.path.dirname(path),
'libexec'))):
self.view.notify(event='minor',
msg=os.path.join(os.path.dirname(path),
'libexec'))
log.debug(os.path.join(os.path.dirname(path), 'libexec'))
break
else:
self.view.notify(event='error',
msg="Can't locate avocado libexec path")
log.error("Can't locate avocado libexec path")
sys.exit(exit_codes.AVOCADO_FAIL)
return sys.exit(exit_codes.AVOCADO_ALL_OK)
......@@ -15,10 +15,10 @@
HTML output module.
"""
import logging
import sys
from avocado.core import exit_codes
from avocado.core import output
from avocado.core.html import HTMLTestResult
from avocado.core.result import register_test_result_class
......@@ -59,11 +59,9 @@ class HTML(CLI):
def run(self, args):
if 'html_output' in args and args.html_output == '-':
view = output.View(app_args=args)
view.notify(event='error',
msg='HTML to stdout not supported '
'(not all HTML resources can be embedded '
'on a single file)')
log = logging.getLogger("avocado.app")
log.error('HTML to stdout not supported (not all HTML resources '
'can be embedded on a single file)')
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
if 'html_output' in args and args.html_output is not None:
......
......@@ -12,6 +12,7 @@
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import logging
import sys
from avocado.core import exit_codes, output
......@@ -29,8 +30,7 @@ class TestLister(object):
"""
def __init__(self, args):
use_paginator = args.paginator == 'on'
self.view = output.View(app_args=args, use_paginator=use_paginator)
self.log = logging.getLogger("avocado.app")
try:
loader.loader.load_plugins(args)
except loader.LoaderError as details:
......@@ -48,8 +48,7 @@ class TestLister(object):
return loader.loader.discover(paths,
which_tests=which_tests)
except loader.LoaderUnhandledUrlError as details:
self.view.notify(event="error", msg=str(details))
self.view.cleanup()
self.log.error(str(details))
sys.exit(exit_codes.AVOCADO_FAIL)
def _get_test_matrix(self, test_suite):
......@@ -109,12 +108,12 @@ class TestLister(object):
output.term_support.header_str('Test'))
for line in astring.iter_tabular_output(test_matrix, header=header):
self.view.notify(event='minor', msg="%s" % line)
self.log.debug(line)
if self.args.verbose:
self.view.notify(event='minor', msg='')
self.log.debug("")
for key in sorted(stats):
self.view.notify(event='message', msg=("%s: %s" % (key.upper(), stats[key])))
self.log.info("%s: %s", key.upper(), stats[key])
def _list(self):
self._extra_listing()
......@@ -123,20 +122,11 @@ class TestLister(object):
self._display(test_matrix, stats)
def list(self):
rc = 0
try:
self._list()
except KeyboardInterrupt:
rc = exit_codes.AVOCADO_FAIL
msg = 'Command interrupted by user...'
if self.view is not None:
self.view.notify(event='error', msg=msg)
else:
sys.stderr.write(msg)
finally:
if self.view:
self.view.cleanup()
return rc
self.log.error('Command interrupted by user...')
return exit_codes.AVOCADO_FAIL
class List(CLICmd):
......
......@@ -12,6 +12,7 @@
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import logging
import sys
from avocado.core import exit_codes, output
......@@ -83,22 +84,21 @@ class Multiplex(CLICmd):
def run(self, args):
self._activate(args)
view = output.View(app_args=args)
log = logging.getLogger("avocado.app")
err = None
if args.tree and args.debug:
err = "Option --tree is incompatible with --debug."
elif not args.tree and args.inherit:
err = "Option --inherit can be only used with --tree"
if err:
view.notify(event="error", msg=err)
log.error(err)
sys.exit(exit_codes.AVOCADO_FAIL)
try:
mux_tree = multiplexer.yaml2tree(args.multiplex_files,
args.filter_only, args.filter_out,
args.debug)
except IOError as details:
view.notify(event='error',
msg=details.strerror)
log.error(details.strerror)
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
if args.system_wide:
mux_tree.merge(args.default_multiplex_tree)
......@@ -112,12 +112,11 @@ class Multiplex(CLICmd):
verbose += 2
use_utf8 = settings.get_value("runner.output", "utf8",
key_type=bool, default=None)
view.notify(event='minor', msg=tree.tree_view(mux_tree, verbose,
use_utf8))
log.debug(tree.tree_view(mux_tree, verbose, use_utf8))
sys.exit(exit_codes.AVOCADO_ALL_OK)
variants = multiplexer.MuxTree(mux_tree)
view.notify(event='message', msg='Variants generated:')
log.info('Variants generated:')
for (index, tpl) in enumerate(variants):
if not args.debug:
paths = ', '.join([x.path for x in tpl])
......@@ -129,8 +128,8 @@ class Multiplex(CLICmd):
"Unknown"),
cend)
for _ in tpl])
view.notify(event='minor', msg='%sVariant %s: %s' %
(('\n' if args.contents else ''), index + 1, paths))
log.debug('%sVariant %s: %s', '\n' if args.contents else '',
index + 1, paths)
if args.contents:
env = set()
for node in tpl:
......@@ -141,6 +140,6 @@ class Multiplex(CLICmd):
continue
fmt = ' %%-%ds => %%s' % max([len(_[0]) for _ in env])
for record in sorted(env):
view.notify(event='minor', msg=fmt % record)
log.debug(fmt, *record)
sys.exit(exit_codes.AVOCADO_ALL_OK)
......@@ -15,7 +15,9 @@
Plugins information plugin
"""
from avocado.core import dispatcher, output
import logging
from avocado.core import dispatcher
from avocado.utils import astring
from .base import CLICmd
......@@ -38,25 +40,23 @@ class Plugins(CLICmd):
'Current: %(default)s')
def run(self, args):
view = output.View(app_args=args,
use_paginator=args.paginator == 'on')
log = logging.getLogger("avocado.app")
cli_cmds = dispatcher.CLICmdDispatcher()
msg = 'Plugins that add new commands (avocado.plugins.cli.cmd):'
view.notify(event='message', msg=msg)
log.info(msg)
plugin_matrix = []
for plugin in sorted(cli_cmds):
plugin_matrix.append((plugin.name, plugin.obj.description))
for line in astring.iter_tabular_output(plugin_matrix):
view.notify(event='minor', msg=line)
log.debug(line)
msg = 'Plugins that add new options to commands (avocado.plugins.cli):'
cli = dispatcher.CLIDispatcher()
view.notify(event='message', msg=msg)
log.info(msg)
plugin_matrix = []
for plugin in sorted(cli):
plugin_matrix.append((plugin.name, plugin.obj.description))
for line in astring.iter_tabular_output(plugin_matrix):
view.notify(event='minor', msg=line)
log.debug(line)
......@@ -15,10 +15,10 @@
"""Run tests on a remote machine."""
import getpass
import logging
import sys
from avocado.core import exit_codes
from avocado.core import output
from avocado.core import remoter
from avocado.core.remote import RemoteTestResult
from avocado.core.remote import RemoteTestRunner
......@@ -91,12 +91,11 @@ class Remote(CLI):
if not getattr(args, arg):
missing.append(arg)
if missing:
view = output.View(app_args=args)
e_msg = ('Use of %s requires %s arguments to be set. Please set %s'
'.' % (enable_arg, ', '.join(required_args),
', '.join(missing)))
log = logging.getLogger("avocado.app")
log.error("Use of %s requires %s arguments to be set. Please set "
"%s.", enable_arg, ', '.join(required_args),
', '.join(missing))
view.notify(event='error', msg=e_msg)
return sys.exit(exit_codes.AVOCADO_FAIL)
return True
......
......@@ -13,6 +13,7 @@
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import argparse
import logging
import os
import sys
......@@ -20,7 +21,6 @@ from .base import CLI
from avocado.core import replay
from avocado.core import status
from avocado.core import exit_codes
from avocado.core import output
from avocado.core.settings import settings
......@@ -95,7 +95,7 @@ class Replay(CLI):
if getattr(args, 'replay_jobid', None) is None:
return
view = output.View()
log = logging.getLogger("avocado.app")
err = None
if args.replay_teststatus and args.multiplex_files:
......@@ -107,7 +107,7 @@ class Replay(CLI):
elif args.remote_hostname:
err = "Currently we don't replay jobs in remote hosts."
if err is not None:
view.notify(event="error", msg=err)
log.error(err)
sys.exit(exit_codes.AVOCADO_FAIL)
if args.replay_datadir is not None:
......@@ -115,12 +115,11 @@ class Replay(CLI):
else:
logs_dir = settings.get_value('datadir.paths', 'logs_dir',
default=None)
self.logdir = os.path.expanduser(logs_dir)
resultsdir = replay.get_resultsdir(self.logdir, args.replay_jobid)
logdir = os.path.expanduser(logs_dir)
resultsdir = replay.get_resultsdir(logdir, args.replay_jobid)
if resultsdir is None:
msg = "Can't find job results directory in '%s'" % self.logdir
view.notify(event='error', msg=(msg))
log.error("Can't find job results directory in '%s'", logdir)
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
sourcejob = replay.get_id(os.path.join(resultsdir, 'id'),
......@@ -128,47 +127,42 @@ class Replay(CLI):
if sourcejob is None:
msg = ("Can't find matching job id '%s' in '%s' directory."
% (args.replay_jobid, resultsdir))
view.notify(event='error', msg=(msg))
log.error(msg)
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
setattr(args, 'replay_sourcejob', sourcejob)
if getattr(args, 'url', None):
msg = ('Overriding the replay urls with urls provided in '
'command line.')
view.notify(event='warning', msg=(msg))
log.warn('Overriding the replay urls with urls provided in '
'command line.')
else:
urls = replay.retrieve_urls(resultsdir)
if urls is None:
msg = 'Source job urls data not found. Aborting.'
view.notify(event='error', msg=(msg))
log.error('Source job urls data not found. Aborting.')
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
else:
setattr(args, 'url', urls)
if args.replay_ignore and 'config' in args.replay_ignore:
msg = ("Ignoring configuration from source job with "
"--replay-ignore.")
view.notify(event='warning', msg=(msg))
log.warn("Ignoring configuration from source job with "
"--replay-ignore.")
else:
self.load_config(resultsdir)
if args.replay_ignore and 'mux' in args.replay_ignore:
msg = "Ignoring multiplex from source job with --replay-ignore."
view.notify(event='warning', msg=(msg))
log.warn("Ignoring multiplex from source job with "
"--replay-ignore.")
else:
if getattr(args, 'multiplex_files', None) is not None:
msg = ('Overriding the replay multiplex with '
'--multiplex-files.')
view.notify(event='warning', msg=(msg))
log.warn('Overriding the replay multiplex with '
'--multiplex-file.')
# Use absolute paths to avoid problems with os.chdir
args.multiplex_files = [os.path.abspath(_)
for _ in args.multiplex_files]
else:
mux = replay.retrieve_mux(resultsdir)
if mux is None:
msg = 'Source job multiplex data not found. Aborting.'
view.notify(event='error', msg=(msg))
log.error('Source job multiplex data not found. Aborting.')
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
else:
setattr(args, "multiplex_files", mux)
......@@ -184,6 +178,5 @@ class Replay(CLI):
if os.path.exists(pwd):
os.chdir(pwd)
else:
view.notify(event="warning", msg="Directory used in the replay"
" source job '%s' does not exist, using '.' "
"instead" % pwd)
log.warn("Directory used in the replay source job '%s' does "
"not exist, using '.' instead", pwd)
......@@ -16,10 +16,10 @@
Base Test Runner Plugins.
"""
import logging
import sys
from avocado.core import exit_codes
from avocado.core import output
from avocado.core import job
from avocado.core import loader
from avocado.core import multiplexer
......@@ -164,10 +164,9 @@ class Run(CLICmd):
if timeout < 1:
raise ValueError()
except (ValueError, TypeError):
self.view.notify(
event='error',
msg=("Invalid number '%s' for job timeout. "
"Use an integer number greater than 0") % raw_timeout)
log = logging.getLogger("avocado.app")
log.error("Invalid number '%s' for job timeout. Use an "
"integer number greater than 0", raw_timeout)
sys.exit(exit_codes.AVOCADO_FAIL)
else:
timeout = 0
......@@ -180,14 +179,14 @@ class Run(CLICmd):
:param args: Command line args received from the run subparser.
"""
self._activate(args)
self.view = output.View(app_args=args)
if args.unique_job_id is not None:
try:
int(args.unique_job_id, 16)
if len(args.unique_job_id) != 40:
raise ValueError
except ValueError:
self.view.notify(event='error', msg='Unique Job ID needs to be a 40 digit hex number')
log = logging.getLogger("avocado.app")
log.error('Unique Job ID needs to be a 40 digit hex number')
sys.exit(exit_codes.AVOCADO_FAIL)
args.job_timeout = self._validate_job_timeout(args.job_timeout)
job_instance = job.Job(args)
......
......@@ -15,10 +15,10 @@
"""Run tests on Virtual Machine."""
import getpass
import logging
import sys
from avocado.core import exit_codes
from avocado.core import output
from avocado.core import virt
from avocado.core.remote import VMTestResult
from avocado.core.remote import VMTestRunner
......@@ -95,12 +95,11 @@ class VM(CLI):
if not getattr(args, arg):
missing.append(arg)
if missing:
view = output.View(app_args=args)
e_msg = ('Use of %s requires %s arguments to be set. Please set %s'
'.' % (enable_arg, ', '.join(required_args),
', '.join(missing)))
log = logging.getLogger("avocado.app")
log.error("Use of %s requires %s arguments to be set. Please set "
"%s.", enable_arg, ', '.join(required_args),
', '.join(missing))
view.notify(event='error', msg=e_msg)
return sys.exit(exit_codes.AVOCADO_FAIL)
return True
......
......@@ -12,11 +12,11 @@
# Copyright: Red Hat Inc. 2014
# Author: Ruda Moura <rmoura@redhat.com>
import logging
import os
import sys
from avocado.core import exit_codes
from avocado.core import output
from avocado.utils import process
from .base import CLI
......@@ -51,11 +51,10 @@ class Wrapper(CLI):
def run(self, args):
wraps = getattr(args, "wrapper", None)
if wraps:
view = output.View(app_args=args)
log = logging.getLogger("avocado.app")
if getattr(args, 'gdb_run_bin', None):
view.notify(event='error',
msg='Command line option --wrapper is incompatible'
' with option --gdb-run-bin.')
log.error('Command line option --wrapper is incompatible'
' with option --gdb-run-bin.\n%s', args.wrapper)
sys.exit(exit_codes.AVOCADO_FAIL)
for wrap in args.wrapper:
......@@ -64,15 +63,13 @@ class Wrapper(CLI):
script = os.path.abspath(wrap)
process.WRAP_PROCESS = os.path.abspath(script)
else:
view.notify(event='error',
msg="You can't have multiple global"
" wrappers at once.")
log.error("You can't have multiple global "
"wrappers at once.")
sys.exit(exit_codes.AVOCADO_FAIL)
else:
script, cmd = wrap.split(':', 1)
script = os.path.abspath(script)
process.WRAP_PROCESS_NAMES_EXPR.append((script, cmd))
if not os.path.exists(script):
view.notify(event='error',
msg="Wrapper '%s' not found!" % script)
log.error("Wrapper '%s' not found!", script)
sys.exit(exit_codes.AVOCADO_FAIL)
......@@ -88,7 +88,7 @@ class ReplayTests(unittest.TestCase):
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Ignoring multiplex from source job with --replay-ignore.'
self.assertIn(msg, result.stdout)
self.assertIn(msg, result.stderr)
def test_run_replay_invalidstatus(self):
cmd_line = ('./scripts/avocado run --replay %s --replay-test-status E '
......
from flexmock import flexmock
import unittest
import os
import json
......@@ -11,25 +10,10 @@ from avocado.core import jsonresult
from avocado.core import job
class _Stream(object):
class FakeJob(object):
def start_job_logging(self, param1, param2):
pass
def stop_job_logging(self):
pass
def set_tests_info(self, info):
pass
def notify(self, event, msg):
pass
def add_test(self, state):
pass
def set_test_status(self, status, state):
pass
def __init__(self, args):
self.args = args
class JSONResultTest(unittest.TestCase):
......@@ -44,10 +28,7 @@ class JSONResultTest(unittest.TestCase):
self.tmpfile = tempfile.mkstemp()
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
args = argparse.Namespace(json_output=self.tmpfile[1])
stream = _Stream()
stream.logfile = 'debug.log'
dummyjob = flexmock(view=stream, args=args)
self.test_result = jsonresult.JSONTestResult(dummyjob)
self.test_result = jsonresult.JSONTestResult(FakeJob(args))
self.test_result.filename = self.tmpfile[1]
self.test_result.start_tests()
self.test1 = SimpleTest(job=job.Job(), base_logdir=self.tmpdir)
......
......@@ -5,7 +5,6 @@ import os
from flexmock import flexmock, flexmock_teardown
from avocado.core import output
from avocado.core import remoter
from avocado.core import remote
from avocado.utils import archive
......@@ -30,9 +29,6 @@ class RemoteTestRunnerTest(unittest.TestCase):
""" Tests RemoteTestRunner """
def setUp(self):
View = flexmock(output.View)
view = output.View()
view.should_receive('notify')
Args = flexmock(test_result_total=1,
remote_username='username',
remote_hostname='hostname',
......@@ -43,7 +39,9 @@ class RemoteTestRunnerTest(unittest.TestCase):
show_job_log=False,
multiplex_files=['foo.yaml', 'bar/baz.yaml'],
dry_run=True)
job = flexmock(args=Args, view=view,
log = flexmock()
log.should_receive("info")
job = flexmock(args=Args, log=log,
urls=['/tests/sleeptest', '/tests/other/test',
'passtest'], unique_id='sleeptest.1',
logdir="/local/path")
......@@ -154,9 +152,6 @@ class RemoteTestRunnerSetup(unittest.TestCase):
def setUp(self):
Remote = flexmock()
View = flexmock(output.View)
view = output.View()
view.should_receive('notify')
remote_remote = flexmock(remoter)
(remote_remote.should_receive('Remote')
.with_args('hostname', 'username', 'password', 22, 60)
......@@ -172,7 +167,9 @@ class RemoteTestRunnerSetup(unittest.TestCase):
remote_no_copy=False,
remote_timeout=60,
show_job_log=False)
job = flexmock(args=Args, view=view)
log = flexmock()
log.should_receive("info")
job = flexmock(args=Args, log=log)
self.runner = remote.RemoteTestRunner(job, None)
def tearDown(self):
......
......@@ -6,7 +6,6 @@ from flexmock import flexmock, flexmock_teardown
from avocado.core.remote import VMTestRunner
from avocado.core import virt
from avocado.core import output
JSON_RESULTS = ('Something other than json\n'
'{"tests": [{"test": "sleeptest.1", "url": "sleeptest", '
......@@ -22,9 +21,6 @@ class VMTestRunnerSetup(unittest.TestCase):
""" Tests the VMTestRunner setup() method """
def setUp(self):
View = flexmock(output.View)
view = output.View()
view.should_receive('notify')
mock_vm = flexmock(snapshot=True,
domain=flexmock(isActive=lambda: True))
flexmock(virt).should_receive('vm_connect').and_return(mock_vm).once().ordered()
......@@ -43,7 +39,9 @@ class VMTestRunnerSetup(unittest.TestCase):
vm_no_copy=False,
vm_timeout=120,
vm_hypervisor_uri='my_hypervisor_uri')
job = flexmock(args=Args, view=view)
log = flexmock()
log.should_receive("info")
job = flexmock(args=Args, log=log)
self.runner = VMTestRunner(job, None)
mock_vm.should_receive('stop').once().ordered()
mock_vm.should_receive('restore_snapshot').once().ordered()
......
from flexmock import flexmock
import argparse
import unittest
import os
......@@ -15,25 +14,10 @@ class ParseXMLError(Exception):
pass
class _Stream(object):
class FakeJob(object):
def start_job_logging(self, param1, param2):
pass
def stop_job_logging(self):
pass
def set_tests_info(self, info):
pass
def notify(self, event, msg):
pass
def add_test(self, state):
pass
def set_test_status(self, status, state):
pass
def __init__(self, args):
self.args = args
class xUnitSucceedTest(unittest.TestCase):
......@@ -49,8 +33,7 @@ class xUnitSucceedTest(unittest.TestCase):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
args = argparse.Namespace()
args.xunit_output = self.tmpfile[1]
dummy_job = flexmock(view=_Stream(), args=args)
self.test_result = xunit.xUnitTestResult(dummy_job)
self.test_result = xunit.xUnitTestResult(FakeJob(args))
self.test_result.start_tests()
self.test1 = SimpleTest(job=job.Job(), base_logdir=self.tmpdir)
self.test1.status = 'PASS'
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册