diff --git a/avocado/__init__.py b/avocado/__init__.py index 493df6fe827153b9624f37a279550fd7295b68cf..e6ffc5afec3cde76aa256dc9671a5ba8f3c388f9 100644 --- a/avocado/__init__.py +++ b/avocado/__init__.py @@ -36,13 +36,18 @@ DEFAULT_LOGGING = { 'class': 'logging.StreamHandler', 'formatter': 'brief', }, + 'app': { + 'level': 'INFO', + 'class': 'avocado.core.output.ProgressStreamHandler', + 'formatter': 'brief', + }, }, 'loggers': { 'avocado': { 'handlers': ['console'], }, 'avocado.app': { - 'handlers': ['console'], + 'handlers': ['app'], 'level': 'INFO', 'propagate': False, }, diff --git a/avocado/core/output.py b/avocado/core/output.py index a4f19c644ece310db1180c8e6d050de9fd3de402..b57d381346be3a5333bb7bf6deb401f4c945ef32 100644 --- a/avocado/core/output.py +++ b/avocado/core/output.py @@ -22,6 +22,29 @@ import sys from avocado.utils import process +class ProgressStreamHandler(logging.StreamHandler): + + """ + Handler class that allows users to skip new lines on each emission. + """ + + def emit(self, record): + try: + msg = self.format(record) + stream = self.stream + skip_newline = False + if hasattr(record, 'skip_newline'): + skip_newline = record.skip_newline + stream.write(msg) + if not skip_newline: + stream.write('\n') + self.flush() + except (KeyboardInterrupt, SystemExit): + raise + except: + self.handleError(record) + + def get_paginator(): """ Get a pipe. If we can't do that, return stdout. @@ -95,21 +118,21 @@ class TermColors(object): self.WARN = '' self.ENDC = '' - def header_str(self, sr): + def header_str(self, msg): """ Print a header string (blue colored). If the output does not support colors, just return the original string. """ - return self.HEADER + sr + self.ENDC + return self.HEADER + msg + self.ENDC - def fail_header_str(self, sr): + def fail_header_str(self, msg): """ Print a fail header string (red colored). If the output does not support colors, just return the original string. 
""" - return self.FAIL + sr + self.ENDC + return self.FAIL + msg + self.ENDC def pass_str(self): """ @@ -164,13 +187,15 @@ class OutputManager(object): def __init__(self, logger_name='avocado.app'): self.console_log = logging.getLogger('avocado.app') - def _log(self, sr, level=logging.INFO): + def _log(self, msg, level=logging.INFO, skip_newline=False): """ Write a message to the avocado.app logger. - :param sr: String to write. + :param msg: Message to write + :type msg: string """ - self.console_log.log(level, sr) + extra = {'skip_newline': skip_newline} + self.console_log.log(level=level, msg=msg, extra=extra) def start_file_logging(self, logfile, level): """ @@ -205,88 +230,79 @@ class OutputManager(object): linux_logger.removeHandler(self.file_handler) self.file_handler.close() - def info(self, sr): + def info(self, msg, skip_newline=False): """ Log a :mod:`logging.INFO` message. - :param sr: String to write. + :param msg: Message to write. """ - self._log(sr, level=logging.INFO) + self._log(msg, level=logging.INFO, skip_newline=skip_newline) - def error(self, sr): + def error(self, msg): """ Log a :mod:`logging.INFO` message. - :param sr: String to write. + :param msg: Message to write. """ - self._log(sr, level=logging.ERROR) + self._log(msg, level=logging.ERROR) - def log_header(self, sr): + def log_header(self, msg): """ Log a header message. - :param sr: String to write. + :param msg: Message to write. """ - self.info(colors.header_str(sr)) + self.info(colors.header_str(msg)) - def log_fail_header(self, sr): + def log_fail_header(self, msg): """ Log a fail header message (red, for critical errors). - :param sr: String to write. + :param msg: Message to write. """ - self.info(colors.fail_header_str(sr)) + self.info(colors.fail_header_str(msg)) - def log_pass(self, label, t_elapsed): + def log_pass(self, t_elapsed): """ - Log a test PASS message. + Log a PASS message. - :param label: Label for the PASS message (test name + index). 
- :param t_elapsed: Time it took for test to complete. + :param t_elapsed: Time it took for the operation to complete. """ - normal_pass_msg = (label + " " + colors.pass_str() + - " (%.2f s)" % t_elapsed) + normal_pass_msg = colors.pass_str() + " (%.2f s)" % t_elapsed self.info(normal_pass_msg) - def log_error(self, label, t_elapsed): + def log_error(self, t_elapsed): """ - Log a test ERROR message. + Log an ERROR message. - :param label: Label for the FAIL message (test name + index). - :param t_elapsed: Time it took for test to complete. + :param t_elapsed: Time it took for the operation to complete. """ - normal_error_msg = (label + " " + colors.error_str() + - " (%.2f s)" % t_elapsed) + normal_error_msg = colors.error_str() + " (%.2f s)" % t_elapsed self.error(normal_error_msg) - def log_fail(self, label, t_elapsed): + def log_fail(self, t_elapsed): """ - Log a test FAIL message. + Log a FAIL message. - :param label: Label for the FAIL message (test name + index). - :param t_elapsed: Time it took for test to complete. + :param t_elapsed: Time it took for the operation to complete. """ - normal_fail_msg = (label + " " + colors.fail_str() + - " (%.2f s)" % t_elapsed) + normal_fail_msg = colors.fail_str() + " (%.2f s)" % t_elapsed self.error(normal_fail_msg) - def log_skip(self, label, t_elapsed): + def log_skip(self, t_elapsed): """ - Log a test SKIP message. + Log a SKIP message. - :param label: Label for the SKIP message (test name + index). - :param t_elapsed: Time it took for test to complete. + :param t_elapsed: Time it took for the operation to complete. """ - normal_skip_msg = (label + " " + colors.skip_str()) + normal_skip_msg = colors.skip_str() self.info(normal_skip_msg) - def log_warn(self, label, t_elapsed): + def log_warn(self, t_elapsed): """ - Log a test WARN message. + Log a WARN message. - :param label: Label for the WARN message (test name + index). - :param t_elapsed: Time it took for test to complete. 
+ :param t_elapsed: Time it took for the operation to complete. """ - normal_warn_msg = (label + " " + colors.warn_str() + - " (%.2f s)" % t_elapsed) + normal_warn_msg = colors.warn_str() + " (%.2f s)" % t_elapsed self.error(normal_warn_msg) diff --git a/avocado/job.py b/avocado/job.py index 42bb3bdb108c3a09f2079f21e93a406af3f98137..298b16a399ad358c4fffbc99fe1ce4c6534c27bf 100644 --- a/avocado/job.py +++ b/avocado/job.py @@ -53,9 +53,9 @@ class TestRunner(object): self.job = job self.result = test_result - def _load_test_instance(self, params): + def load_test(self, params): """ - Find the test url from the first component of the test shortname, and load the url. + Resolve and load the test url from the test shortname. :param params: Dictionary with test params. :type params: dict @@ -63,7 +63,7 @@ class TestRunner(object): """ shortname = params.get('shortname') url = shortname.split('.')[0] - path_attempt = os.path.abspath(url) + path_attempt = os.path.abspath(shortname) if os.path.exists(path_attempt): test_class = test.DropinTest test_instance = test_class(path=path_attempt, @@ -85,18 +85,6 @@ class TestRunner(object): job=self.job) return test_instance - def run_test(self, params): - """ - Run a single test. - - :param params: Dictionary with test params. - :type params: dict - :return: an instance of :class:`avocado.test.Test`. - """ - test_instance = self._load_test_instance(params) - test_instance.run_avocado() - return test_instance - def run(self, params_list): """ Run one or more tests and report with test result. 
@@ -108,7 +96,9 @@ class TestRunner(object): failures = [] self.result.start_tests() for params in params_list: - test_instance = self.run_test(params) + test_instance = self.load_test(params) + self.result.start_test(test_instance) + test_instance.run_avocado() self.result.check_test(test_instance) if not status.mapping[test_instance.status]: failures.append(test_instance.name) diff --git a/avocado/plugins/vm.py b/avocado/plugins/vm.py index c640f13af381917c990fc2fd6d2cfc9aad91a381..ba7fc8d6af7ee2ae3c87d53d1679892f964146b2 100644 --- a/avocado/plugins/vm.py +++ b/avocado/plugins/vm.py @@ -184,6 +184,8 @@ class VMTestResult(TestResult): self.tests_total, test.tagged_name) + self.stream.info(msg=self.test_label, skip_newline=True) + def end_test(self, test): """ Called when the given test has been run. @@ -199,7 +201,7 @@ class VMTestResult(TestResult): :param test: :class:`avocado.test.Test` instance. """ TestResult.add_pass(self, test) - self.stream.log_pass(self.test_label, test.time_elapsed) + self.stream.log_pass(test.time_elapsed) def add_error(self, test): """ @@ -208,7 +210,7 @@ class VMTestResult(TestResult): :param test: :class:`avocado.test.Test` instance. """ TestResult.add_error(self, test) - self.stream.log_error(self.test_label, test.time_elapsed) + self.stream.log_error(test.time_elapsed) def add_fail(self, test): """ @@ -217,7 +219,7 @@ class VMTestResult(TestResult): :param test: :class:`avocado.test.Test` instance. """ TestResult.add_fail(self, test) - self.stream.log_fail(self.test_label, test.time_elapsed) + self.stream.log_fail(test.time_elapsed) def add_skip(self, test): """ @@ -226,7 +228,7 @@ class VMTestResult(TestResult): :param test: :class:`avocado.test.Test` instance. """ TestResult.add_skip(self, test) - self.stream.log_skip(self.test_label, test.time_elapsed) + self.stream.log_skip(test.time_elapsed) def add_warn(self, test): """ @@ -235,7 +237,7 @@ class VMTestResult(TestResult): :param test: :class:`avocado.test.Test` instance. 
""" TestResult.add_warn(self, test) - self.stream.log_warn(self.test_label, test.time_elapsed) + self.stream.log_warn(test.time_elapsed) class RunVM(plugin.Plugin): diff --git a/avocado/result.py b/avocado/result.py index 1c5e684d93a3bc2e95c93c3c17d1e16d1433e50e..072a3da35db163fe1211deb79cbd0d0bd9d7e1e6 100644 --- a/avocado/result.py +++ b/avocado/result.py @@ -13,7 +13,12 @@ # Authors: Lucas Meneghel Rodrigues # Ruda Moura -"""Test result module.""" +""" +Contains the definition of the TestResult class, used for output in avocado. + +It also contains the most basic test result class, HumanTestResult, +used by the test runner. +""" class TestResult(object): @@ -116,7 +121,6 @@ class TestResult(object): :param test: an instance of :class:`avocado.test.Test`. """ - self.start_test(test) status_map = {'PASS': self.add_pass, 'ERROR': self.add_error, 'FAIL': self.add_fail, @@ -161,6 +165,7 @@ class HumanTestResult(TestResult): self.test_label = '(%s/%s) %s: ' % (self.tests_run, self.tests_total, test.tagged_name) + self.stream.info(msg=self.test_label, skip_newline=True) def end_test(self, test): """ @@ -177,7 +182,7 @@ class HumanTestResult(TestResult): :param test: an instance of :class:`avocado.test.Test`. """ TestResult.add_pass(self, test) - self.stream.log_pass(self.test_label, test.time_elapsed) + self.stream.log_pass(test.time_elapsed) def add_error(self, test): """ @@ -186,7 +191,7 @@ class HumanTestResult(TestResult): :param test: an instance of :class:`avocado.test.Test`. """ TestResult.add_error(self, test) - self.stream.log_error(self.test_label, test.time_elapsed) + self.stream.log_error(test.time_elapsed) def add_fail(self, test): """ @@ -195,7 +200,7 @@ class HumanTestResult(TestResult): :param test: an instance of :class:`avocado.test.Test`. 
""" TestResult.add_fail(self, test) - self.stream.log_fail(self.test_label, test.time_elapsed) + self.stream.log_fail(test.time_elapsed) def add_skip(self, test): """ @@ -204,7 +209,7 @@ class HumanTestResult(TestResult): :param test: an instance of :class:`avocado.test.Test`. """ TestResult.add_skip(self, test) - self.stream.log_skip(self.test_label, test.time_elapsed) + self.stream.log_skip(test.time_elapsed) def add_warn(self, test): """ @@ -213,4 +218,4 @@ class HumanTestResult(TestResult): :param test: an instance of :class:`avocado.test.Test`. """ TestResult.add_warn(self, test) - self.stream.log_warn(self.test_label, test.time_elapsed) + self.stream.log_warn(test.time_elapsed) diff --git a/avocado/test.py b/avocado/test.py index 91de0eed05bd312c86458c4c9bf93ad9c14bdc8f..b5dd8cfe26225467607937d48db22c4d5af8a32b 100644 --- a/avocado/test.py +++ b/avocado/test.py @@ -30,6 +30,40 @@ from avocado.utils import process from avocado.utils.params import Params from avocado import sysinfo +log = logging.getLogger("avocado.test") + + +def tb_info(exc_info): + """ + Prepare traceback info. + + :param exc_info: Exception info produced by sys.exc_info() + """ + exc_type, exc_value, exc_traceback = exc_info + tb_info = traceback.format_exception(exc_type, exc_value, + exc_traceback.tb_next) + return tb_info + + +def log_exc_info(exc_info): + """ + Log exception info. + + :param exc_info: Exception info produced by sys.exc_info() + """ + for line in tb_info(exc_info): + for l in line.splitlines(): + log.error(l) + + +def prepare_exc_info(exc_info): + """ + Prepare traceback info. 
+ + :param exc_info: Exception info produced by sys.exc_info() + """ + return "".join(tb_info(exc_info)) + class Test(unittest.TestCase): @@ -239,11 +273,13 @@ class Test(unittest.TestCase): try: self.setup() except Exception, details: + log_exc_info(sys.exc_info()) raise exceptions.TestSetupFail(details) self.action() try: self.cleanup() except Exception, details: + log_exc_info(sys.exc_info()) raise exceptions.TestSetupFail(details) self.status = 'PASS' @@ -260,18 +296,12 @@ class Test(unittest.TestCase): self.status = detail.status self.fail_class = detail.__class__.__name__ self.fail_reason = detail - exc_type, exc_value, exc_traceback = sys.exc_info() - tb_info = traceback.format_exception(exc_type, exc_value, - exc_traceback.tb_next) - self.traceback = "".join(tb_info) + self.traceback = prepare_exc_info(sys.exc_info()) except AssertionError, detail: self.status = 'FAIL' self.fail_class = detail.__class__.__name__ self.fail_reason = detail - exc_type, exc_value, exc_traceback = sys.exc_info() - tb_info = traceback.format_exception(exc_type, exc_value, - exc_traceback.tb_next) - self.traceback = "".join(tb_info) + self.traceback = prepare_exc_info(sys.exc_info()) except Exception, detail: self.status = 'FAIL' self.fail_class = detail.__class__.__name__ diff --git a/avocado/utils/archive.py b/avocado/utils/archive.py index 3e21eb911d541529c1f336038fa0f5ac914e53d5..9bebf7c175f72b37850ab49cb0e93192abbfa8ee 100644 --- a/avocado/utils/archive.py +++ b/avocado/utils/archive.py @@ -26,11 +26,14 @@ """ Library used to transparently uncompress compressed files. 
""" +import logging import os import shutil import tarfile import zipfile +log = logging.getLogger('avocado.test') + class ArchiveException(Exception): @@ -172,14 +175,19 @@ class TarArchive(BaseArchive): except (KeyError, AttributeError) as exc: # Some corrupt tar files seem to produce this # (specifically bad symlinks) - print("In the tar file %s the member %s is invalid: %s" % - (name, member.name, exc)) + log.error("In the tar file %s the member %s is " + "invalid: %s" % (name, member.name, exc)) else: dirname = os.path.dirname(filename) if dirname and not os.path.exists(dirname): os.makedirs(dirname) with open(filename, 'wb') as outfile: - shutil.copyfileobj(extracted, outfile) + if extracted is not None: + shutil.copyfileobj(extracted, outfile) + else: + log.error("Member correspondent to file %s does " + "not seem to be a regular file or a link", + filename) finally: if extracted: extracted.close() diff --git a/avocado/utils/process.py b/avocado/utils/process.py index c4f95cf8b058f985195e4817b76797832227617b..529d73e441d7fc4a6eaa9f09c4e71e629332626c 100644 --- a/avocado/utils/process.py +++ b/avocado/utils/process.py @@ -18,9 +18,12 @@ Functions dedicated to find and run external commands. import logging import os -import subprocess +import StringIO +import signal import shlex +import subprocess import time +import threading from avocado.core import exceptions from avocado.utils import misc @@ -100,70 +103,257 @@ class CmdResult(object): self.duration, self.stdout, self.stderr)) -def run(cmd, verbose=True, ignore_status=False): - """ - Run a subprocess. +class SubProcess(object): - This is a light, yet compatible implementation of - :mod:`autotest.client.shared.utils.run`, so we can run commands in tests and - other avocado programs, if need to be. + """ + Run a subprocess in the background, collecting stdout/stderr streams. + """ - :param verbose: Wether to print the command run. 
 - :param ignore_status: Wether to raise an exception when command returns + def __init__(self, cmd, verbose=True): + """ + Creates the subprocess object, stdout/err, reader threads and locks. + + :param cmd: Command line to run. + :type cmd: str + :param verbose: Whether to log the command run and stdout/stderr. + :type verbose: bool + """ + args = shlex.split(cmd) + self.verbose = verbose + if self.verbose: + log.info("Running '%s'", cmd) + self.sp = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + self.start_time = time.time() + self.result = CmdResult(cmd) + self.stdout_file = StringIO.StringIO() + self.stderr_file = StringIO.StringIO() + self.stdout_lock = threading.Lock() + self.stdout_thread = threading.Thread(target=self._fd_drainer, + name="%s-stdout" % cmd, + args=[self.sp.stdout]) + self.stdout_thread.daemon = True + self.stderr_lock = threading.Lock() + self.stderr_thread = threading.Thread(target=self._fd_drainer, + name="%s-stderr" % cmd, + args=[self.sp.stderr]) + self.stderr_thread.daemon = True + self.stdout_thread.start() + self.stderr_thread.start() + + def _fd_drainer(self, input_pipe): + """ + Read from input_pipe, storing and logging output. + + :param input_pipe: File like object to the stream. + """ + if input_pipe == self.sp.stdout: + prefix = '[stdout] %s' + output_file = self.stdout_file + lock = self.stdout_lock + elif input_pipe == self.sp.stderr: + prefix = '[stderr] %s' + output_file = self.stderr_file + lock = self.stderr_lock + + fileno = input_pipe.fileno() + + bfr = '' + while True: + tmp = os.read(fileno, 1024) + if tmp == '': + break + lock.acquire() + try: + output_file.write(tmp) + if self.verbose: + bfr += tmp + if tmp.endswith('\n'): + for l in bfr.splitlines(): + log.debug(prefix, l) + bfr = '' + finally: + lock.release() + + def get_stdout(self): + """ + Get the full stdout of the subprocess so far. + + :return: Standard output of the process. 
+ :rtype: str + """ + self.stdout_lock.acquire() + stdout = self.stdout_file.getvalue() + self.stdout_lock.release() + return stdout + + def get_stderr(self): + """ + Get the full stderr of the subprocess so far. + + :return: Standard error of the process. + :rtype: str + """ + self.stderr_lock.acquire() + stderr = self.stderr_file.getvalue() + self.stderr_lock.release() + return stderr + + def terminate(self): + """ + Send a :attr:`signal.SIGTERM` to the process. + """ + try: + os.kill(self.sp.pid, signal.SIGTERM) + except: + pass + + def kill(self): + """ + Send a :attr:`signal.SIGKILL` to the process. + """ + try: + os.kill(self.sp.pid, signal.SIGKILL) + except: + pass + + def wait(self, timeout=None): + """ + Wait for the process to end, filling and returning the result attr. + + :param timeout: Time (seconds) we'll wait until the process is + finished. If it's not, we'll try to terminate it + and get a status. + :type timeout: float + :returns: The command result object. + :rtype: A :class:`avocado.utils.process.CmdResult` instance. + """ + if timeout is None: + self.sp.wait() + self.result.exit_status = self.sp.returncode + + if timeout > 0: + start_time = time.time() + while time.time() - start_time < timeout: + self.result.exit_status = self.sp.poll() + if self.result.exit_status is not None: + break + else: + # Give one second to check if we can successfully kill the process + timeout = 1 + + if self.result.exit_status is None: + self.terminate() + # Timeout here should be 1 second (see comment above) + stop_time = time.time() + timeout + while time.time() < stop_time: + self.result.exit_status = self.sp.poll() + if self.result.exit_status is not None: + break + else: + self.kill() + self.result.exit_status = self.sp.poll() + + duration = time.time() - self.start_time + self.result.duration = duration + + self.cleanup() + + return self.result + + def cleanup(self): + """ + Close subprocess stdout and stderr, and put values into result obj. 
+ """ + # Cleaning up threads + self.stdout_thread.join(1) + self.stderr_thread.join(1) + # Last sanity check + e_msg = 'Stdout thread for %s is still alive' % self.sp.pid + assert not self.stdout_thread.isAlive(), e_msg + e_msg = 'Stderr thread for %s is still alive' % self.sp.pid + assert not self.stderr_thread.isAlive(), e_msg + # If this fails, we're dealing with a zombie process + e_msg = 'Zombie Process %s' % self.sp.pid + assert self.result.exit_status is not None, e_msg + # Clean subprocess pipes and populate stdout/err + self.sp.stdout.close() + self.sp.stderr.close() + self.result.stdout = self.get_stdout() + self.result.stderr = self.get_stderr() + + +def run(cmd, timeout=None, verbose=True, ignore_status=False): + """ + Run a subprocess, returning a CmdResult object. + + :param cmd: Command line to run. + :type cmd: str + :param timeout: Time limit in seconds before attempting to kill the + running process. This function will take a few seconds + longer than 'timeout' to complete if it has to kill the + process. + :type timeout: float + :param verbose: Whether to log the command run and stdout/stderr. + :type verbose: bool + :param ignore_status: Whether to raise an exception when command returns =! 0 (False), or not (True). + :type ignore_status: bool :return: An :class:`avocado.utils.process.CmdResult` object. - :raise: :class:`avocado.utils.process.CmdResult`, if ``ignore_status=False``. + :raise: :class:`avocado.core.exceptions.CmdError`, if ``ignore_status=False``. 
""" - if verbose: - log.info("Running '%s'", cmd) - args = shlex.split(cmd) - start = time.time() - p = subprocess.Popen(args, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - stdout, stderr = p.communicate() - duration = time.time() - start - result = CmdResult(cmd) - result.exit_status = p.returncode - result.stdout = stdout - result.stderr = stderr - result.duration = duration - if p.returncode != 0 and not ignore_status: - raise exceptions.CmdError(cmd, result) - return result - - -def system(cmd, verbose=True, ignore_status=False): + sp = SubProcess(cmd=cmd, verbose=verbose) + cmd_result = sp.wait(timeout=timeout) + if cmd_result.exit_status != 0 and not ignore_status: + raise exceptions.CmdError(cmd, sp.result) + return cmd_result + + +def system(cmd, timeout=None, verbose=True, ignore_status=False): """ Run a subprocess, returning its exit code. - This is a light, yet compatible implementation of - :mod:`autotest.client.shared.utils.run`, so we can run commands in tests and - other avocado programs, if need to be. - - :param verbose: Wether to print the command run. - :param ignore_status: Wether to raise an exception when command returns + :param cmd: Command line to run. + :type cmd: str + :param timeout: Time limit in seconds before attempting to kill the + running process. This function will take a few seconds + longer than 'timeout' to complete if it has to kill the + process. + :type timeout: float + :param verbose: Whether to log the command run and stdout/stderr. + :type verbose: bool + :param ignore_status: Whether to raise an exception when command returns =! 0 (False), or not (True). - :return: An exit code. - :raise: :class:`avocado.utils.process.CmdResult`, if ``ignore_status=False``. + :type ignore_status: bool + :return: Exit code. + :rtype: int + :raise: :class:`avocado.core.exceptions.CmdError`, if ``ignore_status=False``. 
""" - cmd_result = run(cmd, verbose, ignore_status) + cmd_result = run(cmd=cmd, timeout=timeout, verbose=verbose, + ignore_status=ignore_status) return cmd_result.exit_status -def system_output(cmd, verbose=True, ignore_status=False): +def system_output(cmd, timeout=None, verbose=True, ignore_status=False): """ Run a subprocess, returning its output. - This is a light, yet compatible implementation of - :mod:`autotest.client.shared.utils.run`, so we can run commands in tests and - other avocado programs, if need to be. - - :param verbose: Wether to print the command run. - :param ignore_status: Wether to raise an exception when command returns + :param cmd: Command line to run. + :type cmd: str + :param timeout: Time limit in seconds before attempting to kill the + running process. This function will take a few seconds + longer than 'timeout' to complete if it has to kill the + process. + :type timeout: float + :param verbose: Whether to log the command run and stdout/stderr. + :type verbose: bool + :param ignore_status: Whether to raise an exception when command returns =! 0 (False), or not (True). - :return: A string with the process output. - :raise: :class:`avocado.utils.process.CmdResult`, if ``ignore_status=False``. + :return: Command output. + :rtype: str + :raise: :class:`avocado.core.exceptions.CmdError`, if ``ignore_status=False``. 
""" - cmd_result = run(cmd, verbose, ignore_status) + cmd_result = run(cmd=cmd, timeout=timeout, verbose=verbose, + ignore_status=ignore_status) return cmd_result.stdout diff --git a/selftests/all/functional/avocado/basic_tests.py b/selftests/all/functional/avocado/basic_tests.py index 1c41a8c9d3d4a699a1d18c989fd7840de1bae61f..fe56e25142f2da20f272dec3e5f8232c291f6306 100644 --- a/selftests/all/functional/avocado/basic_tests.py +++ b/selftests/all/functional/avocado/basic_tests.py @@ -16,7 +16,9 @@ import unittest import os +import shutil import sys +import tempfile # simple magic for using scripts within a source tree basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..') @@ -26,6 +28,14 @@ if os.path.isdir(os.path.join(basedir, 'avocado')): from avocado.utils import process +PASS_SCRIPT_CONTENTS = """#!/bin/sh +true +""" + +FAIL_SCRIPT_CONTENTS = """#!/bin/sh +false +""" + class RunnerOperationTest(unittest.TestCase): @@ -53,5 +63,64 @@ class RunnerOperationTest(unittest.TestCase): self.assertEqual(result.exit_status, expected_rc, "Avocado did not return rc %d:\n%s" % (expected_rc, result)) + +class RunnerDropinTest(unittest.TestCase): + + def setUp(self): + self.base_logdir = tempfile.mkdtemp(prefix='avocado_dropin_functional') + self.pass_script = os.path.join(self.base_logdir, 'avocado_pass.sh') + with open(self.pass_script, 'w') as pass_script_obj: + pass_script_obj.write(PASS_SCRIPT_CONTENTS) + os.chmod(self.pass_script, 0775) + + self.fail_script = os.path.join(self.base_logdir, 'avocado_fail.sh') + with open(self.fail_script, 'w') as fail_script_obj: + fail_script_obj.write(FAIL_SCRIPT_CONTENTS) + os.chmod(self.fail_script, 0775) + + def test_dropin_pass(self): + os.chdir(basedir) + cmd_line = './scripts/avocado run %s' % self.pass_script + result = process.run(cmd_line, ignore_status=True) + expected_rc = 0 + self.assertEqual(result.exit_status, expected_rc, + "Avocado did not return rc %d:\n%s" % + (expected_rc, 
result)) + + def test_dropin_fail(self): + os.chdir(basedir) + cmd_line = './scripts/avocado run %s' % self.fail_script + result = process.run(cmd_line, ignore_status=True) + expected_rc = 1 + self.assertEqual(result.exit_status, expected_rc, + "Avocado did not return rc %d:\n%s" % + (expected_rc, result)) + + def tearDown(self): + if os.path.isdir(self.base_logdir): + shutil.rmtree(self.base_logdir, ignore_errors=True) + + +class PluginsOperationTest(unittest.TestCase): + + def setUp(self): + self.base_outputdir = tempfile.mkdtemp(prefix='avocado_plugins') + + def test_sysinfo_plugin(self): + os.chdir(basedir) + cmd_line = './scripts/avocado sysinfo %s' % self.base_outputdir + result = process.run(cmd_line, ignore_status=True) + expected_rc = 0 + self.assertEqual(result.exit_status, expected_rc, + "Avocado did not return rc %d:\n%s" % + (expected_rc, result)) + sysinfo_files = os.listdir(self.base_outputdir) + self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir") + + def tearDown(self): + if os.path.isdir(self.base_outputdir): + shutil.rmtree(self.base_outputdir, ignore_errors=True) + + if __name__ == '__main__': unittest.main() diff --git a/selftests/all/unit/avocado/vm_unittest.py b/selftests/all/unit/avocado/vm_unittest.py index 3592f919f085d5f95b730a5916ec25e9cb710c91..5f61293974482fb87848812ea7a5065a50e6074a 100755 --- a/selftests/all/unit/avocado/vm_unittest.py +++ b/selftests/all/unit/avocado/vm_unittest.py @@ -35,13 +35,16 @@ class _Stream(object): def start_file_logging(self, param1, param2): pass + def info(self, msg, skip_newline=False): + pass + def log_header(self, param): pass def stop_file_logging(self): pass - def log_pass(self, param1, param2): + def log_pass(self, param1): pass @@ -65,6 +68,7 @@ class VMResultTest(unittest.TestCase): test = vm.Test(name=tst['test'], time=tst['time'], status=tst['status']) + self.test_result.start_test(test) self.test_result.check_test(test) if not status.mapping[test.status]: 
failures.append(test.tagged_name)