Commit 1ee56a64 authored by Lucas Meneghel Rodrigues, committed by Lucas Meneghel Rodrigues

Merge pull request #138 from lmr/sprint-11

Sprint 11
......@@ -33,6 +33,7 @@ import shutil
import time
import tempfile
from avocado.core import job_id
from avocado.utils import path
from avocado.settings import settings
......@@ -49,13 +50,13 @@ SETTINGS_TMP_DIR = os.path.expanduser(settings.get_value('runner', 'tmp_dir'))
SYSTEM_BASE_DIR = '/var/lib/avocado'
SYSTEM_TEST_DIR = os.path.join(SYSTEM_BASE_DIR, 'tests')
SYSTEM_DATA_DIR = os.path.join(SYSTEM_BASE_DIR, 'data')
SYSTEM_LOG_DIR = os.path.join(SYSTEM_BASE_DIR, 'logs')
SYSTEM_LOG_DIR = os.path.join(SYSTEM_BASE_DIR, 'job-results')
SYSTEM_TMP_DIR = '/var/tmp/avocado'
USER_BASE_DIR = os.path.expanduser('~/avocado')
USER_TEST_DIR = os.path.join(USER_BASE_DIR, 'tests')
USER_DATA_DIR = os.path.join(USER_BASE_DIR, 'data')
USER_LOG_DIR = os.path.join(USER_BASE_DIR, 'logs')
USER_LOG_DIR = os.path.join(USER_BASE_DIR, 'job-results')
USER_TMP_DIR = '/var/tmp/avocado'
......@@ -189,7 +190,7 @@ def get_logs_dir():
return _get_rw_dir(SETTINGS_LOG_DIR, SYSTEM_LOG_DIR, USER_LOG_DIR)
def get_job_logs_dir(args=None):
def get_job_logs_dir(args=None, unique_id=None):
"""
Create a log directory for a job, or a stand alone execution of a test.
......@@ -199,12 +200,16 @@ def get_job_logs_dir(args=None):
(optional).
:rtype: basestring
"""
start_time = time.strftime('%Y-%m-%d-%H.%M.%S')
start_time = time.strftime('%Y-%m-%dT%H.%M')
if args is not None:
logdir = args.logdir or get_logs_dir()
else:
logdir = get_logs_dir()
debugbase = 'run-%s' % start_time
# Stand alone tests handling
if unique_id is None:
unique_id = job_id.get_job_id()
debugbase = 'job-%s-%s' % (start_time, unique_id[:7])
debugdir = path.init_dir(logdir, debugbase)
latestdir = os.path.join(logdir, "latest")
try:
......
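For illustration, a minimal sketch (values are examples only) of the results directory basename that the updated ``get_job_logs_dir()`` builds from the start time and the job unique ID::

    import time

    # Illustrative sketch: mirror the naming scheme from the hunk above.
    unique_id = '381b849a62784228d2fd208d929cc49f310412dc'  # example SHA1
    start_time = time.strftime('%Y-%m-%dT%H.%M')
    debugbase = 'job-%s-%s' % (start_time, unique_id[:7])
    print(debugbase)  # e.g. job-2014-08-12T15.39-381b849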
......@@ -22,9 +22,13 @@ The current return codes are:
:class:`avocado.core.exceptions.JobError` exception.
* AVOCADO_CRASH
Something else went wrong and avocado plain crashed.
* AVOCADO_JOB_INTERRUPTED
The job was explicitly interrupted. Usually this means that a user
hit CTRL+C while the job was still running.
"""
numeric_status = {"AVOCADO_ALL_OK": 0,
"AVOCADO_TESTS_FAIL": 1,
"AVOCADO_JOB_FAIL": 2,
"AVOCADO_CRASH": 3}
"AVOCADO_CRASH": 3,
"AVOCADO_JOB_INTERRUPTED": 4}
......@@ -83,6 +83,14 @@ class TestTimeoutError(TestBaseException):
status = "ERROR"
class TestInterruptedError(TestBaseException):
"""
Indicates that the test was interrupted by the user (Ctrl+C)
"""
status = "INTERRUPTED"
class TestAbortError(TestBaseException):
"""
......
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Authors: Lucas Meneghel Rodrigues <lmr@redhat.com>
import hashlib
import random
import socket
import time
_RAND_POOL = random.SystemRandom()
_HOSTNAME = socket.gethostname()
def get_job_id():
"""
Create a job ID SHA1.
:return: SHA1 string
:rtype: str
"""
info = '%s-%s-%s' % (_HOSTNAME,
time.strftime('%Y-%m-%dT%H:%M:%S'),
_RAND_POOL.getrandbits(64))
h = hashlib.sha1()
h.update(info)
return h.hexdigest()
......@@ -219,13 +219,15 @@ class OutputManager(object):
extra = {'skip_newline': skip_newline}
self.console_log.log(level=level, msg=msg, extra=extra)
def start_file_logging(self, logfile, loglevel):
def start_file_logging(self, logfile, loglevel, unique_id):
"""
Start the main file logging.
:param logfile: Path to file that will receive logging.
:param loglevel: Level of the logger. Example: :mod:`logging.DEBUG`.
:param unique_id: job.Job() unique id attribute.
"""
self.job_unique_id = unique_id
self.debuglog = logfile
self.file_handler = logging.FileHandler(filename=logfile)
self.file_handler.setLevel(loglevel)
......
......@@ -25,7 +25,8 @@ mapping = {"TEST_NA": True,
"START": True,
"ALERT": False,
"RUNNING": False,
"NOSTATUS": False}
"NOSTATUS": False,
"INTERRUPTED": False}
feedback = {
# Test did not advertise current status, but process running the test is
......
......@@ -26,7 +26,6 @@ import sys
import signal
import time
import traceback
import uuid
import Queue
from avocado.core import data_dir
......@@ -34,6 +33,7 @@ from avocado.core import output
from avocado.core import status
from avocado.core import exceptions
from avocado.core import error_codes
from avocado.core import job_id
from avocado.utils import archive
from avocado.utils import path
from avocado import multiplex_config
......@@ -79,9 +79,11 @@ class TestRunner(object):
module_name = os.path.basename(test_path).split('.')[0]
if not os.path.exists(test_path):
# Try to resolve test ID (keep compatibility)
test_path = os.path.join(data_dir.get_test_dir(), '%s.py' % t_id)
rel_path = '%s.py' % t_id
test_path = os.path.join(data_dir.get_test_dir(), rel_path)
if os.path.exists(test_path):
path_analyzer = path.PathInspector(test_path)
t_id = rel_path
else:
test_class = test.MissingTest
test_instance = test_class(name=t_id,
......@@ -126,10 +128,15 @@ class TestRunner(object):
e_msg = "Timeout reached waiting for %s to end" % instance
raise exceptions.TestTimeoutError(e_msg)
def interrupt_handler(signum, frame):
e_msg = "Test %s interrupted by user" % instance
raise exceptions.TestInterruptedError(e_msg)
instance = self.load_test(params)
queue.put(instance.get_state())
signal.signal(signal.SIGUSR1, timeout_handler)
signal.signal(signal.SIGINT, interrupt_handler)
self.result.start_test(instance.get_state())
try:
......@@ -168,7 +175,9 @@ class TestRunner(object):
p = multiprocessing.Process(target=self.run_test,
args=(params, q,))
cycle_timeout = 1
cycle_timeout = 0.01
ui_progress_cycle = 0.25
ui_progress_count = 0
time_started = time.time()
should_quit = False
test_state = None
......@@ -193,7 +202,10 @@ class TestRunner(object):
test_state = q.get(timeout=cycle_timeout)
except Queue.Empty:
if p.is_alive():
self.job.result_proxy.throbber_progress()
ui_progress_count += cycle_timeout
if ui_progress_count >= ui_progress_cycle:
self.job.result_proxy.throbber_progress()
ui_progress_count = 0
else:
should_quit = True
......@@ -230,14 +242,18 @@ class Job(object):
:param args: an instance of :class:`argparse.Namespace`.
"""
self.args = args
if args is not None:
self.unique_id = args.unique_id or str(uuid.uuid4())
self.unique_id = args.unique_id or job_id.get_job_id()
else:
self.unique_id = str(uuid.uuid4())
self.logdir = data_dir.get_job_logs_dir(self.args)
self.logfile = os.path.join(self.logdir, "debug.log")
self.unique_id = job_id.get_job_id()
self.logdir = data_dir.get_job_logs_dir(self.args, self.unique_id)
self.logfile = os.path.join(self.logdir, "job.log")
self.idfile = os.path.join(self.logdir, "id")
with open(self.idfile, 'w') as id_file_obj:
id_file_obj.write("%s\n" % self.unique_id)
if self.args is not None:
self.loglevel = args.log_level or logging.DEBUG
self.multiplex_file = args.multiplex_file
......@@ -376,7 +392,8 @@ class Job(object):
self._make_test_runner()
self.output_manager.start_file_logging(self.logfile,
self.loglevel)
self.loglevel,
self.unique_id)
self.output_manager.logfile = self.logfile
failures = self.test_runner.run(params_list)
self.output_manager.stop_file_logging()
......@@ -431,6 +448,12 @@ class Job(object):
except exceptions.OptionValidationError, details:
self.output_manager.log_fail_header(str(details))
return error_codes.numeric_status['AVOCADO_JOB_FAIL']
except KeyboardInterrupt:
for child in multiprocessing.active_children():
os.kill(child.pid, signal.SIGINT)
self.output_manager.log_header('\n')
self.output_manager.log_header('Interrupted by user request')
sys.exit(error_codes.numeric_status['AVOCADO_JOB_INTERRUPTED'])
except Exception, details:
self.status = "ERROR"
......
......@@ -20,7 +20,7 @@ from avocado.core import data_dir
class DataDirList(plugin.Plugin):
"""
Implements the avocado 'datadir' functionality.
Implements the avocado 'datadir' subcommand
"""
name = 'datadir'
......
......@@ -120,7 +120,7 @@ class TestResultJournal(TestResult):
class Journal(plugin.Plugin):
"""
Test journal plugin
Test journal
"""
name = 'journal'
......
......@@ -50,6 +50,8 @@ class JSONTestResult(TestResult):
:type state: dict
"""
TestResult.end_test(self, state)
if 'job_id' not in self.json:
self.json['job_id'] = state['job_unique_id']
t = {'test': state['tagged_name'],
'url': state['name'],
'time': state['time_elapsed'],
......@@ -85,7 +87,7 @@ class JSONTestResult(TestResult):
class JSON(plugin.Plugin):
"""
JSON output plugin.
JSON output
"""
name = 'json'
......
......@@ -20,7 +20,7 @@ from avocado.core import output
class PluginsList(plugin.Plugin):
"""
Implements the avocado 'plugins' functionality.
Implements the avocado 'plugins' subcommand
"""
name = 'plugins_list'
......@@ -38,7 +38,17 @@ class PluginsList(plugin.Plugin):
pm = get_plugin_manager()
pipe.write(bcolors.header_str('Plugins loaded:'))
pipe.write('\n')
blength = 0
for plug in pm.plugins:
status = "Enabled" if plug.enabled else "Disabled"
pipe.write(' %s - %s (%s)\n' % (plug.name, plug.__doc__.strip(),
status))
clength = len(plug.name)
if clength > blength:
blength = clength
format_str = " %-" + str(blength) + "s - %s (%s)\n"
for plug in sorted(pm.plugins):
if plug.enabled:
status = bcolors.healthy_str("Enabled")
else:
status = bcolors.fail_header_str("Disabled")
pipe.write(format_str % (bcolors.header_str(plug.name), plug.__doc__.strip(),
status))
......@@ -24,10 +24,10 @@ from avocado import multiplex_config
class Multiplexer(plugin.Plugin):
"""
Implements the avocado 'multiplex' functionality.
Implements the avocado 'multiplex' subcommand
"""
name = 'plugins_list'
name = 'multiplexer'
enabled = True
def configure(self, app_parser, cmd_parser):
......
......@@ -29,7 +29,7 @@ from avocado import job
class TestLister(plugin.Plugin):
"""
Implements the avocado 'list' functionality.
Implements the avocado 'list' subcommand
"""
name = 'test_lister'
......@@ -54,22 +54,32 @@ class TestLister(plugin.Plugin):
"""
bcolors = output.term_support
pipe = output.get_paginator()
test_files = os.listdir(data_dir.get_test_dir())
base_test_dir = data_dir.get_test_dir()
test_files = os.listdir(base_test_dir)
test_dirs = []
blength = 0
for t in test_files:
inspector = path.PathInspector(path=t)
if inspector.is_python():
test_dirs.append(t.split('.')[0])
pipe.write(bcolors.header_str('Tests available:'))
pipe.write("\n")
for test_dir in test_dirs:
pipe.write(" %s\n" % test_dir)
clength = len((t.split('.')[0]))
if clength > blength:
blength = clength
test_dirs.append((t.split('.')[0], os.path.join(base_test_dir, t)))
format_string = " %-" + str(blength) + "s %s\n"
pipe.write(bcolors.header_str('Tests dir: %s\n' % base_test_dir))
if len(test_dirs) > 0:
pipe.write(bcolors.header_str(format_string % ('Alias', 'Path')))
for test_dir in test_dirs:
pipe.write(format_string % test_dir)
else:
pipe.write(bcolors.header_str('No tests were found on current '
'tests dir'))
class TestRunner(plugin.Plugin):
"""
Implements the avocado 'run' functionality.
Implements the avocado 'run' subcommand
"""
name = 'test_runner'
......@@ -123,7 +133,7 @@ class TestRunner(plugin.Plugin):
class SystemInformation(plugin.Plugin):
"""
Collect system information and log.
Collect system information
"""
name = 'sysinfo'
......
......@@ -196,18 +196,18 @@ class VMTestResult(TestResult):
Called once before any tests are executed.
"""
TestResult.start_tests(self)
self.stream.log_header("TOTAL TESTS: %s" % self.tests_total)
self.stream.log_header("TESTS: %s" % self.tests_total)
def end_tests(self):
"""
Called once after all tests are executed.
"""
self.stream.log_header("TOTAL PASSED: %d" % len(self.passed))
self.stream.log_header("TOTAL ERROR: %d" % len(self.errors))
self.stream.log_header("TOTAL FAILED: %d" % len(self.failed))
self.stream.log_header("TOTAL SKIPPED: %d" % len(self.skipped))
self.stream.log_header("TOTAL WARNED: %d" % len(self.warned))
self.stream.log_header("ELAPSED TIME: %.2f s" % self.total_time)
self.stream.log_header("PASS : %d" % len(self.passed))
self.stream.log_header("ERROR: %d" % len(self.errors))
self.stream.log_header("FAIL : %d" % len(self.failed))
self.stream.log_header("SKIP : %d" % len(self.skipped))
self.stream.log_header("WARN : %d" % len(self.warned))
self.stream.log_header("TIME : %.2f s" % self.total_time)
self.stream.log_header("DEBUG LOG: %s" % self.stream.logfile)
def start_test(self, test):
......@@ -279,7 +279,7 @@ class VMTestResult(TestResult):
class RunVM(plugin.Plugin):
"""
Run tests on Virtual Machine plugin.
Run tests on a Virtual Machine
"""
name = 'run_vm'
......
......@@ -221,7 +221,7 @@ class xUnitTestResult(TestResult):
class XUnit(plugin.Plugin):
"""
xUnit output plugin.
xUnit output
"""
name = 'xunit'
......
......@@ -244,19 +244,20 @@ class HumanTestResult(TestResult):
Called once before any tests are executed.
"""
TestResult.start_tests(self)
self.stream.log_header("DEBUG LOG: %s" % self.stream.logfile)
self.stream.log_header("TOTAL TESTS: %s" % self.tests_total)
self.stream.log_header("JOB ID : %s" % self.stream.job_unique_id)
self.stream.log_header("JOB LOG: %s" % self.stream.logfile)
self.stream.log_header("TESTS : %s" % self.tests_total)
def end_tests(self):
"""
Called once after all tests are executed.
"""
self.stream.log_header("TOTAL PASSED: %d" % len(self.passed))
self.stream.log_header("TOTAL ERROR: %d" % len(self.errors))
self.stream.log_header("TOTAL FAILED: %d" % len(self.failed))
self.stream.log_header("TOTAL SKIPPED: %d" % len(self.skipped))
self.stream.log_header("TOTAL WARNED: %d" % len(self.warned))
self.stream.log_header("ELAPSED TIME: %.2f s" % self.total_time)
self.stream.log_header("PASS : %d" % len(self.passed))
self.stream.log_header("ERROR: %d" % len(self.errors))
self.stream.log_header("FAIL : %d" % len(self.failed))
self.stream.log_header("SKIP : %d" % len(self.skipped))
self.stream.log_header("WARN : %d" % len(self.warned))
self.stream.log_header("TIME : %.2f s" % self.total_time)
def start_test(self, state):
"""
......
......@@ -126,13 +126,10 @@ class Test(unittest.TestCase):
self.srcdir = path.init_dir(self.workdir, 'src')
if base_logdir is None:
base_logdir = data_dir.get_job_logs_dir()
base_logdir = os.path.join(base_logdir, 'test-results')
self.tagged_name = self.get_tagged_name(base_logdir)
# We need log directory names to be unique
tagged_name = self.tagged_name.replace('/', '.')
if tagged_name.startswith('.'):
tagged_name = tagged_name[1:]
self.logdir = path.init_dir(base_logdir, tagged_name)
self.logdir = path.init_dir(base_logdir, self.tagged_name)
self.logfile = os.path.join(self.logdir, 'debug.log')
self.outputdir = path.init_dir(self.logdir, 'data')
self.sysinfodir = path.init_dir(self.logdir, 'sysinfo')
......@@ -259,16 +256,23 @@ class Test(unittest.TestCase):
:return: String `test.tag`.
"""
if self.name.startswith('/'):
self.name = self.name[1:]
if self.tag is not None:
return "%s.%s" % (self.name, self.tag)
tag = 1
tagged_name = "%s.%s" % (self.name, tag)
tag = 0
if tag == 0:
tagged_name = self.name
else:
tagged_name = "%s.%s" % (self.name, tag)
test_logdir = os.path.join(logdir, tagged_name)
while os.path.isdir(test_logdir):
tag += 1
tagged_name = "%s.%s" % (self.name, tag)
test_logdir = os.path.join(logdir, tagged_name)
self.tag = str(tag)
return tagged_name
def setup(self):
......@@ -399,6 +403,8 @@ class Test(unittest.TestCase):
self.fail_reason)
else:
if self.status is None:
self.status = 'INTERRUPTED'
self.log.info("%s %s", self.status,
self.tagged_name)
......
......@@ -203,21 +203,23 @@ class SubProcess(object):
"""
Send a :attr:`signal.SIGTERM` to the process.
"""
try:
os.kill(self.sp.pid, signal.SIGTERM)
except:
pass
self.send_signal(signal.SIGTERM)
def kill(self):
"""
Send a :attr:`signal.SIGKILL` to the process.
"""
try:
os.kill(self.sp.pid, signal.SIGKILL)
except:
pass
self.send_signal(signal.SIGKILL)
def wait(self, timeout=None):
def send_signal(self, sig):
"""
Send the specified signal to the process.
:param sig: Signal to send.
"""
self.sp.send_signal(sig)
def wait(self, timeout=None, sig=signal.SIGTERM):
"""
Wait for the process to end, filling and returning the result attr.
......@@ -243,7 +245,7 @@ class SubProcess(object):
timeout = 1
if self.result.exit_status is None:
self.terminate()
self.send_signal(sig)
# Timeout here should be 1 second (see comment above)
stop_time = time.time() + timeout
while time.time() < stop_time:
......
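A short usage sketch of the new ``send_signal()``/``wait(sig=...)`` API, mirroring the functional test added later in this change (the command line is illustrative)::

    import signal

    from avocado.utils import process

    # Start a long-running avocado job and interrupt it after 3 seconds
    # with SIGINT, which the job turns into a KeyboardInterrupt.
    sp = process.SubProcess('./scripts/avocado run sleeptenmin')
    sp.wait(timeout=3, sig=signal.SIGINT)
    print(sp.result.exit_status)  # 4 (AVOCADO_JOB_INTERRUPTED) is expected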
......@@ -20,7 +20,7 @@ Installing avocado - Fedora
You can install the rpm package by performing the following commands::
sudo curl http://copr.fedoraproject.org/coprs/lmr/Autotest/repo/fedora-20-i386/ -o /etc/yum.repos.d/autotest.repo
sudo curl http://copr.fedoraproject.org/coprs/lmr/Autotest/repo/fedora-20/lmr-Autotest-fedora-20.repo -o /etc/yum.repos.d/autotest.repo
sudo yum update
sudo yum install avocado
......@@ -29,8 +29,8 @@ Installing avocado - Ubuntu
You need to add the following lines::
deb http://ppa.launchpad.net/lmr/autotest/ubuntu saucy main
deb-src http://ppa.launchpad.net/lmr/autotest/ubuntu saucy main
deb http://ppa.launchpad.net/lmr/autotest/ubuntu trusty main
deb-src http://ppa.launchpad.net/lmr/autotest/ubuntu trusty main
To the file ``/etc/apt/sources.list``. After that you can install avocado by
performing the following commands::
......@@ -61,37 +61,50 @@ key. You can list tests by::
You can run them using the subcommand ``run``::
$ scripts/avocado run sleeptest
DEBUG LOG: /home/lmr/Code/avocado/logs/run-2014-04-23-19.06.39/debug.log
TOTAL TESTS: 1
(1/1) sleeptest.1: PASS (1.09 s)
TOTAL PASSED: 1
TOTAL ERROR: 0
TOTAL FAILED: 0
TOTAL SKIPPED: 0
TOTAL WARNED: 0
ELAPSED TIME: 1.09 s
$ avocado run sleeptest
JOB ID : 381b849a62784228d2fd208d929cc49f310412dc
JOB LOG: /home/lmr/avocado/job-results/job-2014-08-12T15.39-381b849a/job.log
TESTS : 1
(1/1) sleeptest.1: PASS (1.01 s)
PASS : 1
ERROR: 0
FAIL : 0
SKIP : 0
WARN : 0
TIME : 1.01 s
The Job ID is a SHA1 string that has some information encoded:
* Hostname
* ISO timestamp
* 64 bit integer
The idea is to have a unique identifier that can be used to correlate job data,
so that results obtained by jobs run on different systems can be joined in a
single database.
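A minimal sketch of how such an ID is put together, mirroring the ``avocado.core.job_id`` module introduced earlier in this change::

    import hashlib
    import random
    import socket
    import time

    # Combine hostname, an ISO timestamp and a 64-bit random integer,
    # then take the SHA1 of the resulting string.
    info = '%s-%s-%s' % (socket.gethostname(),
                         time.strftime('%Y-%m-%dT%H:%M:%S'),
                         random.SystemRandom().getrandbits(64))
    print(hashlib.sha1(info.encode('utf-8')).hexdigest())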
You can run any number of tests in an arbitrary order, as well as mix and match
native tests and dropin tests::
$ echo '#!/bin/bash' > /tmp/script_that_passes.sh
$ echo 'true' >> /tmp/script_that_passes.sh
$ scripts/avocado run "failtest sleeptest synctest failtest synctest /tmp/script_that_passes.sh"
DEBUG LOG: /home/lmr/Code/avocado/logs/run-2014-04-23-19.16.46/debug.log
TOTAL TESTS: 6
(1/6) failtest.1: FAIL (0.09 s)
(2/6) sleeptest.1: PASS (1.09 s)
(3/6) synctest.1: PASS (2.33 s)
(4/6) failtest.2: FAIL (0.10 s)
(5/6) synctest.2: PASS (1.94 s)
(6/6) script_that_passes.1: PASS (0.11 s)
TOTAL PASSED: 4
TOTAL ERROR: 0
TOTAL FAILED: 2
TOTAL SKIPPED: 0
TOTAL WARNED: 0
ELAPSED TIME: 5.67 s
$ chmod +x /tmp/script_that_passes.sh
$ avocado run "failtest sleeptest synctest failtest synctest /tmp/script_that_passes.sh"
JOB ID : 86911e49b5f2c36caeea41307cee4fecdcdfa121
JOB LOG: /home/lmr/avocado/job-results/job-2014-08-12T15.42-86911e49/job.log
TESTS : 6
(1/6) failtest.1: FAIL (0.00 s)
(2/6) sleeptest.1: PASS (1.00 s)
(3/6) synctest.1: ERROR (0.01 s)
(4/6) failtest.2: FAIL (0.00 s)
(5/6) synctest.2: ERROR (0.01 s)
(6/6) /tmp/script_that_passes.sh.1: PASS (0.02 s)
PASS : 2
ERROR: 2
FAIL : 2
SKIP : 0
WARN : 0
TIME : 1.04 s
More involved features of the avocado runner are discussed later, as the
relevant concepts are introduced.
......@@ -27,18 +27,19 @@ print while executing tests::
Or the more verbose avocado output::
$ scripts/avocado run "sleeptest failtest synctest"
DEBUG LOG: /home/lmr/Code/avocado/logs/run-2014-04-24-18.17.52/debug.log
TOTAL TESTS: 3
(1/3) sleeptest.1: PASS (1.09 s)
(2/3) failtest.1: FAIL (0.10 s)
(3/3) synctest.1: PASS (1.98 s)
TOTAL PASSED: 2
TOTAL ERROR: 0
TOTAL FAILED: 1
TOTAL SKIPPED: 0
TOTAL WARNED: 0
ELAPSED TIME: 3.17 s
$ avocado run "sleeptest failtest synctest"
JOB ID : 5ffe479262ea9025f2e4e84c4e92055b5c79bdc9
JOB LOG: /home/lmr/avocado/job-results/job-2014-08-12T15.57-5ffe4792/job.log
TESTS : 3
(1/3) sleeptest.1: PASS (1.01 s)
(2/3) failtest.1: FAIL (0.00 s)
(3/3) synctest.1: PASS (1.98 s)
PASS : 1
ERROR: 1
FAIL : 1
SKIP : 0
WARN : 0
TIME : 3.17 s
The most important thing is to remember that programs should never need to parse
human output to figure out what happened with your test run.
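For example, a minimal sketch that consumes the machine readable results instead; the path is illustrative, and apart from ``job_id`` (written by the JSON plugin in this change) any other keys would be assumptions::

    import json

    # Read results.json rather than scraping console output.
    # 'latest' is the symlink created next to the job results directories.
    with open('/home/lmr/avocado/job-results/latest/results.json') as results:
        data = json.load(results)

    print(data.get('job_id'))  # full SHA1 of the job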
......
......@@ -14,7 +14,7 @@ List available plugins
Avocado has a builtin ``plugins`` command that lets you list available
plugins::
$ scripts/avocado plugins
$ avocado plugins
Plugins loaded:
test_lister - Implements the avocado 'list' functionality. (Enabled)
sysinfo - Collect system information and log. (Enabled)
......@@ -68,7 +68,7 @@ location that contains plugins, that will be automatically loaded. In the
avocado source tree, the ``avocado_hello.py`` example is available under
``examples/plugins``. So, in order to enable the hello plugin, you can do a::
$ scripts/avocado --plugins examples/plugins/ plugins
$ avocado --plugins examples/plugins/ plugins
Plugins loaded:
test_lister - Implements the avocado 'list' functionality. (Enabled)
sysinfo - Collect system information and log. (Enabled)
......@@ -82,7 +82,7 @@ Run it
To run it, you can simply call the newly registered runner command ``hello``::
$ scripts/avocado --plugins examples/plugins/ hello
$ avocado --plugins examples/plugins/ hello
The classical Hello World! plugin example.
Wrap Up
......
......@@ -3,41 +3,114 @@ Results Specification
=====================
On a machine that executed tests, job results are available under
``[logdir]/run-[timestamp]``, where ``logdir`` is the configured avocado
``[job-results]/job-[timestamp]-[short job ID]``, where ``logdir`` is the configured avocado
logs directory (see the data dir plugin), and the directory name includes
a timestamp, such as ``run-2014-06-13-19.30.43``. A typical
a timestamp, such as ``job-2014-08-12T15.44-565e8de``. A typical
results directory structure can be seen below ::
run-2014-06-13-19.30.43
|-- debug.log
|-- results.json
|-- results.xml
|-- sleeptest.1
| |-- debug.log
| `-- sysinfo
| |-- post
| `-- pre
|-- sysinfo
| |-- post
| `-- pre
/home/lmr/avocado/job-results/job-2014-08-13T00.45-4a92bc0/
├── id
├── job.log
├── results.json
├── results.xml
├── sysinfo
│   ├── post
│   │   ├── brctl_show
│   │   ├── cmdline
│   │   ├── cpuinfo
│   │   ├── current_clocksource
│   │   ├── df_-mP
│   │   ├── dmesg_-c
│   │   ├── dmidecode
│   │   ├── fdisk_-l
│   │   ├── gcc_--version
│   │   ├── hostname
│   │   ├── ifconfig_-a
│   │   ├── interrupts
│   │   ├── ip_link
│   │   ├── ld_--version
│   │   ├── lscpu
│   │   ├── lspci_-vvnn
│   │   ├── meminfo
│   │   ├── modules
│   │   ├── mount
│   │   ├── mounts
│   │   ├── numactl_--hardware_show
│   │   ├── partitions
│   │   ├── scaling_governor
│   │   ├── uname_-a
│   │   ├── uptime
│   │   └── version
│   └── pre
│   ├── brctl_show
│   ├── cmdline
│   ├── cpuinfo
│   ├── current_clocksource
│   ├── df_-mP
│   ├── dmesg_-c
│   ├── dmidecode
│   ├── fdisk_-l
│   ├── gcc_--version
│   ├── hostname
│   ├── ifconfig_-a
│   ├── interrupts
│   ├── ip_link
│   ├── ld_--version
│   ├── lscpu
│   ├── lspci_-vvnn
│   ├── meminfo
│   ├── modules
│   ├── mount
│   ├── mounts
│   ├── numactl_--hardware_show
│   ├── partitions
│   ├── scaling_governor
│   ├── uname_-a
│   ├── uptime
│   └── version
└── test-results
└── tests
├── sleeptest.py.long
│   ├── data
│   ├── debug.log
│   └── sysinfo
│   ├── post
│   └── pre
├── sleeptest.py.medium
│   ├── data
│   ├── debug.log
│   └── sysinfo
│   ├── post
│   └── pre
└── sleeptest.py.short
├── data
├── debug.log
└── sysinfo
├── post
└── pre
20 directories, 59 files
From what you can see, the results dir has:
1) A human readable 'debug.log' in the top level, with human readable logs of
1) A human readable ``id`` in the top level, with the job SHA1.
2) A human readable ``job.log`` in the top level, with human readable logs of
the task
2) A machine readable 'results.xml' in the top level, with a summary of the
3) A machine readable ``results.xml`` in the top level, with a summary of the
job information in xUnit format.
3) A top level 'sysinfo' dir, with sub directories 'pre' and 'post', that store
4) A top level ``sysinfo`` dir, with sub directories ``pre`` and ``post``, that store
sysinfo files pre job and post job, respectively.
4) Subdirectory with any number of tagged testnames. Those tagged testnames
represent instances of test execution results.
5) Subdirectory ``test-results``, that contains a number of subdirectories
(tagged testnames). Those tagged testnames represent instances of test
execution results.
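Item 1 above, for instance, makes it trivial to recover the full job SHA1 programmatically (the path is illustrative)::

    import os

    # Sketch: read back the job ID written to the 'id' file.
    results_dir = os.path.expanduser('~/avocado/job-results/latest')
    with open(os.path.join(results_dir, 'id')) as id_file:
        job_sha1 = id_file.read().strip()
    print(job_sha1)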
Test execution instances specification
======================================
The instances should have:
1) A top level human readable debug.log, with test debug information
2) A 'sysinfo' subdir, with sub directories 'pre' and 'post, that store
1) A top level human readable ``test.log``, with test debug information
2) A ``sysinfo`` subdir, with sub directories ``pre`` and ``post``, that store
sysinfo files pre test and post test, respectively.
3) A ``data`` subdir, where the test can output a number of files if necessary.
......@@ -112,39 +112,42 @@ Using a multiplex file
You may use the avocado runner with a multiplex file to provide params and matrix
generation for sleeptest just like::
$ avocado run sleeptest --multiplex tests/sleeptest/sleeptest.mplx
DEBUG LOG: /home/lmr/avocado/logs/run-2014-05-13-15.44.54/debug.log
TOTAL TESTS: 3
(1/3) sleeptest.short: PASS (0.64 s)
(2/3) sleeptest.medium: PASS (1.11 s)
(3/3) sleeptest.long: PASS (5.12 s)
TOTAL PASSED: 3
TOTAL ERROR: 0
TOTAL FAILED: 0
TOTAL SKIPPED: 0
TOTAL WARNED: 0
ELAPSED TIME: 6.87 s
Note that, as your multiplex file specifies all parameters for sleeptest, you can simply
leave the test url list empty, such as::
$ avocado run --multiplex tests/sleeptest/sleeptest.mplx
$ avocado run sleeptest --multiplex tests/sleeptest.py.data/sleeptest.mplx
JOB ID : d565e8dec576d6040f894841f32a836c751f968f
JOB LOG: /home/lmr/avocado/job-results/job-2014-08-12T15.44-d565e8de/job.log
TESTS : 3
(1/3) sleeptest.short: PASS (0.50 s)
(2/3) sleeptest.medium: PASS (1.01 s)
(3/3) sleeptest.long: PASS (5.01 s)
PASS : 3
ERROR: 0
FAIL : 0
SKIP : 0
WARN : 0
TIME : 6.52 s
Note that, as your multiplex file specifies all parameters for sleeptest, you
can't leave the test ID empty::
$ scripts/avocado run --multiplex tests/sleeptest/sleeptest.mplx
Empty test ID. A test path or alias must be provided
If you want to run some tests that don't require params set by the multiplex file, you can::
$ avocado run "sleeptest synctest" --multiplex tests/sleeptest/sleeptest.mplx
DEBUG LOG: /home/lmr/avocado/logs/run-2014-05-13-15.47.55/debug.log
TOTAL TESTS: 4
(1/4) sleeptest.short: PASS (0.61 s)
(2/4) sleeptest.medium: PASS (1.11 s)
(3/4) sleeptest.long: PASS (5.11 s)
(4/4) synctest.1: PASS (1.85 s)
TOTAL PASSED: 4
TOTAL ERROR: 0
TOTAL FAILED: 0
TOTAL SKIPPED: 0
TOTAL WARNED: 0
ELAPSED TIME: 8.69 s
$ avocado run "sleeptest synctest" --multiplex tests/sleeptest.py.data/sleeptest.mplx
JOB ID : dd91ea5f8b42b2f084702315688284f7e8aa220a
JOB LOG: /home/lmr/avocado/job-results/job-2014-08-12T15.49-dd91ea5f/job.log
TESTS : 4
(1/4) sleeptest.short: PASS (0.50 s)
(2/4) sleeptest.medium: PASS (1.01 s)
(3/4) sleeptest.long: PASS (5.01 s)
(4/4) synctest.1: ERROR (1.85 s)
PASS : 3
ERROR: 1
FAIL : 0
SKIP : 0
WARN : 0
TIME : 8.69 s
Avocado tests are also unittests
================================
......@@ -174,15 +177,17 @@ you want to use it, don't forget to ``chmod +x`` your test.
Executing an avocado test gives::
$ tests/sleeptest/sleeptest.py
DEBUG LOG: /home/lmr/avocado/logs/run-2014-04-23-21.11.37/debug.log
TOTAL TESTS: 1
(1/1) sleeptest.1: PASS (1.11 s)
TOTAL PASSED: 1
TOTAL FAILED: 0
TOTAL SKIPPED: 0
TOTAL WARNED: 0
ELAPSED TIME: 1.11 s
$ tests/sleeptest.py
JOB ID : de6c1e4c227c786dc4d926f6fca67cda34d96276
JOB LOG: /home/lmr/avocado/job-results/job-2014-08-12T15.48-de6c1e4c/job.log
TESTS : 1
(1/1) sleeptest.1: PASS (1.00 s)
PASS : 1
ERROR: 0
FAIL : 0
SKIP : 0
WARN : 0
TIME : 1.00 s
Running tests with nosetests
============================
......@@ -193,11 +198,11 @@ assemble a fully automated test grid, plus richer test API for tests on the
Linux platform. Regardless, since an avocado test class is also a unittest
class, you can run them with the ``nosetests`` application::
$ nosetests tests/sleeptest/sleeptest.py
$ nosetests tests/sleeptest.py
.
----------------------------------------------------------------------
Ran 1 test in 1.092s
Ran 1 test in 1.004s
OK
Setup and cleanup methods
......@@ -303,52 +308,54 @@ impact your test grid. You can account for that possibility and set up a
::
avocado run sleeptest --multiplex /tmp/sleeptest-example.mplx
DEBUG LOG: /home/lmr/avocado/logs/run-2014-06-10-16.13.33/debug.log
TOTAL TESTS: 1
(1/1) sleeptest.1: ERROR (3.00 s)
TOTAL PASSED: 0
TOTAL ERROR: 1
TOTAL FAILED: 0
TOTAL SKIPPED: 0
TOTAL WARNED: 0
ELAPSED TIME: 3.00 s
$ avocado run sleeptest --multiplex /tmp/sleeptest-example.mplx
JOB ID : 6d5a2ff16bb92395100fbc3945b8d253308728c9
JOB LOG: /home/lmr/avocado/job-results/job-2014-08-12T15.52-6d5a2ff1/job.log
TESTS : 1
(1/1) sleeptest.1: ERROR (2.97 s)
PASS : 0
ERROR: 1
FAIL : 0
SKIP : 0
WARN : 0
TIME : 2.97 s
::
$ cat /home/lmr/avocado/logs/run-2014-06-10-16.13.33/debug.log
16:13:33 test L0136 INFO | START sleeptest.1
16:13:33 test L0137 DEBUG|
16:13:33 test L0138 DEBUG| Test instance parameters:
16:13:33 test L0145 DEBUG| _name_map_file = {'sleeptest-example.mplx': 'sleeptest'}
16:13:33 test L0145 DEBUG| _short_name_map_file = {'sleeptest-example.mplx': 'sleeptest'}
16:13:33 test L0145 DEBUG| dep =
16:13:33 test L0145 DEBUG| name = sleeptest
16:13:33 test L0145 DEBUG| shortname = sleeptest
16:13:33 test L0145 DEBUG| sleep_length = 5.0
16:13:33 test L0145 DEBUG| sleep_length_type = float
16:13:33 test L0145 DEBUG| timeout = 3.0
16:13:33 test L0145 DEBUG| timeout_type = float
16:13:33 test L0147 DEBUG|
16:13:33 test L0150 DEBUG| Default parameters:
16:13:33 test L0152 DEBUG| sleep_length = 1.0
16:13:33 test L0154 DEBUG|
16:13:33 test L0155 DEBUG| Test instance params override defaults whenever available
16:13:33 test L0156 DEBUG|
16:13:33 test L0162 INFO | Test timeout set. Will wait 3.00 s for PID 23953 to end
16:13:33 test L0163 INFO |
16:13:33 sysinfo L0154 DEBUG| Not logging /proc/slabinfo (lack of permissions)
16:13:33 sleeptest L0035 DEBUG| Sleeping for 5.00 seconds
16:13:36 test L0054 ERROR|
16:13:36 test L0057 ERROR| Traceback (most recent call last):
16:13:36 test L0057 ERROR| File "/home/lmr/Code/avocado/tests/sleeptest/sleeptest.py", line 36, in action
16:13:36 test L0057 ERROR| time.sleep(self.params.sleep_length)
16:13:36 test L0057 ERROR| File "/home/lmr/Code/avocado/avocado/job.py", line 101, in timeout_handler
16:13:36 test L0057 ERROR| raise exceptions.TestTimeoutError(e_msg)
16:13:36 test L0057 ERROR| TestTimeoutError: Timeout reached waiting for sleeptest to end
16:13:36 test L0058 ERROR|
16:13:36 test L0376 ERROR| ERROR sleeptest.1 -> TestTimeoutError: Timeout reached waiting for sleeptest to end
16:13:36 test L0363 INFO |
$ cat /home/lmr/avocado/job-results/job-2014-08-12T15.52-6d5a2ff1/job.log
15:52:51 test L0143 INFO | START sleeptest.1
15:52:51 test L0144 DEBUG|
15:52:51 test L0145 DEBUG| Test log: /home/lmr/avocado/job-results/job-2014-08-12T15.52-6d5a2ff1/sleeptest.1/test.log
15:52:51 test L0146 DEBUG| Test instance parameters:
15:52:51 test L0153 DEBUG| _name_map_file = {'sleeptest-example.mplx': 'sleeptest'}
15:52:51 test L0153 DEBUG| _short_name_map_file = {'sleeptest-example.mplx': 'sleeptest'}
15:52:51 test L0153 DEBUG| dep = []
15:52:51 test L0153 DEBUG| id = sleeptest
15:52:51 test L0153 DEBUG| name = sleeptest
15:52:51 test L0153 DEBUG| shortname = sleeptest
15:52:51 test L0153 DEBUG| sleep_length = 5.0
15:52:51 test L0153 DEBUG| sleep_length_type = float
15:52:51 test L0153 DEBUG| timeout = 3.0
15:52:51 test L0153 DEBUG| timeout_type = float
15:52:51 test L0154 DEBUG|
15:52:51 test L0157 DEBUG| Default parameters:
15:52:51 test L0159 DEBUG| sleep_length = 1.0
15:52:51 test L0161 DEBUG|
15:52:51 test L0162 DEBUG| Test instance params override defaults whenever available
15:52:51 test L0163 DEBUG|
15:52:51 test L0169 INFO | Test timeout set. Will wait 3.00 s for PID 15670 to end
15:52:51 test L0170 INFO |
15:52:51 sleeptest L0035 DEBUG| Sleeping for 5.00 seconds
15:52:54 test L0057 ERROR|
15:52:54 test L0060 ERROR| Traceback (most recent call last):
15:52:54 test L0060 ERROR| File "/home/lmr/Code/avocado.lmr/tests/sleeptest.py", line 36, in action
15:52:54 test L0060 ERROR| time.sleep(self.params.sleep_length)
15:52:54 test L0060 ERROR| File "/home/lmr/Code/avocado.lmr/avocado/job.py", line 127, in timeout_handler
15:52:54 test L0060 ERROR| raise exceptions.TestTimeoutError(e_msg)
15:52:54 test L0060 ERROR| TestTimeoutError: Timeout reached waiting for sleeptest to end
15:52:54 test L0061 ERROR|
15:52:54 test L0400 ERROR| ERROR sleeptest.1 -> TestTimeoutError: Timeout reached waiting for sleeptest to end
15:52:54 test L0387 INFO |
If you pass that multiplex file to the runner multiplexer, this will register
......@@ -390,45 +397,47 @@ This accomplishes a similar effect to the multiplex setup defined in there.
::
$ scripts/avocado run timeouttest
DEBUG LOG: /home/lmr/avocado/logs/run-2014-06-10-16.14.19/debug.log
TOTAL TESTS: 1
(1/1) timeouttest.1: ERROR (3.00 s)
TOTAL PASSED: 0
TOTAL ERROR: 1
TOTAL FAILED: 0
TOTAL SKIPPED: 0
TOTAL WARNED: 0
ELAPSED TIME: 3.00 s
$ avocado run timeouttest
JOB ID : d78498a54504b481192f2f9bca5ebb9bbb820b8a
JOB LOG: /home/lmr/avocado/job-results/job-2014-08-12T15.54-d78498a5/job.log
TESTS : 1
(1/1) timeouttest.1: ERROR (2.97 s)
PASS : 0
ERROR: 1
FAIL : 0
SKIP : 0
WARN : 0
TIME : 2.97 s
::
$ cat /home/lmr/avocado/logs/run-2014-06-10-16.14.19/debug.log
16:14:19 test L0136 INFO | START timeouttest.1
16:14:19 test L0137 DEBUG|
16:14:19 test L0138 DEBUG| Test instance parameters:
16:14:19 test L0145 DEBUG| shortname = timeouttest
16:14:19 test L0147 DEBUG|
16:14:19 test L0150 DEBUG| Default parameters:
16:14:19 test L0152 DEBUG| sleep_time = 5.0
16:14:19 test L0152 DEBUG| timeout = 3.0
16:14:19 test L0154 DEBUG|
16:14:19 test L0155 DEBUG| Test instance params override defaults whenever available
16:14:19 test L0156 DEBUG|
16:14:19 test L0162 INFO | Test timeout set. Will wait 3.00 s for PID 24008 to end
16:14:19 test L0163 INFO |
16:14:19 sysinfo L0154 DEBUG| Not logging /proc/slabinfo (lack of permissions)
16:14:20 timeouttes L0036 INFO | Sleeping for 5.00 seconds (2 more than the timeout)
16:14:22 test L0054 ERROR|
16:14:22 test L0057 ERROR| Traceback (most recent call last):
16:14:22 test L0057 ERROR| File "/home/lmr/Code/avocado/tests/timeouttest/timeouttest.py", line 37, in action
16:14:22 test L0057 ERROR| time.sleep(self.params.sleep_time)
16:14:22 test L0057 ERROR| File "/home/lmr/Code/avocado/avocado/job.py", line 101, in timeout_handler
16:14:22 test L0057 ERROR| raise exceptions.TestTimeoutError(e_msg)
16:14:22 test L0057 ERROR| TestTimeoutError: Timeout reached waiting for timeouttest to end
16:14:22 test L0058 ERROR|
16:14:22 test L0376 ERROR| ERROR timeouttest.1 -> TestTimeoutError: Timeout reached waiting for timeouttest to end
16:14:22 test L0363 INFO |
$ cat /home/lmr/avocado/job-results/job-2014-08-12T15.54-d78498a5/job.log
15:54:28 test L0143 INFO | START timeouttest.1
15:54:28 test L0144 DEBUG|
15:54:28 test L0145 DEBUG| Test log: /home/lmr/avocado/job-results/job-2014-08-12T15.54-d78498a5/timeouttest.1/test.log
15:54:28 test L0146 DEBUG| Test instance parameters:
15:54:28 test L0153 DEBUG| id = timeouttest
15:54:28 test L0154 DEBUG|
15:54:28 test L0157 DEBUG| Default parameters:
15:54:28 test L0159 DEBUG| sleep_time = 5.0
15:54:28 test L0159 DEBUG| timeout = 3.0
15:54:28 test L0161 DEBUG|
15:54:28 test L0162 DEBUG| Test instance params override defaults whenever available
15:54:28 test L0163 DEBUG|
15:54:28 test L0169 INFO | Test timeout set. Will wait 3.00 s for PID 15759 to end
15:54:28 test L0170 INFO |
15:54:28 timeouttes L0036 INFO | Sleeping for 5.00 seconds (2 more than the timeout)
15:54:31 test L0057 ERROR|
15:54:31 test L0060 ERROR| Traceback (most recent call last):
15:54:31 test L0060 ERROR| File "/home/lmr/Code/avocado.lmr/tests/timeouttest.py", line 37, in action
15:54:31 test L0060 ERROR| time.sleep(self.params.sleep_time)
15:54:31 test L0060 ERROR| File "/home/lmr/Code/avocado.lmr/avocado/job.py", line 127, in timeout_handler
15:54:31 test L0060 ERROR| raise exceptions.TestTimeoutError(e_msg)
15:54:31 test L0060 ERROR| TestTimeoutError: Timeout reached waiting for timeouttest to end
15:54:31 test L0061 ERROR|
15:54:31 test L0400 ERROR| ERROR timeouttest.1 -> TestTimeoutError: Timeout reached waiting for timeouttest to end
15:54:31 test L0387 INFO |
Environment Variables for Dropin Tests
======================================
......
......@@ -2,5 +2,5 @@
base_dir = /usr/share/avocado
test_dir = /usr/share/avocado/tests
data_dir = /usr/share/avocado/data
logs_dir = ~/avocado/logs
logs_dir = ~/avocado/job-results
tmp_dir = /tmp
......@@ -17,6 +17,7 @@
import json
import unittest
import os
import signal
import shutil
import sys
import tempfile
......@@ -46,6 +47,16 @@ class RunnerOperationTest(unittest.TestCase):
cmd_line = './scripts/avocado run "sleeptest sleeptest"'
process.run(cmd_line)
def test_datadir_alias(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run datadir'
process.run(cmd_line)
def test_datadir_noalias(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run "tests/datadir.py tests/datadir.py"'
process.run(cmd_line)
def test_runner_noalias(self):
os.chdir(basedir)
cmd_line = "./scripts/avocado run 'tests/sleeptest.py tests/sleeptest.py'"
......@@ -83,9 +94,9 @@ class RunnerOperationTest(unittest.TestCase):
"Avocado did not return rc %d:\n%s" % (expected_rc, result))
self.assertIn("TestError: Failing during cleanup. Yay!", output,
"Cleanup exception not printed to log output")
self.assertIn("FAIL doublefail.1 -> TestFail: This test is supposed to fail",
self.assertIn("TestFail: This test is supposed to fail",
output,
"Test did not fail with action exception")
"Test did not fail with action exception:\n%s" % output)
def test_runner_timeout(self):
os.chdir(basedir)
......@@ -98,9 +109,9 @@ class RunnerOperationTest(unittest.TestCase):
"Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" % (expected_rc, result))
self.assertIn("ERROR timeouttest.1 -> TestTimeoutError: Timeout reached waiting for timeouttest to end",
self.assertIn("TestTimeoutError: Timeout reached waiting for",
output,
"Test did not fail with timeout exception")
"Test did not fail with timeout exception:\n%s" % output)
def test_runner_abort(self):
os.chdir(basedir)
......@@ -113,6 +124,25 @@ class RunnerOperationTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" % (expected_rc, result))
def test_runner_ctrl_c(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run sleeptenmin'
sp = process.SubProcess(cmd_line)
# Let it run for 3 seconds, then send a SIGINT
# (translates to KeyboardInterrupt)
sp.wait(timeout=3, sig=signal.SIGINT)
result = sp.result
output = result.stdout + result.stderr
expected_rc = 4
unexpected_rc = 3
self.assertNotEqual(result.exit_status, unexpected_rc,
"Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" % (expected_rc, result))
self.assertIn("Interrupted by user request", output,
"Avocado did not display interruption message. "
"Output:\n%s" % output)
class RunnerDropinTest(unittest.TestCase):
......@@ -151,7 +181,7 @@ class RunnerDropinTest(unittest.TestCase):
shutil.rmtree(self.base_logdir, ignore_errors=True)
class PluginsSysinfoTest(unittest.TestCase):
class PluginsTest(unittest.TestCase):
def setUp(self):
self.base_outputdir = tempfile.mkdtemp(prefix='avocado_plugins')
......@@ -167,6 +197,39 @@ class PluginsSysinfoTest(unittest.TestCase):
sysinfo_files = os.listdir(self.base_outputdir)
self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")
def test_list_plugin(self):
os.chdir(basedir)
cmd_line = './scripts/avocado list'
result = process.run(cmd_line, ignore_status=True)
output = result.stdout
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertNotIn('No tests were found on current tests dir', output)
def test_plugin_list(self):
os.chdir(basedir)
cmd_line = './scripts/avocado plugins'
result = process.run(cmd_line, ignore_status=True)
output = result.stdout
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertNotIn('Disabled', output)
def test_datadir_plugin(self):
os.chdir(basedir)
cmd_line = './scripts/avocado datadir'
result = process.run(cmd_line, ignore_status=True)
output = result.stdout
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertNotIn('Disabled', output)
def tearDown(self):
if os.path.isdir(self.base_outputdir):
shutil.rmtree(self.base_outputdir, ignore_errors=True)
......@@ -176,7 +239,7 @@ class ParseXMLError(Exception):
pass
class PluginsXunitTest(PluginsSysinfoTest):
class PluginsXunitTest(PluginsTest):
def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
e_nfailures, e_nskip):
......@@ -242,7 +305,7 @@ class ParseJSONError(Exception):
pass
class PluginsJSONTest(PluginsSysinfoTest):
class PluginsJSONTest(PluginsTest):
def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
e_nfailures, e_nskip):
......
......@@ -45,7 +45,7 @@ class MultiplexTests(unittest.TestCase):
output = result.stdout + result.stderr
if expected_lines is not None:
for line in output.splitlines():
if 'DEBUG LOG:' in line:
if 'JOB LOG:' in line:
debug_log = line.split()[-1]
debug_log_obj = open(debug_log, 'r')
job_log_lines = debug_log_obj.readlines()
......@@ -71,7 +71,6 @@ class MultiplexTests(unittest.TestCase):
def test_run_mplex_noid(self):
cmd_line = './scripts/avocado run --multiplex tests/sleeptest.py.data/sleeptest.mplx'
expected_rc = 0
self.run_and_check(cmd_line, 2)
def test_run_mplex_sleeptest(self):
......
......@@ -165,8 +165,8 @@ class OutputPluginTest(unittest.TestCase):
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
output_lines = output.splitlines()
first_line = output_lines[0]
debug_log = first_line.split()[-1]
second_line = output_lines[1]
debug_log = second_line.split()[-1]
self.check_output_files(debug_log)
......
......@@ -86,10 +86,10 @@ class TestClassTest(unittest.TestCase):
self.assertIsInstance(self.tst_instance_pass.time_elapsed, float)
def testClassAttributesTag(self):
self.assertEqual(self.tst_instance_pass.tag, "1")
self.assertEqual(self.tst_instance_pass.tag, "0")
def testClassAttributesTaggedName(self):
self.assertEqual(self.tst_instance_pass.tagged_name, "AvocadoPass.1")
self.assertEqual(self.tst_instance_pass.tagged_name, "AvocadoPass")
def testTaggedNameNewTests(self):
"""
......@@ -97,8 +97,8 @@ class TestClassTest(unittest.TestCase):
"""
new_tst_instance = AvocadoPass(base_logdir=self.base_logdir)
new_tst_instance.run_avocado()
self.assertEqual(new_tst_instance.tagged_name, "AvocadoPass.2")
self.assertEqual(new_tst_instance.tag, "2")
self.assertEqual(new_tst_instance.tagged_name, "AvocadoPass.1")
self.assertEqual(new_tst_instance.tag, "1")
def tearDown(self):
if os.path.isdir(self.base_logdir):
......
#!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import os
from avocado import test
from avocado import job
from avocado.utils import build
from avocado.utils import process
class datadir(test.Test):
"""
Test that uses resources from the data dir.
"""
default_params = {'source': 'datadir.c'}
def setup(self):
"""
Build 'datadir'.
"""
self.cwd = os.getcwd()
c_file = self.get_data_path(self.params.source)
self.srcdir = os.path.dirname(c_file)
build.make(self.srcdir, extra_args='datadir')
def action(self):
"""
Execute 'datadir'.
"""
cmd = os.path.join(self.srcdir, 'datadir')
cmd_result = process.run(cmd)
self.log.info(cmd_result)
def cleanup(self):
"""
Clean up 'datadir'.
"""
os.unlink(os.path.join(self.srcdir, 'datadir'))
if __name__ == "__main__":
job.main()
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See LICENSE for more details.
Copyright: Red Hat Inc. 2014
Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
*/
#include<stdio.h>
int main()
{
printf("Hello Data Dir");
return 0;
}
......@@ -44,7 +44,7 @@ class whiteboard(test.Test):
iterations = int(self.params.whiteboard_writes)
result = ''
for i in xrange(0, iterations):
for _ in xrange(0, iterations):
result += data
self.whiteboard = base64.encodestring(result)
......