You need to sign in or sign up before continuing.
Commit d790c161 authored by Cleber Rosa

Merge branch 'replay_v7'

......@@ -42,6 +42,7 @@ from . import tree
from . import test
from . import xunit
from . import jsonresult
from . import replay
from .settings import settings
from ..utils import archive
from ..utils import astring
......@@ -130,6 +131,7 @@ class Job(object):
self.funcatexit = data_structures.CallbackRegister("JobExit %s"
% self.unique_id,
_TEST_LOGGER)
self.replay_sourcejob = getattr(self.args, 'replay_sourcejob', None)
def _setup_job_results(self):
logdir = getattr(self.args, 'logdir', None)
......@@ -267,10 +269,10 @@ class Job(object):
Optionally, a list of tests (each test a string).
:returns: a test suite (a list of test factories)
"""
urls = self._handle_urls(urls)
loader.loader.load_plugins(self.args)
try:
suite = loader.loader.discover(urls)
replay_path = getattr(self.args, 'replay_path', None)
suite = loader.loader.discover(urls, replay_path=replay_path)
except loader.LoaderUnhandledUrlError, details:
self._remove_job_results()
raise exceptions.OptionValidationError(details)
......@@ -301,6 +303,8 @@ class Job(object):
def _log_job_id(self):
job_log = _TEST_LOGGER
job_log.info('Job ID: %s', self.unique_id)
if self.replay_sourcejob is not None:
job_log.info('Replay of Job ID: %s', self.replay_sourcejob)
job_log.info('')
@staticmethod
......@@ -417,9 +421,11 @@ class Job(object):
that configure a job failure.
"""
self._setup_job_results()
urls = self._handle_urls(urls)
self.view.start_file_logging(self.logfile,
self.loglevel,
self.unique_id)
self.unique_id,
self.replay_sourcejob)
try:
test_suite = self._make_test_suite(urls)
except loader.LoaderError, details:
......@@ -432,10 +438,13 @@ class Job(object):
"for details" % (" ".join(urls) if urls else "\b"))
raise exceptions.OptionValidationError(e_msg)
try:
mux = multiplexer.Mux(self.args)
except (IOError, ValueError), details:
raise exceptions.OptionValidationError(details)
if getattr(self.args, 'replay_mux', None) is not None:
mux = self.args.replay_mux
else:
try:
mux = multiplexer.Mux(self.args)
except (IOError, ValueError), details:
raise exceptions.OptionValidationError(details)
self.args.test_result_total = mux.get_number_of_tests(test_suite)
self._make_test_result()
......@@ -445,10 +454,13 @@ class Job(object):
self._start_sysinfo()
self._log_job_debug_info(mux)
replay.record(self.args, self.logdir, mux, urls)
self.view.logfile = self.logfile
replay_map = getattr(self.args, 'replay_map', None)
failures = self.test_runner.run_suite(test_suite, mux,
timeout=self.timeout)
timeout=self.timeout,
replay_map=replay_map)
self.view.stop_file_logging()
# If it's all good so far, set job status to 'PASS'
if self.status == 'RUNNING':
......
......@@ -194,7 +194,7 @@ class TestLoaderProxy(object):
mapping.update(loader_plugin.get_decorator_mapping())
return mapping
def discover(self, urls, which_tests=DEFAULT):
def discover(self, urls, which_tests=DEFAULT, replay_path=None):
"""
Discover (possible) tests from test urls.
......@@ -213,6 +213,8 @@ class TestLoaderProxy(object):
'avocado.app.tracebacks')
tests = []
unhandled_urls = []
if replay_path is not None and os.path.exists(replay_path):
os.chdir(replay_path)
if not urls:
for loader_plugin in self._initialized_plugins:
try:
......
......@@ -547,7 +547,7 @@ class View(object):
"""
self._log_ui_info(term_support.warn_header_str(msg), skip_newline)
def start_file_logging(self, logfile, loglevel, unique_id):
def start_file_logging(self, logfile, loglevel, unique_id, sourcejob=None):
"""
Start the main file logging.
......@@ -571,6 +571,7 @@ class View(object):
root_logger = logging.getLogger()
root_logger.addHandler(self.file_handler)
root_logger.setLevel(loglevel)
self.replay_sourcejob = sourcejob
def stop_file_logging(self):
"""
......
......@@ -132,7 +132,7 @@ class RemoteTestRunner(TestRunner):
return json_result
def run_suite(self, test_suite, mux, timeout):
def run_suite(self, test_suite, mux, timeout, replay_map=None):
"""
Run one or more tests and report with test result.
......
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2015
# Author: Amador Pahim <apahim@redhat.com>
import ast
import glob
import json
import os
import pickle
import sys
from . import exit_codes
from . import output
from .test import ReplaySkipTest
from .settings import settings
from ..utils import path
"""
Record/retrieve job information for job replay
"""
def record(args, logdir, mux, urls=None):
    """
    Record the job data needed for a future replay.

    Writes the urls, the configuration, the pickled multiplexer and the
    current working directory into a 'replay' subdirectory of the job
    results directory.

    :param args: the parsed job arguments (unused here, kept for API
                 symmetry with the retrieve_* functions' callers)
    :param logdir: path to the job results directory
    :param mux: the multiplexer object used by the job (pickled)
    :param urls: optional list of test urls used by the job
    """
    replay_dir = path.init_dir(logdir, 'replay')
    path_cfg = os.path.join(replay_dir, 'config')
    path_urls = os.path.join(replay_dir, 'urls')
    path_mux = os.path.join(replay_dir, 'multiplex')
    path_pwd = os.path.join(replay_dir, 'pwd')

    if urls:
        with open(path_urls, 'w') as f:
            f.write('%s' % urls)

    with open(path_cfg, 'w') as f:
        settings.config.write(f)

    # Pickle streams are binary data: open in 'wb' so the dump is not
    # corrupted by text-mode newline translation (and stays valid on
    # Python 3, where text mode would reject the bytes outright).
    with open(path_mux, 'wb') as f:
        pickle.dump(mux, f, pickle.HIGHEST_PROTOCOL)

    with open(path_pwd, 'w') as f:
        f.write('%s' % os.getcwd())
def retrieve_pwd(resultsdir):
    """
    Retrieve the working directory recorded for the source job.

    :param resultsdir: path to the source job results directory
    :returns: the recorded working directory, or None when no 'pwd'
              file was recorded
    """
    pwd_file = os.path.join(resultsdir, "replay", "pwd")
    if os.path.exists(pwd_file):
        with open(pwd_file, 'r') as pwd_fobj:
            return pwd_fobj.read()
    return None
def retrieve_urls(resultsdir):
    """
    Retrieve the test urls recorded for the source job.

    :param resultsdir: path to the source job results directory
    :returns: the recorded list of urls (the file stores the Python
              repr of the list, parsed back with ast.literal_eval),
              or None when no 'urls' file was recorded
    """
    urls_file = os.path.join(resultsdir, "replay", "urls")
    if os.path.exists(urls_file):
        with open(urls_file, 'r') as urls_fobj:
            return ast.literal_eval(urls_fobj.read())
    return None
def retrieve_mux(resultsdir):
    """
    Retrieve the multiplexer object recorded for the source job.

    :param resultsdir: path to the source job results directory
    :returns: the unpickled multiplexer object, or None when no
              'multiplex' file was recorded
    """
    pkl_path = os.path.join(resultsdir, 'replay', 'multiplex')
    if not os.path.exists(pkl_path):
        return None
    # Pickle streams are binary data: open in 'rb' so text-mode newline
    # translation cannot corrupt the load (and so this also works on
    # Python 3, where 'r' would decode the bytes as text and fail).
    with open(pkl_path, 'rb') as f:
        return pickle.load(f)
def retrieve_replay_map(resultsdir, replay_filter):
    """
    Build the replay map from the source job results.json.

    :param resultsdir: path to the source job results directory
    :param replay_filter: list of test statuses that should actually be
                          re-executed during the replay
    :returns: a list with one entry per recorded test -- None when the
              test should run again, ReplaySkipTest when it should be
              skipped -- or None when results.json is not available
    """
    resultsfile = os.path.join(resultsdir, "results.json")
    if not os.path.exists(resultsfile):
        return None
    with open(resultsfile, 'r') as results_file_obj:
        results = json.loads(results_file_obj.read())
    # Tests whose recorded status is filtered out are replaced by
    # ReplaySkipTest so they are reported as SKIP instead of running.
    return [None if test['status'] in replay_filter else ReplaySkipTest
            for test in results['tests']]
def get_resultsdir(logdir, jobid):
    """
    Locate the results directory of the job matching a (partial) job id.

    :param logdir: base directory holding the job results directories
    :param jobid: full or partial hash id of the source job
    :returns: path to the matching results directory, or None when no
              recorded job matches; exits the program when the partial
              id matches more than one job
    """
    view = output.View()
    short_jobid = jobid[:7]
    if len(short_jobid) < 7:
        # Partial ids shorter than the 7 chars embedded in the
        # directory name need a glob suffix to match.
        short_jobid += '*'
    idfile_pattern = os.path.join(logdir, 'job-*-%s' % short_jobid, 'id')

    matched_files = [id_file for id_file in glob.glob(idfile_pattern)
                     if get_id(id_file, jobid) is not None]

    if len(matched_files) > 1:
        view.notify(event='error',
                    msg=("hash '%s' is not unique enough" % jobid))
        sys.exit(exit_codes.AVOCADO_JOB_FAIL)
    if matched_files:
        return os.path.dirname(matched_files[0])
    return None
def get_id(path, jobid):
    """
    Return the full id of a recorded job when it matches a (partial) id.

    :param path: path to a recorded job 'id' file
    :param jobid: full or partial hash id to match against
    :returns: the full recorded job id when it starts with *jobid*,
              otherwise None (also None when the file does not exist)
    """
    if not os.path.exists(path):
        return None
    with open(path, 'r') as id_fobj:
        content = id_fobj.read().strip('\n')
    return content if content.startswith(jobid) else None
......@@ -263,6 +263,9 @@ class HumanTestResult(TestResult):
"""
TestResult.start_tests(self)
self.stream.notify(event="message", msg="JOB ID : %s" % self.stream.job_unique_id)
if self.stream.replay_sourcejob is not None:
self.stream.notify(event="message", msg="SRC JOB ID : %s" %
self.stream.replay_sourcejob)
self.stream.notify(event="message", msg="JOB LOG : %s" % self.stream.logfile)
self.stream.notify(event="message", msg="TESTS : %s" % self.tests_total)
self.stream.set_tests_info({'tests_total': self.tests_total})
......
......@@ -329,7 +329,7 @@ class TestRunner(object):
return False
return True
def run_suite(self, test_suite, mux, timeout=0):
def run_suite(self, test_suite, mux, timeout=0, replay_map=None):
"""
Run one or more tests and report with test result.
......@@ -349,13 +349,15 @@ class TestRunner(object):
else:
deadline = None
index = -1
for test_template in test_suite:
test_template[1]['base_logdir'] = self.job.logdir
test_template[1]['job'] = self.job
break_loop = False
for test_factory in mux.itertests(test_template):
index += 1
test_parameters = test_factory[1]
if deadline is not None and time.time() > deadline:
test_parameters = test_factory[1]
if 'methodName' in test_parameters:
del test_parameters['methodName']
test_factory = (test.TimeOutSkipTest, test_parameters)
......@@ -364,6 +366,10 @@ class TestRunner(object):
if break_loop:
break
else:
if (replay_map is not None and
replay_map[index] is not None):
test_factory = (replay_map[index], test_parameters)
break_loop = not self.run_test(test_factory, queue, failures,
deadline)
if break_loop:
......
......@@ -700,3 +700,14 @@ class DryRunTest(TimeOutSkipTest):
for path, key, value in self.params.iteritems():
self.log.info("%s:%s ==> %s", path, key, value)
super(DryRunTest, self).setUp()
class ReplaySkipTest(TimeOutSkipTest):

    """
    Skip test due to a job replay filter.

    Substituted for the real test factory by the replay machinery for
    every test whose recorded status was filtered out, so the test is
    reported as SKIP instead of being executed again.
    """

    _skip_reason = "Test skipped due to a job replay filter!"
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import argparse
import os
import sys
from .base import CLI
from avocado.core import replay
from avocado.core import status
from avocado.core import exit_codes
from avocado.core import output
from avocado.core.settings import settings
class Replay(CLI):

    """
    Replay a job

    Registers the job replay options on the 'run' subcommand and, when
    '--replay' is used, loads the source job data (urls, configuration,
    multiplexer, working directory) into the run arguments.
    """

    name = 'replay'
    description = "Replay options for 'run' subcommand"

    def configure(self, parser):
        """
        Add the replay argument group to the 'run' subcommand parser.

        :param parser: the main application parser
        """
        run_subcommand_parser = parser.subcommands.choices.get('run', None)
        if run_subcommand_parser is None:
            return

        self.replay_parser = run_subcommand_parser.add_argument_group(
            'job replay')
        self.replay_parser.add_argument('--replay', dest='replay_jobid',
                                        default=None,
                                        help='Replay a job identified by its '
                                        '(partial) hash id')
        self.replay_parser.add_argument('--replay-test-status',
                                        dest='replay_teststatus',
                                        type=self._valid_status,
                                        default=None,
                                        help='Filter tests to replay by '
                                        'test status')
        self.replay_parser.add_argument('--replay-ignore',
                                        dest='replay_ignore',
                                        type=self._valid_ignore,
                                        default=None,
                                        help='Ignore multiplex (mux) and/or '
                                        'configuration (config) from the '
                                        'source job')
        self.replay_parser.add_argument('--replay-data-dir',
                                        dest='replay_datadir',
                                        default=None,
                                        help='Load replay data from an '
                                        'alternative location')

    def _valid_status(self, string):
        """
        argparse type callback validating --replay-test-status values.

        :param string: comma-separated list of test statuses
        :returns: the list of statuses
        :raises argparse.ArgumentTypeError: when any item is not a
                known test status
        """
        status_list = string.split(',')
        for item in status_list:
            if item not in status.mapping:
                msg = 'Invalid --replay-test-status option. Valid ' \
                      'options are (more than one allowed): %s' % \
                      ','.join(status.mapping)
                raise argparse.ArgumentTypeError(msg)
        return status_list

    def _valid_ignore(self, string):
        """
        argparse type callback validating --replay-ignore values.

        :param string: comma-separated list with 'mux' and/or 'config'
        :returns: the list of items to ignore
        :raises argparse.ArgumentTypeError: when any item is invalid
        """
        options = ['mux', 'config']
        ignore_list = string.split(',')
        for item in ignore_list:
            if item not in options:
                msg = 'Invalid --replay-ignore option. Valid ' \
                      'options are (more than one allowed): %s' % \
                      ','.join(options)
                raise argparse.ArgumentTypeError(msg)
        return ignore_list

    def load_config(self, resultsdir):
        """
        Load the configuration recorded by the source job.

        :param resultsdir: path to the source job results directory
        """
        config = os.path.join(resultsdir, 'replay', 'config')
        with open(config, 'r') as f:
            settings.process_config_path(f.read())

    def run(self, args):
        """
        Populate the run arguments with the source job replay data.

        Exits the application with AVOCADO_JOB_FAIL when the source job
        cannot be located or its recorded data is missing.

        :param args: the parsed run arguments (mutated in place)
        """
        if getattr(args, 'replay_jobid', None) is None:
            return

        view = output.View()

        # Use getattr: the remote plugin may not be loaded, in which
        # case 'remote_hostname' is simply absent from args.
        if getattr(args, 'remote_hostname', None) is not None:
            msg = "Currently we don't replay jobs in remote hosts."
            view.notify(event='error', msg=(msg))
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)

        if args.replay_datadir is not None:
            resultsdir = args.replay_datadir
        else:
            logs_dir = settings.get_value('datadir.paths', 'logs_dir',
                                          default=None)
            self.logdir = os.path.expanduser(logs_dir)
            resultsdir = replay.get_resultsdir(self.logdir, args.replay_jobid)

        if resultsdir is None:
            msg = "can't find job results directory in '%s'" % self.logdir
            view.notify(event='error', msg=(msg))
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)

        sourcejob = replay.get_id(os.path.join(resultsdir, 'id'),
                                  args.replay_jobid)
        if sourcejob is None:
            msg = "can't find matching job id '%s' in '%s' directory." % \
                  (args.replay_jobid, resultsdir)
            view.notify(event='error', msg=(msg))
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)
        setattr(args, 'replay_sourcejob', sourcejob)

        # Urls given on the command line win over the recorded ones.
        if getattr(args, 'url', None):
            msg = 'Overriding the replay urls with urls provided in '\
                  'command line.'
            view.notify(event='warning', msg=(msg))
        else:
            urls = replay.retrieve_urls(resultsdir)
            if urls is None:
                msg = 'Source job urls data not found. Aborting.'
                view.notify(event='error', msg=(msg))
                sys.exit(exit_codes.AVOCADO_JOB_FAIL)
            else:
                setattr(args, 'url', urls)

        if args.replay_ignore and 'config' in args.replay_ignore:
            msg = "Ignoring configuration from source job with " \
                  "--replay-ignore."
            view.notify(event='warning', msg=(msg))
        else:
            self.load_config(resultsdir)

        if args.replay_ignore and 'mux' in args.replay_ignore:
            msg = "Ignoring multiplex from source job with --replay-ignore."
            view.notify(event='warning', msg=(msg))
        else:
            # A multiplex file given on the command line wins over the
            # recorded multiplexer.
            if getattr(args, 'multiplex_files', None) is not None:
                msg = 'Overriding the replay multiplex with '\
                      '--multiplex-file.'
                view.notify(event='warning', msg=(msg))
            else:
                mux = replay.retrieve_mux(resultsdir)
                if mux is None:
                    msg = 'Source job multiplex data not found. Aborting.'
                    view.notify(event='error', msg=(msg))
                    sys.exit(exit_codes.AVOCADO_JOB_FAIL)
                else:
                    setattr(args, 'replay_mux', mux)

        if args.replay_teststatus:
            replay_map = replay.retrieve_replay_map(resultsdir,
                                                    args.replay_teststatus)
            setattr(args, 'replay_map', replay_map)

        # Restore the source job's working directory so relative test
        # references resolve the same way they did originally.
        pwd = replay.retrieve_pwd(resultsdir)
        if pwd is not None:
            setattr(args, 'replay_path', pwd)
.. _job_replay_:
==========
Job Replay
==========
In order to reproduce a given job using the same data, one can use the
``--replay`` option for the ``run`` command, informing the hash id from
the original job to be replayed. The hash id can be partial, as long as
the provided part corresponds to the initial characters of the original
job id and it is also unique enough.
Let's see an example. First, running a simple job with two urls::
$ avocado run /bin/true /bin/false
JOB ID : 825b860b0c2f6ec48953c638432e3e323f8d7cad
JOB LOG : $HOME/avocado/job-results/job-2016-01-11T16.14-825b860/job.log
TESTS : 2
(1/2) /bin/true: PASS (0.01 s)
(2/2) /bin/false: FAIL (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T16.14-825b860/html/results.html
TIME : 0.02 s
Now we can replay the job by running::
$ avocado run --replay 825b86
JOB ID : 55a0d10132c02b8cc87deb2b480bfd8abbd956c3
SRC JOB ID : 825b860b0c2f6ec48953c638432e3e323f8d7cad
JOB LOG : $HOME/avocado/job-results/job-2016-01-11T16.18-55a0d10/job.log
TESTS : 2
(1/2) /bin/true: PASS (0.01 s)
(2/2) /bin/false: FAIL (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T16.18-55a0d10/html/results.html
TIME : 0.01 s
The replay feature will retrieve the original job urls, the multiplex
tree and the configuration. Let's see another example, now using
multiplex file::
$ avocado run /bin/true /bin/false --multiplex-files mux-environment.yaml
JOB ID : bd6aa3b852d4290637b5e771b371537541043d1d
JOB LOG : $HOME/avocado/job-results/job-2016-01-11T21.56-bd6aa3b/job.log
TESTS : 48
(1/48) /bin/true.variant1: PASS (0.01 s)
(2/48) /bin/true.variant2: PASS (0.01 s)
(3/48) /bin/true.variant3: PASS (0.01 s)
(4/48) /bin/true.variant4: PASS (0.01 s)
(5/48) /bin/true.variant5: PASS (0.01 s)
(6/48) /bin/true.variant6: PASS (0.01 s)
(7/48) /bin/true.variant7: PASS (0.01 s)
(8/48) /bin/true.variant8: PASS (0.01 s)
(9/48) /bin/true.variant9: PASS (0.01 s)
(10/48) /bin/true.variant10: PASS (0.01 s)
(11/48) /bin/true.variant11: PASS (0.01 s)
(12/48) /bin/true.variant12: PASS (0.01 s)
(13/48) /bin/true.variant13: PASS (0.01 s)
(14/48) /bin/true.variant14: PASS (0.01 s)
(15/48) /bin/true.variant15: PASS (0.01 s)
(16/48) /bin/true.variant16: PASS (0.01 s)
(17/48) /bin/true.variant17: PASS (0.01 s)
(18/48) /bin/true.variant18: PASS (0.01 s)
(19/48) /bin/true.variant19: PASS (0.01 s)
(20/48) /bin/true.variant20: PASS (0.01 s)
(21/48) /bin/true.variant21: PASS (0.01 s)
(22/48) /bin/true.variant22: PASS (0.01 s)
(23/48) /bin/true.variant23: PASS (0.01 s)
(24/48) /bin/true.variant24: PASS (0.01 s)
(25/48) /bin/false.variant1: FAIL (0.01 s)
(26/48) /bin/false.variant2: FAIL (0.01 s)
(27/48) /bin/false.variant3: FAIL (0.01 s)
(28/48) /bin/false.variant4: FAIL (0.01 s)
(29/48) /bin/false.variant5: FAIL (0.01 s)
(30/48) /bin/false.variant6: FAIL (0.01 s)
(31/48) /bin/false.variant7: FAIL (0.01 s)
(32/48) /bin/false.variant8: FAIL (0.01 s)
(33/48) /bin/false.variant9: FAIL (0.01 s)
(34/48) /bin/false.variant10: FAIL (0.01 s)
(35/48) /bin/false.variant11: FAIL (0.01 s)
(36/48) /bin/false.variant12: FAIL (0.01 s)
(37/48) /bin/false.variant13: FAIL (0.01 s)
(38/48) /bin/false.variant14: FAIL (0.01 s)
(39/48) /bin/false.variant15: FAIL (0.01 s)
(40/48) /bin/false.variant16: FAIL (0.01 s)
(41/48) /bin/false.variant17: FAIL (0.01 s)
(42/48) /bin/false.variant18: FAIL (0.01 s)
(43/48) /bin/false.variant19: FAIL (0.01 s)
(44/48) /bin/false.variant20: FAIL (0.01 s)
(45/48) /bin/false.variant21: FAIL (0.01 s)
(46/48) /bin/false.variant22: FAIL (0.01 s)
(47/48) /bin/false.variant23: FAIL (0.01 s)
(48/48) /bin/false.variant24: FAIL (0.01 s)
RESULTS : PASS 24 | ERROR 0 | FAIL 24 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T21.56-bd6aa3b/html/results.html
TIME : 0.29 s
We can replay the job as is, using ``$ avocado run --replay bd6aa3b``,
or replay the job ignoring the multiplex file, as below::
$ avocado run --replay bd6aa3b --replay-ignore mux
Ignoring multiplex from source job with --replay-ignore.
JOB ID : d5a46186ee0fb4645e3f7758814003d76c980bf9
SRC JOB ID : bd6aa3b852d4290637b5e771b371537541043d1d
JOB LOG : $HOME/avocado/job-results/job-2016-01-11T22.01-d5a4618/job.log
TESTS : 2
(1/2) /bin/true: PASS (0.01 s)
(2/2) /bin/false: FAIL (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T22.01-d5a4618/html/results.html
TIME : 0.02 s
Also, it is possible to replay only the variants that faced a given
result, using the option ``--replay-test-status``. Using the same job
``bd6aa3b``, see the example below::
$ avocado run --replay bd6aa3b --replay-test-status FAIL
JOB ID : 2e1dc41af6ed64895f3bb45e3820c5cc62a9b6eb
SRC JOB ID : bd6aa3b852d4290637b5e771b371537541043d1d
JOB LOG : $HOME/avocado/job-results/job-2016-01-12T00.38-2e1dc41/job.log
TESTS : 48
(1/48) /bin/true.variant1: SKIP
(2/48) /bin/true.variant2: SKIP
(3/48) /bin/true.variant3: SKIP
(4/48) /bin/true.variant4: SKIP
(5/48) /bin/true.variant5: SKIP
(6/48) /bin/true.variant6: SKIP
(7/48) /bin/true.variant7: SKIP
(8/48) /bin/true.variant8: SKIP
(9/48) /bin/true.variant9: SKIP
(10/48) /bin/true.variant10: SKIP
(11/48) /bin/true.variant11: SKIP
(12/48) /bin/true.variant12: SKIP
(13/48) /bin/true.variant13: SKIP
(14/48) /bin/true.variant14: SKIP
(15/48) /bin/true.variant15: SKIP
(16/48) /bin/true.variant16: SKIP
(17/48) /bin/true.variant17: SKIP
(18/48) /bin/true.variant18: SKIP
(19/48) /bin/true.variant19: SKIP
(20/48) /bin/true.variant20: SKIP
(21/48) /bin/true.variant21: SKIP
(22/48) /bin/true.variant22: SKIP
(23/48) /bin/true.variant23: SKIP
(24/48) /bin/true.variant24: SKIP
(25/48) /bin/false.variant1: FAIL (0.01 s)
(26/48) /bin/false.variant2: FAIL (0.01 s)
(27/48) /bin/false.variant3: FAIL (0.01 s)
(28/48) /bin/false.variant4: FAIL (0.01 s)
(29/48) /bin/false.variant5: FAIL (0.01 s)
(30/48) /bin/false.variant6: FAIL (0.01 s)
(31/48) /bin/false.variant7: FAIL (0.01 s)
(32/48) /bin/false.variant8: FAIL (0.01 s)
(33/48) /bin/false.variant9: FAIL (0.01 s)
(34/48) /bin/false.variant10: FAIL (0.01 s)
(35/48) /bin/false.variant11: FAIL (0.01 s)
(36/48) /bin/false.variant12: FAIL (0.01 s)
(37/48) /bin/false.variant13: FAIL (0.01 s)
(38/48) /bin/false.variant14: FAIL (0.01 s)
(39/48) /bin/false.variant15: FAIL (0.01 s)
(40/48) /bin/false.variant16: FAIL (0.01 s)
(41/48) /bin/false.variant17: FAIL (0.01 s)
(42/48) /bin/false.variant18: FAIL (0.01 s)
(43/48) /bin/false.variant19: FAIL (0.01 s)
(44/48) /bin/false.variant20: FAIL (0.01 s)
(45/48) /bin/false.variant21: FAIL (0.01 s)
(46/48) /bin/false.variant22: FAIL (0.01 s)
(47/48) /bin/false.variant23: FAIL (0.01 s)
(48/48) /bin/false.variant24: FAIL (0.01 s)
RESULTS : PASS 0 | ERROR 0 | FAIL 24 | SKIP 24 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2016-01-12T00.38-2e1dc41/html/results.html
TIME : 0.19 s
To be able to replay a job, avocado records the job data in the same
job results directory, inside a subdirectory named ``replay``. If a
given job has a non-default path to record the logs, when the replay
time comes, we need to inform where the logs are. See the example
below::
$ avocado run /bin/true --job-results-dir /tmp/avocado_results/
JOB ID : f1b1c870ad892eac6064a5332f1bbe38cda0aaf3
JOB LOG : /tmp/avocado_results/job-2016-01-11T22.10-f1b1c87/job.log
TESTS : 1
(1/1) /bin/true: PASS (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : /tmp/avocado_results/job-2016-01-11T22.10-f1b1c87/html/results.html
TIME : 0.01 s
Trying to replay the job, it fails::
$ avocado run --replay f1b1
can't find job results directory in '$HOME/avocado/job-results'
In this case, we have to inform where the job results dir is located::
$ avocado run --replay f1b1 --replay-data-dir /tmp/avocado_results
JOB ID : 19c76abb29f29fe410a9a3f4f4b66387570edffa
SRC JOB ID : f1b1c870ad892eac6064a5332f1bbe38cda0aaf3
JOB LOG : $HOME/avocado/job-results/job-2016-01-11T22.15-19c76ab/job.log
TESTS : 1
(1/1) /bin/true: PASS (0.01 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0
JOB HTML : $HOME/avocado/job-results/job-2016-01-11T22.15-19c76ab/html/results.html
TIME : 0.01 s
......@@ -14,6 +14,7 @@ Contents:
Configuration
Loaders
MultiplexConfig
Replay
RunningTestsRemotely
DebuggingWithGDB
WrapProcess
......
#!/usr/bin/env python
import glob
import os
import sys
import tempfile
import shutil
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
from avocado.core import exit_codes
from avocado.utils import process
# Absolute path to the repository root (two levels above this file).
basedir = os.path.abspath(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
class ReplayTests(unittest.TestCase):

    """
    Functional tests for the job replay feature ('avocado run --replay').
    """

    def setUp(self):
        # Run a source job (4 multiplex variants of passtest) whose
        # recorded data the tests below replay.
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        cmd_line = ('./scripts/avocado run passtest --multiplex '
                    'examples/tests/sleeptest.py.data/sleeptest.yaml '
                    '--job-results-dir %s --sysinfo=off' %
                    self.tmpdir)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.run_and_check(cmd_line, expected_rc)
        self.jobdir = ''.join(glob.glob(os.path.join(self.tmpdir, 'job-*')))
        idfile = os.path.join(self.jobdir, 'id')
        with open(idfile, 'r') as f:
            self.jobid = f.read().strip('\n')

    def run_and_check(self, cmd_line, expected_rc):
        """
        Run a command and assert its exit status.

        :param cmd_line: the command line to run (from the repo root)
        :param expected_rc: the expected process exit status
        :returns: the process result
        """
        os.chdir(basedir)
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, expected_rc,
                         "Command %s did not return rc "
                         "%d:\n%s" % (cmd_line, expected_rc, result))
        return result

    def test_run_replay_noid(self):
        # An id matching no recorded job must make the run fail.
        # (Fixed: the original command string was missing the spaces
        # around '--job-results-dir' and '--sysinfo=off'.)
        cmd_line = ('./scripts/avocado run --replay %s '
                    '--job-results-dir %s --replay-data-dir %s '
                    '--sysinfo=off' %
                    ('foo', self.tmpdir, self.jobdir))
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        self.run_and_check(cmd_line, expected_rc)

    def test_run_replay_data(self):
        # The source job must have recorded all four replay data files.
        file_list = ['multiplex', 'config', 'urls', 'pwd']
        for filename in file_list:
            path = os.path.join(self.jobdir, 'replay', filename)
            self.assertTrue(glob.glob(path))

    def test_run_replay(self):
        cmd_line = ('./scripts/avocado run --replay %s '
                    '--job-results-dir %s --replay-data-dir %s --sysinfo=off'
                    % (self.jobid, self.tmpdir, self.jobdir))
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.run_and_check(cmd_line, expected_rc)

    def test_run_replay_partialid(self):
        # A unique id prefix must be enough to locate the source job.
        partial_id = self.jobid[:5]
        cmd_line = ('./scripts/avocado run --replay %s '
                    '--job-results-dir %s --replay-data-dir %s --sysinfo=off'
                    % (partial_id, self.tmpdir, self.jobdir))
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.run_and_check(cmd_line, expected_rc)

    def test_run_replay_invalidignore(self):
        # (Fixed: the original command string was missing the space
        # between 'foo' and '--job-results-dir'.)
        cmd_line = ('./scripts/avocado run --replay %s --replay-ignore foo '
                    '--job-results-dir %s --replay-data-dir %s --sysinfo=off'
                    % (self.jobid, self.tmpdir, self.jobdir))
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        result = self.run_and_check(cmd_line, expected_rc)
        msg = 'Invalid --replay-ignore option. Valid options are ' \
              '(more than one allowed): mux,config'
        self.assertIn(msg, result.stderr)

    def test_run_replay_ignoremux(self):
        cmd_line = ('./scripts/avocado run --replay %s --replay-ignore mux '
                    '--job-results-dir %s --replay-data-dir %s --sysinfo=off'
                    % (self.jobid, self.tmpdir, self.jobdir))
        expected_rc = exit_codes.AVOCADO_ALL_OK
        result = self.run_and_check(cmd_line, expected_rc)
        msg = 'Ignoring multiplex from source job with --replay-ignore.'
        self.assertIn(msg, result.stdout)

    def test_run_replay_invalidstatus(self):
        cmd_line = ('./scripts/avocado run --replay %s --replay-test-status E '
                    '--job-results-dir %s --replay-data-dir %s --sysinfo=off'
                    % (self.jobid, self.tmpdir, self.jobdir))
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        result = self.run_and_check(cmd_line, expected_rc)
        msg = 'Invalid --replay-test-status option. Valid options are (more ' \
              'than one allowed): NOSTATUS,INTERRUPTED,WARN,START,ERROR,'\
              'FAIL,PASS,TEST_NA,ALERT,RUNNING,ABORT'
        self.assertIn(msg, result.stderr)

    def test_run_replay_statusfail(self):
        # Replaying only FAIL tests must skip the variants that passed.
        cmd_line = ('./scripts/avocado run --replay %s --replay-test-status '
                    'FAIL --job-results-dir %s --replay-data-dir %s '
                    '--sysinfo=off' % (self.jobid, self.tmpdir, self.jobdir))
        expected_rc = exit_codes.AVOCADO_ALL_OK
        result = self.run_and_check(cmd_line, expected_rc)
        msg = '(1/4) passtest.py:PassTest.test.variant1: SKIP\n ' \
              '(2/4) passtest.py:PassTest.test.variant2: SKIP\n ' \
              '(3/4) passtest.py:PassTest.test.variant3: SKIP\n ' \
              '(4/4) passtest.py:PassTest.test.variant4: SKIP'
        self.assertIn(msg, result.stdout)

    def test_run_replay_remotefail(self):
        cmd_line = ('./scripts/avocado run --replay %s --remote-hostname '
                    'localhost --job-results-dir %s --replay-data-dir %s '
                    '--sysinfo=off' % (self.jobid, self.tmpdir, self.jobdir))
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        result = self.run_and_check(cmd_line, expected_rc)
        msg = "Currently we don't replay jobs in remote hosts."
        self.assertIn(msg, result.stderr)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
......@@ -133,6 +133,7 @@ if __name__ == '__main__':
'journal = avocado.plugins.journal:Journal',
'html = avocado.plugins.html:HTML',
'remote = avocado.plugins.remote:Remote',
'replay = avocado.plugins.replay:Replay',
'vm = avocado.plugins.vm:VM',
],
'avocado.plugins.cli.cmd': [
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.