core.job: introduce the job.test_suites property

This property will hold a list of TestSuite objects. I'm adding a
test_suite property here for backward compatibility. But it will be
removed soon.
Signed-off-by: Beraldo Leal <bleal@redhat.com>
Parent 1e67305b
......@@ -17,6 +17,7 @@
Job module - describes a sequence of automated test operations.
"""
import logging
import os
import pprint
......@@ -26,6 +27,8 @@ import sys
import tempfile
import time
import traceback
import warnings
from copy import deepcopy
from ..utils import astring
from ..utils.data_structures import CallbackRegister, time_to_seconds
......@@ -77,13 +80,17 @@ class Job:
along with setup operations and event recording.
"""
def __init__(self, config=None):
def __init__(self, config=None, test_suites=None):
"""
Creates an instance of Job class.
:param config: the job configuration, usually set by command
line options and argument parsing
:type config: dict
:param test_suites: A list of TestSuite objects. If it is None, the job
will have an empty list, and you can add suites
after init by accessing job.test_suites.
:type test_suites: list
"""
self.config = settings.as_dict()
if config:
......@@ -97,6 +104,8 @@ class Job:
self.config['run.unique_job_id'] = '0' * 40
self.config['sysinfo.collect.enabled'] = 'off'
self.test_suites = test_suites or []
#: The log directory for this job, also known as the job results
#: directory. If it's set to None, it means that the job results
#: directory has not yet been created.
......@@ -125,11 +134,6 @@ class Job:
self._stdout_stderr = None
self.replay_sourcejob = self.config.get('replay_sourcejob')
self.exitcode = exit_codes.AVOCADO_ALL_OK
#: The list of discovered/resolved tests that will be attempted to
#: be run by this job. If set to None, it means that test resolution
#: has not been attempted. If set to an empty list, it means that no
#: test was found during resolution.
self.test_suite = None
# The result events dispatcher is shared with the test runner.
# Because of our goal to support using the phases of a job
......@@ -367,6 +371,12 @@ class Job:
if os.path.exists(proc_latest):
os.unlink(proc_latest)
@classmethod
def from_config(cls, job_config, suites_configs=None):
    """Creates a Job instance from configuration dicts (alternate constructor).

    :param job_config: the job configuration, usually set by command
                       line options and argument parsing.
    :type job_config: dict
    :param suites_configs: optional list of configuration dicts, one per
                           test suite. When None or empty, a single suite
                           is created from a deep copy of the job config.
    :type suites_configs: list
    :returns: a Job holding one TestSuite per suite configuration.
    """
    if not suites_configs:
        # Default: one suite mirroring the job config; deepcopy keeps
        # later per-suite mutations from leaking into the job config.
        suites_configs = [deepcopy(job_config)]
    suites = []
    for suite_config in suites_configs:
        suites.append(TestSuite.from_config(suite_config))
    return cls(job_config, suites)
@property
def test_parameters(self):
"""Placeholder for test parameters.
......@@ -381,6 +391,23 @@ class Job:
[])}
return self._test_parameters
@property
def test_suite(self):
    """Returns the first test suite of this job (deprecated).

    Kept only for backward compatibility; please use `test_suites`
    instead. Returns None implicitly when the job has no suites.
    """
    if self.test_suites:
        return self.test_suites[0]

@test_suite.setter
def test_suite(self, var):
    """Temporary backward-compatibility setter (deprecated).

    Replaces the first suite in `test_suites`, or starts the list when
    it is empty. Suites should be set through `test_suites` instead.
    """
    if self.test_suites:
        self.test_suites[0] = var
    else:
        self.test_suites = [var]
@property
def timeout(self):
if self._timeout is None:
......@@ -421,14 +448,15 @@ class Job:
def create_test_suite(self):
try:
self.test_suite = TestSuite.from_config(self.config)
if self.test_suite.size == 0:
if self.test_suite and self.test_suite.size == 0:
refs = self.test_suite.references
msg = ("No tests found for given test references, try "
"'avocado list -V %s' for details") % " ".join(refs)
raise exceptions.JobTestSuiteEmptyError(msg)
except TestSuiteError as details:
raise exceptions.JobBaseException(details)
self.result.tests_total = self.test_suite.size
if self.test_suite:
self.result.tests_total = self.test_suite.size
def post_tests(self):
"""
......@@ -517,18 +545,19 @@ class Job:
self.test_suite.variants,
sys.argv)
# This is "almost ready" for a loop
summary = self.test_suite.run(self)
if not self.test_suites:
self.exitcode |= exit_codes.AVOCADO_JOB_FAIL
return self.exitcode
summary = set()
for suite in self.test_suites:
summary |= suite.run(self)
# If it's all good so far, set job status to 'PASS'
if self.status == 'RUNNING':
self.status = 'PASS'
LOG_JOB.info('Test results available in %s', self.logdir)
if summary is None:
self.exitcode |= exit_codes.AVOCADO_JOB_FAIL
return self.exitcode
if 'INTERRUPTED' in summary:
self.exitcode |= exit_codes.AVOCADO_JOB_INTERRUPTED
if 'FAIL' in summary:
......
......@@ -25,6 +25,7 @@ from avocado.core.dispatcher import JobPrePostDispatcher
from avocado.core.future.settings import settings
from avocado.core.output import LOG_UI
from avocado.core.plugin_interfaces import CLICmd, Init
from avocado.core.suite import TestSuite, TestSuiteError
from avocado.utils import process
......@@ -299,7 +300,14 @@ class Run(CLICmd):
LOG_UI.error('Unique Job ID needs to be a 40 digit hex number')
sys.exit(exit_codes.AVOCADO_FAIL)
with job.Job(config) as job_instance:
try:
suite = TestSuite.from_config(config, name='suite01')
if suite.size == 0:
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
except TestSuiteError as err:
LOG_UI.error(err)
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
with job.Job(config, [suite]) as job_instance:
pre_post_dispatcher = JobPrePostDispatcher()
try:
# Run JobPre plugins
......
......@@ -3,6 +3,7 @@
import sys
from avocado.core.job import Job
from avocado.core.suite import TestSuite
config = {
'run.test_runner': 'nrunner',
......@@ -14,5 +15,6 @@ config = {
],
}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
......@@ -3,8 +3,10 @@
import sys
from avocado.core.job import Job
from avocado.core.suite import TestSuite
config = {'run.references': ['examples/tests/passtest.py:PassTest.test']}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
......@@ -3,9 +3,11 @@
import sys
from avocado.core.job import Job
from avocado.core.suite import TestSuite
config = {'run.references': ['examples/tests/passtest.py:PassTest.test'],
'cit_parameter_file': 'examples/varianter_cit/test_params.cit'}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
......@@ -3,10 +3,12 @@
import sys
from avocado.core.job import Job
from avocado.core.suite import TestSuite
config = {'run.references': ['examples/tests/passtest.py:PassTest.test'],
'job.run.result.html.enabled': 'on',
'run.open_browser': True}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
......@@ -3,6 +3,7 @@
import sys
from avocado.core.job import Job
from avocado.core.suite import TestSuite
config = {'run.references': ['examples/tests/sleeptest.py:SleepTest.test'],
'run.dict_variants': [
......@@ -10,5 +11,6 @@ config = {'run.references': ['examples/tests/sleeptest.py:SleepTest.test'],
{'sleep_length': "1.0"}
]}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
......@@ -3,9 +3,11 @@
import sys
from avocado.core.job import Job
from avocado.core.suite import TestSuite
config = {'run.references': ['examples/tests/sleeptest.py:SleepTest.test'],
'json.variants.load': 'examples/tests/sleeptest.py.data/sleeptest.json'}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
......@@ -21,8 +21,9 @@ class Test(TestCaseTmpDir):
def test_job_run_result_json_enabled(self):
self.base_config['job.run.result.json.enabled'] = 'on'
with Job(self.base_config) as j:
result = j.run()
j = Job.from_config(self.base_config)
j.setup()
result = j.run()
self.assertEqual(result, exit_codes.AVOCADO_ALL_OK)
json_results_path = os.path.join(self.tmpdir.name, 'latest', 'results.json')
self.assertTrue(os.path.exists(json_results_path))
......@@ -30,8 +31,9 @@ class Test(TestCaseTmpDir):
def test_job_run_result_json_output(self):
json_results_path = os.path.join(self.tmpdir.name, 'myresults.json')
self.base_config['job.run.result.json.output'] = json_results_path
with Job(self.base_config) as j:
result = j.run()
j = Job.from_config(self.base_config)
j.setup()
result = j.run()
self.assertEqual(result, exit_codes.AVOCADO_ALL_OK)
self.assertTrue(os.path.exists(json_results_path))
......
......@@ -128,32 +128,37 @@ import sys
from avocado import Test
from avocado.core.job import Job
from avocado.core.suite import TestSuite
class PassTest(Test):
def test1(self):
config = {'core.show': ['none'],
'run.references': ['/bin/true']}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
j.run()
def test2(self):
config = {'core.show': ['app'],
'run.references': ['/bin/true']}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
j.run()
def test3(self):
config = {'core.show': ['none'],
'run.references': ['/bin/true']}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
j.run()
if __name__ == '__main__':
config = {'run.references': [__file__],
'core.show': ['app']}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
"""
......
......@@ -14,5 +14,6 @@ config = {'run.references': ['selftests/functional/test_nrunner_interface.py'],
{'runner': 'avocado-runner-tap'},
]}
with Job(config) as j:
sys.exit(j.run())
job = Job.from_config(config)
job.setup()
sys.exit(job.run())
......@@ -6,6 +6,7 @@ import os
import sys
from avocado.core.job import Job
from avocado.core.suite import TestSuite
config = {
'run.references': [
......@@ -14,5 +15,6 @@ config = {
]
}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
......@@ -126,10 +126,9 @@ class JobTest(unittest.TestCase):
'run.results_dir': self.tmpdir.name,
'run.store_logging_stream': [],
'run.references': simple_tests_found}
self.job = job.Job(config)
self.job = job.Job.from_config(config)
self.job.setup()
self.job.create_test_suite()
self.assertEqual(len(simple_tests_found), len(self.job.test_suite))
self.assertEqual(len(simple_tests_found), len(self.job.test_suites[0]))
def test_job_pre_tests(self):
class JobFilterTime(job.Job):
......@@ -151,9 +150,8 @@ class JobTest(unittest.TestCase):
'run.results_dir': self.tmpdir.name,
'run.store_logging_stream': [],
'run.references': simple_tests_found}
self.job = JobFilterTime(config)
self.job = JobFilterTime.from_config(config)
self.job.setup()
self.job.create_test_suite()
try:
self.job.pre_tests()
finally:
......@@ -166,9 +164,8 @@ class JobTest(unittest.TestCase):
'run.results_dir': self.tmpdir.name,
'run.store_logging_stream': [],
'run.references': simple_tests_found}
self.job = job.Job(config)
self.job = job.Job.from_config(config)
self.job.setup()
self.job.create_test_suite()
self.assertEqual(self.job.run_tests(),
exit_codes.AVOCADO_ALL_OK)
......@@ -198,18 +195,19 @@ class JobTest(unittest.TestCase):
def test_job_run(self):
class JobFilterLog(job.Job):
def pre_tests(self):
filtered_test_suite = []
for test_factory in self.test_suite.tests:
if self.config.get('run.test_runner') == 'runner':
if test_factory[0] is test.SimpleTest:
if not test_factory[1].get('name', '').endswith('time'):
for suite in self.test_suites:
filtered_test_suite = []
for test_factory in suite.tests:
if self.config.get('run.test_runner') == 'runner':
if test_factory[0] is test.SimpleTest:
if not test_factory[1].get('name', '').endswith('time'):
filtered_test_suite.append(test_factory)
elif self.config.get('run.test_runner') == 'nrunner':
task = test_factory
if not task.runnable.url.endswith('time'):
filtered_test_suite.append(test_factory)
elif self.config.get('run.test_runner') == 'nrunner':
task = test_factory
if not task.runnable.url.endswith('time'):
filtered_test_suite.append(test_factory)
self.test_suite.tests = filtered_test_suite
super(JobFilterLog, self).pre_tests()
suite.tests = filtered_test_suite
super(JobFilterLog, self).pre_tests()
def post_tests(self):
with open(os.path.join(self.logdir, "reversed_id"), "w") as f:
......@@ -220,11 +218,11 @@ class JobTest(unittest.TestCase):
'run.results_dir': self.tmpdir.name,
'run.store_logging_stream': [],
'run.references': simple_tests_found}
self.job = JobFilterLog(config)
self.job = JobFilterLog.from_config(config)
self.job.setup()
self.assertEqual(self.job.run(),
exit_codes.AVOCADO_ALL_OK)
self.assertLessEqual(len(self.job.test_suite), 1)
self.assertLessEqual(len(self.job.test_suites), 1)
with open(os.path.join(self.job.logdir, "reversed_id")) as reverse_id_file:
self.assertEqual(self.job.unique_id[::-1],
reverse_id_file.read())
......@@ -262,6 +260,15 @@ class JobTest(unittest.TestCase):
self.assertEqual(self.job.time_end, 20.0)
self.assertEqual(self.job.time_elapsed, 100.0)
def test_job_suites_config(self):
    """Checks that a per-suite config does not leak into the job config."""
    config = {'run.results_dir': self.tmpdir.name,
              'core.show': ['none'],
              'run.references': ['/bin/true']}
    suite_config = {'run.references': ['/bin/false']}
    self.job = job.Job.from_config(config, [suite_config])
    # The suite-level references must not override the job-level ones.
    self.assertEqual(self.job.config.get('run.references'), ['/bin/true'])
def test_job_dryrun_no_unique_job_id(self):
config = {'run.results_dir': self.tmpdir.name,
'run.store_logging_stream': [],
......@@ -299,9 +306,8 @@ class JobTest(unittest.TestCase):
'run.references': simple_tests_found,
'run.test_runner': 'nrunner',
'core.show': ['none']}
self.job = job.Job(config)
self.job = job.Job.from_config(config)
self.job.setup()
self.job.create_test_suite()
self.assertEqual(len(simple_tests_found), len(self.job.test_suite))
if self.job.test_suite:
self.assertIsInstance(self.job.test_suite.tests[0], nrunner.Task)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.