Unverified Commit 856c3b57 authored by Beraldo Leal

core: introduce the TestSuite() class

This is an attempt to organize and prepare the code to support multiple
suites in a single Job. Each TestSuite will have its own configuration.
Signed-off-by: Beraldo Leal <bleal@redhat.com>
Parent 159b8c9f
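For context, a minimal sketch of how the API introduced by this commit is meant to be used (assuming this revision of avocado is importable; the test reference and suite name are illustrative):

```python
from avocado.core.suite import TestSuite

# Each suite carries its own complete configuration (illustrative values).
config = {'run.references': ['/bin/true'],
          'run.test_runner': 'nrunner'}
suite = TestSuite.from_config(config, name='smoke')
print(suite.name, suite.size, suite.status)
```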
@@ -27,16 +27,14 @@ import tempfile
 import time
 import traceback
 
-from ..utils import astring, path, process, stacktrace
+from ..utils import astring, path, process
 from ..utils.data_structures import CallbackRegister, time_to_seconds
 from . import (data_dir, dispatcher, exceptions, exit_codes, jobdata,
-               loader, output, result, tags, varianter, version)
+               output, result, varianter, version)
 from .job_id import create_unique_job_id
 from .future.settings import settings
 from .output import LOG_JOB, LOG_UI, STD_OUTPUT
-from .test import DryRunTest
-from .utils import resolutions_to_tasks
-from .resolver import resolve
+from .suite import TestSuite, TestSuiteError
 
 _NEW_ISSUE_LINK = 'https://github.com/avocado-framework/avocado/issues/new'
@@ -340,7 +338,7 @@ class Job:
         """
         def soft_abort(msg):
             """ Only log the problem """
-            LOG_JOB.warning("Unable to update the latest link: {}", msg)
+            LOG_JOB.warning("Unable to update the latest link: %s", msg)
         basedir = os.path.dirname(self.logdir)
         basename = os.path.basename(self.logdir)
         proc_latest = os.path.join(basedir, "latest.%s" % os.getpid())
@@ -366,49 +364,6 @@ class Job:
         if os.path.exists(proc_latest):
             os.unlink(proc_latest)
 
-    def _make_test_suite(self, references, ignore_missing):
-        """
-        Prepares a test suite to be used for running tests
-
-        :param references: List of tests references to be resolved and
-                           transformed into test factories
-        :type references: list of str
-        :returns: a test suite (a list of test factories)
-        """
-        if self._test_runner_name == 'nrunner':
-            make_test_suite = self._make_test_suite_resolver
-        else:
-            make_test_suite = self._make_test_suite_loader
-        try:
-            self.test_suite = make_test_suite(references, ignore_missing)
-        except loader.LoaderError as details:
-            stacktrace.log_exc_info(sys.exc_info(), LOG_UI.getChild("debug"))
-            raise exceptions.JobTestSuiteError(details)
-
-    def _make_test_suite_loader(self, references, ignore_missing):
-        loader.loader.load_plugins(self.config)
-        try:
-            suite = loader.loader.discover(references, force=ignore_missing)
-            filter_tags = self.config.get("filter.by_tags.tags")
-            if filter_tags:
-                suite = tags.filter_test_tags(
-                    suite,
-                    filter_tags,
-                    self.config.get("filter.by_tags.include_empty"),
-                    self.config.get('filter.by_tags.include_empty_key'))
-        except loader.LoaderUnhandledReferenceError as details:
-            raise exceptions.JobTestSuiteError(details)
-        if not self.config.get('run.dry_run.enabled'):
-            return suite
-        for i in range(len(suite)):
-            suite[i] = [DryRunTest, suite[i][1]]
-        return suite
-
-    def _make_test_suite_resolver(self, references, ignore_missing):
-        resolutions = resolve(references, ignore_missing)
-        return resolutions_to_tasks(resolutions, self.config)
-
     def _log_job_id(self):
         LOG_JOB.info('Job ID: %s', self.unique_id)
         if self.replay_sourcejob is not None:
@@ -495,25 +450,16 @@ class Job:
         self._log_job_id()
 
     def create_test_suite(self):
         """
         Creates the test suite for this Job
 
         This is a public Job API as part of the documented Job phases
         """
-        refs = self.config.get('run.references')
-        ignore_missing = self.config.get('run.ignore_missing_references')
-        self._make_test_suite(refs, ignore_missing)
-        if not self.test_suite:
-            if refs:
-                e_msg = ("No tests found for given test references, try "
-                         "'avocado list -V %s' for details") % " ".join(refs)
-            else:
-                e_msg = ("No test references provided nor any other arguments "
-                         "resolved into tests. Please double check the "
-                         "executed command.")
-            raise exceptions.JobTestSuiteEmptyError(e_msg)
-        self.result.tests_total = len(self.test_suite)
+        try:
+            self.test_suite = TestSuite.from_config(self.config)
+            if self.test_suite.size == 0:
+                refs = self.test_suite.references
+                msg = ("No tests found for given test references, try "
+                       "'avocado list -V %s' for details") % " ".join(refs)
+                raise exceptions.JobTestSuiteEmptyError(msg)
+        except TestSuiteError as details:
+            raise exceptions.JobBaseException(details)
+        self.result.tests_total = self.test_suite.size
 
     def pre_tests(self):
         """
@@ -548,7 +494,7 @@ class Job:
         jobdata.record(self.config, self.logdir, variant, sys.argv)
         summary = self.test_runner.run_suite(self,
                                              self.result,
-                                             self.test_suite,
+                                             self.test_suite.tests,
                                              variant)
         # If it's all good so far, set job status to 'PASS'
         if self.status == 'RUNNING':
......
from enum import Enum
from uuid import uuid1

from .exceptions import OptionValidationError
from .future.settings import settings
from .loader import loader, LoaderError, LoaderUnhandledReferenceError
from .resolver import resolve
from .tags import filter_test_tags
from .test import DryRunTest
from .utils import resolutions_to_tasks
from .varianter import Varianter


class TestSuiteError(Exception):
    pass


class TestSuiteStatus(Enum):
    RESOLUTION_NOT_STARTED = object()
    TESTS_NOT_FOUND = object()
    TESTS_FOUND = object()
    UNKNOWN = object()


class TestSuite:
    def __init__(self, name, config, tests=None):
        self.name = name
        self.tests = tests

        # Create a complete config dict with all registered options + custom
        # config
        self.config = settings.as_dict()
        if config:
            self.config.update(config)

        self._variant = None
        self._references = None

        if (self.config.get('run.dry_run.enabled') and
                self.config.get('run.test_runner') == 'runner'):
            self._convert_to_dry_run()

    def __len__(self):
        """Convenience method so that `len()` works on a suite.

        `len(a_suite)` returns the same as `len(a_suite.tests)`.
        """
        return self.size

    def _convert_to_dry_run(self):
        for i in range(self.size):
            self.tests[i] = [DryRunTest, self.tests[i][1]]

    @classmethod
    def _from_config_with_loader(cls, config, name=None):
        references = config.get('run.references')
        ignore_missing = config.get('run.ignore_missing_references')
        try:
            loader.load_plugins(config)
            tests = loader.discover(references, force=ignore_missing)
            if config.get("filter.by_tags.tags"):
                tests = filter_test_tags(
                    tests,
                    config.get("filter.by_tags.tags"),
                    config.get("filter.by_tags.include_empty"),
                    config.get('filter.by_tags.include_empty_key'))
        except (LoaderUnhandledReferenceError, LoaderError) as details:
            raise TestSuiteError(details)
        return cls(name=name or str(uuid1()),
                   config=config,
                   tests=tests)

    @classmethod
    def _from_config_with_resolver(cls, config, name=None):
        ignore_missing = config.get('run.ignore_missing_references')
        references = config.get('run.references')
        resolutions = resolve(references, ignore_missing=ignore_missing)
        if not resolutions:
            msg = ("Test Suite could not be created: the given references "
                   "did not resolve into any tests.")
            raise TestSuiteError(msg)
        tasks = resolutions_to_tasks(resolutions, config)
        return cls(name=name or str(uuid1()),
                   config=config,
                   tests=tasks)

    def _parse_variant(self):
        # Varianter not yet parsed, apply configs
        if not self.variant.is_parsed():
            try:
                self.variant.parse(self.config)
            except (IOError, ValueError) as details:
                raise OptionValidationError("Unable to parse "
                                            "variant: %s" % details)

    @property
    def references(self):
        if self._references is None:
            self._references = self.config.get('run.references')
        return self._references

    @property
    def size(self):
        """The overall length/size of this test suite."""
        if self.tests is None:
            return 0
        return len(self.tests)

    @property
    def status(self):
        if self.tests is None:
            return TestSuiteStatus.RESOLUTION_NOT_STARTED
        elif self.size == 0:
            return TestSuiteStatus.TESTS_NOT_FOUND
        elif self.size > 0:
            return TestSuiteStatus.TESTS_FOUND
        else:
            return TestSuiteStatus.UNKNOWN

    @property
    def variant(self):
        if self._variant is None:
            self._variant = self.config.get("avocado_variants") or Varianter()
            self._parse_variant()
        return self._variant

    @classmethod
    def from_config(cls, config, name=None):
        references = config.get('run.references')
        runner = config.get('run.test_runner') or 'runner'
        if not references:
            msg = ("Test Suite could not be created: no test references "
                   "provided nor any other arguments resolved into tests.")
            raise TestSuiteError(msg)
        if runner == 'nrunner':
            return cls._from_config_with_resolver(config, name)
        else:
            return cls._from_config_with_loader(config, name)
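To illustrate the lifecycle the new class encodes, a short sketch (it mirrors the unit tests further below; assumes the module above is importable as avocado.core.suite):

```python
from avocado.core.suite import TestSuite, TestSuiteStatus

# A suite built directly, before any resolution has run, has no tests yet.
suite = TestSuite('empty-suite', {'run.test_runner': 'nrunner'})
assert suite.status is TestSuiteStatus.RESOLUTION_NOT_STARTED
assert len(suite) == 0  # __len__ delegates to the .size property
```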
@@ -261,7 +261,7 @@ class FetchAssetJob(JobPreTests):  # pylint: disable=R0903
             logger = job.log
         else:
             logger = None
-        for test in job.test_suite:
+        for test in job.test_suite.tests:
             # ignore nrunner/resolver based test suites that contain
             # tasks, because their planned requirements resolution is
             # completely different from the traditional job runner
......
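The comment in the hunk above is why the plugin iterates over test_suite.tests yet skips nrunner tasks. A hedged sketch of that skip logic as a standalone helper (the helper name is hypothetical, not part of this commit; nrunner.Task is the class the tests below use):

```python
from avocado.core import nrunner

def iter_loader_tests(suite):
    """Yield only loader-style test factories, skipping nrunner Tasks
    (hypothetical helper, not part of this commit)."""
    for test in suite.tests:
        if isinstance(test, nrunner.Task):
            continue  # tasks resolve their requirements differently
        yield test
```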
@@ -4,11 +4,12 @@ import tempfile
 import unittest.mock
 
 from avocado.core import data_dir
-from avocado.core import exceptions
 from avocado.core import exit_codes
 from avocado.core import job
 from avocado.core import nrunner
 from avocado.core import test
+from avocado.core.exceptions import JobBaseException
+from avocado.core.suite import TestSuite, TestSuiteStatus
 from avocado.utils import path as utils_path
 
 from .. import setup_avocado_loggers, temp_dir_prefix
@@ -101,14 +102,29 @@ class JobTest(unittest.TestCase):
         self.job = job.Job(config)
         self.assertIsNone(self.job.test_suite)
 
+    def test_suite_not_started(self):
+        suite = TestSuite('empty-suite', {'run.test_runner': 'nrunner'})
+        self.assertEqual(suite.status, TestSuiteStatus.RESOLUTION_NOT_STARTED)
+
+    def test_suite_tests_found(self):
+        suite = TestSuite.from_config({'run.references': ['/bin/true'],
+                                       'run.test_runner': 'nrunner'})
+        self.assertEqual(suite.status, TestSuiteStatus.TESTS_FOUND)
+
+    def test_suite_tests_not_found(self):
+        suite = TestSuite.from_config({'run.references': ['/bin/not-found'],
+                                       'run.test_runner': 'nrunner',
+                                       'run.ignore_missing_references': True})
+        self.assertEqual(suite.status, TestSuiteStatus.TESTS_NOT_FOUND)
+
     def test_job_create_test_suite_empty(self):
         config = {'run.results_dir': self.tmpdir.name,
                   'run.store_logging_stream': [],
                   'core.show': ['none']}
         self.job = job.Job(config)
         self.job.setup()
-        self.assertRaises(exceptions.JobTestSuiteEmptyError,
-                          self.job.create_test_suite)
+        with self.assertRaises(JobBaseException):
+            self.job.create_test_suite()
 
     def test_job_create_test_suite_simple(self):
         simple_tests_found = self._find_simple_test_candidates()
@@ -125,7 +141,7 @@ class JobTest(unittest.TestCase):
         class JobFilterTime(job.Job):
             def pre_tests(self):
                 filtered_test_suite = []
-                for test_factory in self.test_suite:
+                for test_factory in self.test_suite.tests:
                     if self.config.get('run.test_runner') == 'runner':
                         if test_factory[0] is test.SimpleTest:
                             if not test_factory[1].get('name', '').endswith('time'):
@@ -134,7 +150,7 @@ class JobTest(unittest.TestCase):
                         task = test_factory
                         if not task.runnable.url.endswith('time'):
                             filtered_test_suite.append(test_factory)
-                self.test_suite = filtered_test_suite
+                self.test_suite.tests = filtered_test_suite
                 super(JobFilterTime, self).pre_tests()
 
         simple_tests_found = self._find_simple_test_candidates()
         config = {'core.show': ['none'],
@@ -189,7 +205,7 @@ class JobTest(unittest.TestCase):
         class JobFilterLog(job.Job):
             def pre_tests(self):
                 filtered_test_suite = []
-                for test_factory in self.test_suite:
+                for test_factory in self.test_suite.tests:
                     if self.config.get('run.test_runner') == 'runner':
                         if test_factory[0] is test.SimpleTest:
                             if not test_factory[1].get('name', '').endswith('time'):
@@ -198,7 +214,7 @@ class JobTest(unittest.TestCase):
                         task = test_factory
                         if not task.runnable.url.endswith('time'):
                             filtered_test_suite.append(test_factory)
-                self.test_suite = filtered_test_suite
+                self.test_suite.tests = filtered_test_suite
                 super(JobFilterLog, self).pre_tests()
 
             def post_tests(self):
@@ -294,7 +310,7 @@ class JobTest(unittest.TestCase):
         self.job.create_test_suite()
         self.assertEqual(len(simple_tests_found), len(self.job.test_suite))
         if self.job.test_suite:
-            self.assertIsInstance(self.job.test_suite[0], nrunner.Task)
+            self.assertIsInstance(self.job.test_suite.tests[0], nrunner.Task)
 
     def tearDown(self):
         data_dir._tmp_tracker.unittest_refresh_dir_tracker()
......
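Finally, note the pattern the updated tests rely on: filtering reassigns the suite's tests attribute while keeping the TestSuite object, and therefore its config, name and variant, intact. A minimal sketch of that pattern, with a made-up filter condition:

```python
from avocado.core import job

class FilteredJob(job.Job):
    """Illustrative Job subclass, not part of this commit."""

    def pre_tests(self):
        # Reassign .tests rather than replacing self.test_suite itself,
        # so the suite's config, name and variant are preserved.
        self.test_suite.tests = [t for t in self.test_suite.tests
                                 if 'time' not in str(t)]  # made-up filter
        super().pre_tests()
```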