Commit f364a1ab authored by Cleber Rosa

Inner Runner implementation

In some external test suites, what is defined as a "test" cannot be
run by itself and needs a specific runner script/tool.

This introduces the concept of an "inner runner", that is, a custom
test runner or tool that can deal with custom tests that do not follow
the Avocado SIMPLE test definition.

This implements the Trello card:

 https://trello.com/c/TpXecE2n/486-introduce-inner-runner-to-avocado-run

More information can be found in the man page section about it.
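
For illustration only, here is a minimal sketch of what such an inner
runner executable could look like (the file name, test names and result
table below are hypothetical; the only contract assumed is the one this
patch implements: the runner is invoked once per test, as
"<inner_runner> <test>", and its exit status decides PASS (0) or
FAIL (non-zero)):

    #!/usr/bin/env python
    # Hypothetical minimal inner runner for a made-up test suite.
    # With "--inner-runner" pointing at this file, Avocado invokes it
    # once for each test name given on the "avocado run" command line.
    import sys

    # Purely illustrative mapping of test names to exit statuses
    KNOWN_TESTS = {'foo': 0, 'bar': 0, 'baz': 1}

    if __name__ == '__main__':
        test_name = sys.argv[1] if len(sys.argv) > 1 else ''
        # A real runner would locate and execute the named test here;
        # exiting non-zero makes Avocado report the test as FAIL
        sys.exit(KNOWN_TESTS.get(test_name, 1))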

Changes from v1:
 * removed "level" from "inner level runner" mentions
 * replicated docs from man page to "main" docs
Signed-off-by: Cleber Rosa <crosa@redhat.com>
Parent 9214b78d
@@ -294,6 +294,9 @@ class FileLoader(TestLoader):

        :param list_tests: list corrupted/invalid tests too
        :return: list of matching tests
        """
        if test.INNER_RUNNER is not None:
            return self._make_tests(url, [], [])

        if url is None:
            if list_tests is DEFAULT:
                return []  # Return empty set when not listing details

@@ -461,6 +464,9 @@ class FileLoader(TestLoader):
            """ Always return empty list """
            return []

        if test.INNER_RUNNER is not None:
            return self._make_test(test.SimpleTest, test_path)

        if list_non_tests:  # return broken test with params
            make_broken = self._make_test
        else:  # return empty set instead
...
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2015
# Author: Cleber Rosa <cleber@redhat.com>
"""Allows the use of an intermediary inner test runner."""
import os
import sys
import shlex
from . import plugin
from .. import test
from .. import output
from .. import exit_codes


class InnerRunner(plugin.Plugin):

    """
    Allows the use of an intermediary inner test runner
    """

    name = 'inner_runner'
    enabled = True

    def configure(self, parser):
        inner_grp = parser.runner.add_argument_group('inner test runner support')
        inner_grp.add_argument('--inner-runner', default=None,
                               metavar='EXECUTABLE',
                               help=('Path to a specific test runner that '
                                     'allows the use of its own tests. This '
                                     'should be used for running tests that '
                                     'do not conform to Avocado\'s SIMPLE test '
                                     'interface and cannot run standalone'))
        chdir_help = ('Change directory before executing tests. This option '
                      'may be necessary because of requirements and/or '
                      'limitations of the inner test runner. If the inner '
                      'runner needs to be run from its own base directory, '
                      'use "runner" here. If the inner runner runs tests based '
                      'on files and needs to be run from the directory '
                      'where those files are located, use "test" here and '
                      'specify the test directory with the option '
                      '"--inner-runner-testdir". Defaults to "%(default)s"')
        inner_grp.add_argument('--inner-runner-chdir', default='off',
                               choices=('runner', 'test', 'off'),
                               help=chdir_help)
        inner_grp.add_argument('--inner-runner-testdir', metavar='DIRECTORY',
                               default=None,
                               help=('Where test files understood by the inner '
                                     'test runner are located in the '
                                     'filesystem. Obviously this assumes and '
                                     'only applies to inner test runners that '
                                     'run tests from files'))
        self.configured = True

    def activate(self, app_args):
        self.view = output.View(app_args=app_args)
        if hasattr(app_args, 'inner_runner'):
            if app_args.inner_runner:
                # The inner runner option may include arguments; validate
                # only the executable part
                inner_runner_and_args = shlex.split(app_args.inner_runner)
                if len(inner_runner_and_args) > 1:
                    executable = inner_runner_and_args[0]
                else:
                    executable = app_args.inner_runner
                if not os.path.exists(executable):
                    msg = ('Could not find the inner runner executable "%s"'
                           % executable)
                    self.view.notify(event='error', msg=msg)
                    sys.exit(exit_codes.AVOCADO_FAIL)
                test.INNER_RUNNER = app_args.inner_runner
        if hasattr(app_args, 'inner_runner_testdir'):
            if app_args.inner_runner_testdir:
                test.INNER_RUNNER_TESTDIR = app_args.inner_runner_testdir
        if hasattr(app_args, 'inner_runner_chdir'):
            if app_args.inner_runner_chdir:
                if app_args.inner_runner_chdir == 'test':
                    # Changing to the test directory only makes sense if
                    # one was given
                    if app_args.inner_runner_testdir is None:
                        msg = ('Option "--inner-runner-testdir" is mandatory '
                               'when "--inner-runner-chdir=test" is used.')
                        self.view.notify(event='error', msg=msg)
                        sys.exit(exit_codes.AVOCADO_FAIL)
                test.INNER_RUNNER_CHDIR = app_args.inner_runner_chdir
@@ -42,6 +42,11 @@ from ..utils import process
from ..utils import stacktrace

INNER_RUNNER = None
INNER_RUNNER_TESTDIR = None
INNER_RUNNER_CHDIR = None


class Test(unittest.TestCase):

    """
@@ -549,7 +554,10 @@ class SimpleTest(Test):
                             r' \d\d:\d\d:\d\d WARN \|')

    def __init__(self, name, params=None, base_logdir=None, tag=None, job=None):
        if INNER_RUNNER is None:
            self.path = os.path.abspath(name)
        else:
            self.path = name
        super(SimpleTest, self).__init__(name=name, base_logdir=base_logdir,
                                         params=params, tag=tag, job=job)
        basedir = os.path.dirname(self.path)
@@ -577,9 +585,36 @@ class SimpleTest(Test):
        try:
            test_params = dict([(str(key), str(val)) for key, val in
                                self.params.iteritems()])
            pre_cwd = os.getcwd()
            new_cwd = None
            if INNER_RUNNER is not None:
                self.log.info('Running test with the inner test '
                              'runner: "%s"', INNER_RUNNER)
                # Change the working directory if needed by the inner runner
                if INNER_RUNNER_CHDIR == 'runner':
                    new_cwd = os.path.dirname(INNER_RUNNER)
                elif INNER_RUNNER_CHDIR == 'test':
                    new_cwd = INNER_RUNNER_TESTDIR
                else:
                    new_cwd = None
                if new_cwd is not None:
                    self.log.debug('Changing working directory to "%s" '
                                   'because of inner runner requirements',
                                   new_cwd)
                    os.chdir(new_cwd)
                command = "%s %s" % (INNER_RUNNER, self.path)
            else:
                command = pipes.quote(self.path)
            # process.run uses shlex.split(), the self.path needs to be escaped
            result = process.run(command, verbose=True,
                                 env=test_params)
            if new_cwd is not None:
                os.chdir(pre_cwd)
            self._log_detailed_cmd_info(result)
        except process.CmdError, details:
            self._log_detailed_cmd_info(details.result)
...
@@ -179,6 +179,70 @@ instrumented and simple tests::

    RESULTS    : PASS 2 | ERROR 2 | FAIL 2 | SKIP 0 | WARN 0 | INTERRUPT 0
    TIME       : 1.04 s
.. _running-inner-runner:

Running Tests With An Inner Runner
==================================

It's quite common for software projects to have organically grown test
suites. These usually include a custom-built, very specific test runner
that knows how to find and run its own tests.

Still, running those tests inside Avocado may be a good idea for
various reasons, including being able to have results in different
human- and machine-readable formats, collecting system information
alongside those tests (Avocado's `sysinfo` functionality), and more.

Avocado makes that possible by means of its "inner runner" feature. The
most basic way of using it is::

    $ avocado run --inner-runner=/path/to/inner_runner foo bar baz

In this example, Avocado will report individual test results for tests
`foo`, `bar` and `baz`. The actual results will be based on the return
code of individual executions of `/path/to/inner_runner foo`,
`/path/to/inner_runner bar` and finally `/path/to/inner_runner baz`.

As another way to explain and show how this feature works, think of the
"inner runner" as some kind of interpreter and the individual tests as
anything that this interpreter recognizes and is able to execute. A
UNIX shell, say `/bin/sh`, could be considered an inner runner, and
files with shell code could be considered tests::

    $ echo "exit 0" > /tmp/pass
    $ echo "exit 1" > /tmp/fail

    $ avocado run --inner-runner=/bin/sh /tmp/pass /tmp/fail
    JOB ID     : 4a2a1d259690cc7b226e33facdde4f628ab30741
    JOB LOG    : /home/<user>/avocado/job-results/job-<date>-<shortid>/job.log
    JOB HTML   : /home/<user>/avocado/job-results/job-<date>-<shortid>/html/results.html
    TESTS      : 2
    (1/2) /tmp/pass: PASS (0.01 s)
    (2/2) /tmp/fail: FAIL (0.01 s)
    RESULTS    : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
    TIME       : 0.01 s

This example is pretty straightforward, and could also be achieved by
giving `/tmp/pass` and `/tmp/fail` shell "shebangs" (`#!/bin/sh`),
making them executable (`chmod +x /tmp/pass /tmp/fail`), and running
them as "SIMPLE" tests.

But now consider the following example::

    $ avocado run --inner-runner=/bin/curl http://local-avocado-server:9405/jobs/ \
          http://remote-avocado-server:9405/jobs/
    JOB ID     : 56016a1ffffaba02492fdbd5662ac0b958f51e11
    JOB LOG    : /home/<user>/avocado/job-results/job-<date>-<shortid>/job.log
    JOB HTML   : /home/<user>/avocado/job-results/job-<date>-<shortid>/html/results.html
    TESTS      : 2
    (1/2) http://local-avocado-server:9405/jobs/: PASS (0.02 s)
    (2/2) http://remote-avocado-server:9405/jobs/: FAIL (3.02 s)
    RESULTS    : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
    TIME       : 3.04 s

This effectively makes `/bin/curl` an "inner test runner", responsible
for trying to fetch those URLs, and reporting PASS or FAIL for each of
them.
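
Two additional options control the working directory used when the
inner runner is executed: `--inner-runner-chdir` (one of `runner`,
`test` or `off`) and `--inner-runner-testdir`. As a minimal sketch
(the suite paths and test names below are hypothetical placeholders),
a file-based suite that has to be run from its own test directory
could be driven like this::

    $ avocado run --inner-runner=/path/to/suite/run_one_test \
          --inner-runner-chdir=test \
          --inner-runner-testdir=/path/to/suite/tests \
          testcase1 testcase2

Note that `--inner-runner-testdir` is mandatory when
`--inner-runner-chdir=test` is used.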

Debugging tests
===============
...
@@ -452,6 +452,67 @@ the execution of ``perf.sh``. ::

Note that it is not possible to use ``--gdb-run-bin`` together
with ``--wrapper``, they are incompatible.

RUNNING TESTS WITH AN INNER RUNNER
==================================

It's quite common for software projects to have organically grown test
suites. These usually include a custom-built, very specific test runner
that knows how to find and run its own tests.

Still, running those tests inside Avocado may be a good idea for
various reasons, including being able to have results in different
human- and machine-readable formats, collecting system information
alongside those tests (Avocado's `sysinfo` functionality), and more.

Avocado makes that possible by means of its "inner runner" feature. The
most basic way of using it is::

    $ avocado run --inner-runner=/path/to/inner_runner foo bar baz

In this example, Avocado will report individual test results for tests
`foo`, `bar` and `baz`. The actual results will be based on the return
code of individual executions of `/path/to/inner_runner foo`,
`/path/to/inner_runner bar` and finally `/path/to/inner_runner baz`.

As another way to explain and show how this feature works, think of the
"inner runner" as some kind of interpreter and the individual tests as
anything that this interpreter recognizes and is able to execute. A
UNIX shell, say `/bin/sh`, could be considered an inner runner, and
files with shell code could be considered tests::

    $ echo "exit 0" > /tmp/pass
    $ echo "exit 1" > /tmp/fail

    $ avocado run --inner-runner=/bin/sh /tmp/pass /tmp/fail
    JOB ID     : 4a2a1d259690cc7b226e33facdde4f628ab30741
    JOB LOG    : /home/<user>/avocado/job-results/job-<date>-<shortid>/job.log
    JOB HTML   : /home/<user>/avocado/job-results/job-<date>-<shortid>/html/results.html
    TESTS      : 2
    (1/2) /tmp/pass: PASS (0.01 s)
    (2/2) /tmp/fail: FAIL (0.01 s)
    RESULTS    : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
    TIME       : 0.01 s

This example is pretty straightforward, and could also be achieved by
giving `/tmp/pass` and `/tmp/fail` shell "shebangs" (`#!/bin/sh`),
making them executable (`chmod +x /tmp/pass /tmp/fail`), and running
them as "SIMPLE" tests.

But now consider the following example::

    $ avocado run --inner-runner=/bin/curl http://local-avocado-server:9405/jobs/ \
          http://remote-avocado-server:9405/jobs/
    JOB ID     : 56016a1ffffaba02492fdbd5662ac0b958f51e11
    JOB LOG    : /home/<user>/avocado/job-results/job-<date>-<shortid>/job.log
    JOB HTML   : /home/<user>/avocado/job-results/job-<date>-<shortid>/html/results.html
    TESTS      : 2
    (1/2) http://local-avocado-server:9405/jobs/: PASS (0.02 s)
    (2/2) http://remote-avocado-server:9405/jobs/: FAIL (3.02 s)
    RESULTS    : PASS 1 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0
    TIME       : 3.04 s

This effectively makes `/bin/curl` an "inner test runner", responsible
for trying to fetch those URLs, and reporting PASS or FAIL for each of
them.
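
Two additional options control the working directory used when the
inner runner is executed: `--inner-runner-chdir` (one of `runner`,
`test` or `off`) and `--inner-runner-testdir`. As a minimal sketch
(the suite paths and test names below are hypothetical placeholders),
a file-based suite that has to be run from its own test directory
could be driven like this::

    $ avocado run --inner-runner=/path/to/suite/run_one_test \
          --inner-runner-chdir=test \
          --inner-runner-testdir=/path/to/suite/tests \
          testcase1 testcase2

Note that `--inner-runner-testdir` is mandatory when
`--inner-runner-chdir=test` is used.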

RECORDING TEST REFERENCE OUTPUT
===============================
...
@@ -24,10 +24,14 @@ PASS_SCRIPT_CONTENTS = """#!/bin/sh
true
"""

PASS_SHELL_CONTENTS = "exit 0"

FAIL_SCRIPT_CONTENTS = """#!/bin/sh
false
"""

FAIL_SHELL_CONTENTS = "exit 1"

VOID_PLUGIN_CONTENTS = """#!/usr/bin/env python
from avocado.core.plugins.plugin import Plugin


class VoidPlugin(Plugin):
@@ -351,6 +355,60 @@ class RunnerSimpleTest(unittest.TestCase):
        shutil.rmtree(self.tmpdir)


class InnerRunnerTest(unittest.TestCase):

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()
        self.pass_script = script.TemporaryScript(
            'pass',
            PASS_SHELL_CONTENTS,
            'avocado_innerrunner_functional')
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            'fail',
            FAIL_SHELL_CONTENTS,
            'avocado_innerrunner_functional')
        self.fail_script.save()

    def test_innerrunner_pass(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --inner-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 0
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_innerrunner_fail(self):
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --job-results-dir %s --sysinfo=off --inner-runner=/bin/sh %s'
        cmd_line %= (self.tmpdir, self.fail_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = 1
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def test_innerrunner_chdir_no_testdir(self):
        os.chdir(basedir)
        cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off --inner-runner=/bin/sh '
                    '--inner-runner-chdir=test %s')
        cmd_line %= (self.tmpdir, self.pass_script.path)
        result = process.run(cmd_line, ignore_status=True)
        expected_output = 'Option "--inner-runner-testdir" is mandatory'
        self.assertIn(expected_output, result.stderr)
        expected_rc = 3
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s" %
                         (expected_rc, result))

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        shutil.rmtree(self.tmpdir)


class ExternalPluginsTest(unittest.TestCase):

    def setUp(self):
...