提交 dc562b80 编写于 作者: C Cleber Rosa

Merge remote-tracking branch 'ruda/disable_sysinfo_option_V3'

......@@ -29,6 +29,7 @@ from avocado import test
from avocado import runner
from avocado import loader
from avocado import runtime
from avocado import sysinfo
from avocado.core import data_dir
from avocado.core import exit_codes
from avocado.core import exceptions
......@@ -37,6 +38,7 @@ from avocado.core import output
from avocado.plugins import jsonresult
from avocado.plugins import xunit
from avocado.utils import archive
from avocado.utils import path
try:
......@@ -104,6 +106,11 @@ class Job(object):
self.status = "RUNNING"
self.result_proxy = result.TestResultProxy()
self.view = output.View(app_args=self.args)
self.sysinfo = None
if hasattr(self.args, 'sysinfo'):
if self.args.sysinfo == 'on':
sysinfo_dir = path.init_dir(self.logdir, 'sysinfo')
self.sysinfo = sysinfo.SysInfo(basedir=sysinfo_dir)
def _make_test_loader(self):
if hasattr(self.args, 'test_loader'):
......
......@@ -86,6 +86,8 @@ class ReportModel(object):
sysinfo_contents = sysinfo_file.read()
except OSError, details:
sysinfo_contents = "Error reading %s: %s" % (sysinfo_path, details)
except IOError, details:
sysinfo_contents = os.uname()[1]
return sysinfo_contents
def hostname(self):
......@@ -122,10 +124,13 @@ class ReportModel(object):
return test_info
def sysinfo(self):
sysinfo_list = []
base_path = os.path.join(self._results_dir(relative_links=False), 'sysinfo', 'pre')
sysinfo_files = os.listdir(base_path)
try:
sysinfo_files = os.listdir(base_path)
except OSError:
return sysinfo_list
sysinfo_files.sort()
sysinfo_list = []
s_id = 1
for s_f in sysinfo_files:
sysinfo_dict = {}
......
......@@ -18,6 +18,7 @@ Base Test Runner Plugins.
import sys
from avocado.settings import settings
from avocado.core import exit_codes
from avocado.plugins import plugin
from avocado.core import output
......@@ -60,6 +61,16 @@ class TestRunner(plugin.Plugin):
'server. You should not use this option '
'unless you know exactly what you\'re doing'))
sysinfo_default = settings.get_value('sysinfo.collect',
'enabled',
key_type='bool',
default=True)
sysinfo_default = 'on' if sysinfo_default is True else 'off'
self.parser.add_argument('--sysinfo', choices=('on', 'off'), default=sysinfo_default,
help=('Enable or disable system information '
'(hardware details, profilers, etc.). '
'Current: %(default)s'))
out = self.parser.add_argument_group('output related arguments')
out.add_argument('-s', '--silent', action='store_true', default=False,
......
......@@ -26,7 +26,6 @@ import sys
import time
from avocado import runtime
from avocado import sysinfo
from avocado.core import exceptions
from avocado.core import output
from avocado.core import status
......@@ -52,8 +51,6 @@ class TestRunner(object):
"""
self.job = job
self.result = test_result
sysinfo_dir = path.init_dir(self.job.logdir, 'sysinfo')
self.sysinfo = sysinfo.SysInfo(basedir=sysinfo_dir)
def run_test(self, test_factory, queue):
"""
......@@ -122,7 +119,8 @@ class TestRunner(object):
:return: a list of test failures.
"""
failures = []
self.sysinfo.start_job_hook()
if self.job.sysinfo is not None:
self.job.sysinfo.start_job_hook()
self.result.start_tests()
q = queues.SimpleQueue()
test_suite = self.job.test_loader.discover(params_list, q)
......@@ -233,5 +231,6 @@ class TestRunner(object):
failures.append(test_state['name'])
runtime.CURRENT_TEST = None
self.result.end_tests()
self.sysinfo.end_job_hook()
if self.job.sysinfo is not None:
self.job.sysinfo.end_job_hook()
return failures
......@@ -5,4 +5,5 @@ data_dir = /usr/share/avocado/data
logs_dir = ~/avocado/job-results
[sysinfo.collect]
enabled = True
installed_packages = False
......@@ -141,6 +141,11 @@ stdout. Even if you specify things like --show-job-log in the CLI, --silent
will have precedence and you will not get application stdout. Note that --silent
does not affect on disk job logs, those continue to be generated normally.
SILENCING SYSINFO REPORT
========================
You may specify --sysinfo=off and avocado will not collect profilers,
hardware details and other system information inside the job result directory.
LISTING TESTS
=============
......
......@@ -52,27 +52,27 @@ class RunnerOperationTest(unittest.TestCase):
def test_runner_all_ok(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run passtest passtest'
cmd_line = './scripts/avocado run --sysinfo=off passtest passtest'
process.run(cmd_line)
def test_datadir_alias(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run datadir'
cmd_line = './scripts/avocado run --sysinfo=off datadir'
process.run(cmd_line)
def test_datadir_noalias(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run examples/tests/datadir.py examples/tests/datadir.py'
cmd_line = './scripts/avocado run --sysinfo=off examples/tests/datadir.py examples/tests/datadir.py'
process.run(cmd_line)
def test_runner_noalias(self):
os.chdir(basedir)
cmd_line = "./scripts/avocado run examples/tests/passtest.py examples/tests/passtest.py"
cmd_line = "./scripts/avocado run --sysinfo=off examples/tests/passtest.py examples/tests/passtest.py"
process.run(cmd_line)
def test_runner_tests_fail(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run passtest failtest passtest'
cmd_line = './scripts/avocado run --sysinfo=off passtest failtest passtest'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
self.assertEqual(result.exit_status, expected_rc,
......@@ -80,7 +80,7 @@ class RunnerOperationTest(unittest.TestCase):
def test_runner_nonexistent_test(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run bogustest'
cmd_line = './scripts/avocado run --sysinfo=off bogustest'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
unexpected_rc = 3
......@@ -91,7 +91,7 @@ class RunnerOperationTest(unittest.TestCase):
def test_runner_doublefail(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --xunit - doublefail'
cmd_line = './scripts/avocado run --sysinfo=off --xunit - doublefail'
result = process.run(cmd_line, ignore_status=True)
output = result.stdout
expected_rc = 1
......@@ -108,7 +108,7 @@ class RunnerOperationTest(unittest.TestCase):
def test_runner_timeout(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --xunit - timeouttest'
cmd_line = './scripts/avocado run --sysinfo=off --xunit - timeouttest'
result = process.run(cmd_line, ignore_status=True)
output = result.stdout
expected_rc = 1
......@@ -124,7 +124,7 @@ class RunnerOperationTest(unittest.TestCase):
def test_runner_abort(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --xunit - abort'
cmd_line = './scripts/avocado run --sysinfo=off --xunit - abort'
result = process.run(cmd_line, ignore_status=True)
output = result.stdout
excerpt = 'Test process aborted'
......@@ -138,7 +138,7 @@ class RunnerOperationTest(unittest.TestCase):
def test_silent_output(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run passtest --silent'
cmd_line = './scripts/avocado run --sysinfo=off passtest --silent'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
expected_output = ''
......@@ -156,7 +156,7 @@ class RunnerOperationTest(unittest.TestCase):
def test_empty_test_list(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run'
cmd_line = './scripts/avocado run --sysinfo=off'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 2
expected_output = 'Empty test ID. A test path or alias must be provided'
......@@ -167,7 +167,7 @@ class RunnerOperationTest(unittest.TestCase):
def test_not_found(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run sbrubles'
cmd_line = './scripts/avocado run --sysinfo=off sbrubles'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
self.assertEqual(result.exit_status, expected_rc)
......@@ -175,20 +175,20 @@ class RunnerOperationTest(unittest.TestCase):
self.assertIn('NOT FOUND : 1', result.stdout)
def test_invalid_unique_id(self):
cmd_line = './scripts/avocado run --force-job-id foobar skiptest'
cmd_line = './scripts/avocado run --sysinfo=off --force-job-id foobar skiptest'
result = process.run(cmd_line, ignore_status=True)
self.assertNotEqual(0, result.exit_status)
self.assertIn('needs to be a 40 digit hex', result.stdout)
def test_valid_unique_id(self):
cmd_line = './scripts/avocado run --force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 skiptest'
cmd_line = './scripts/avocado run --sysinfo=off --force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 skiptest'
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(0, result.exit_status)
self.assertNotIn('needs to be a 40 digit hex', result.stdout)
self.assertIn('SKIP', result.stdout)
def test_automatic_unique_id(self):
cmd_line = './scripts/avocado run skiptest --json -'
cmd_line = './scripts/avocado run --sysinfo=off skiptest --json -'
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(0, result.exit_status)
r = json.loads(result.stdout)
......@@ -212,7 +212,7 @@ class RunnerSimpleTest(unittest.TestCase):
def test_simpletest_pass(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run %s' % self.pass_script.path
cmd_line = './scripts/avocado run --sysinfo=off %s' % self.pass_script.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
......@@ -221,7 +221,7 @@ class RunnerSimpleTest(unittest.TestCase):
def test_simpletest_fail(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run %s' % self.fail_script.path
cmd_line = './scripts/avocado run --sysinfo=off %s' % self.fail_script.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
self.assertEqual(result.exit_status, expected_rc,
......@@ -238,7 +238,7 @@ class RunnerSimpleTest(unittest.TestCase):
"""
os.chdir(basedir)
one_hundred = 'failtest ' * 100
cmd_line = './scripts/avocado run %s' % one_hundred
cmd_line = './scripts/avocado run --sysinfo=off %s' % one_hundred
initial_time = time.time()
result = process.run(cmd_line, ignore_status=True)
actual_time = time.time() - initial_time
......@@ -254,7 +254,7 @@ class RunnerSimpleTest(unittest.TestCase):
"""
os.chdir(basedir)
sleep_fail_sleep = 'sleeptest ' + 'failtest ' * 100 + 'sleeptest'
cmd_line = './scripts/avocado run %s' % sleep_fail_sleep
cmd_line = './scripts/avocado run --sysinfo=off %s' % sleep_fail_sleep
initial_time = time.time()
result = process.run(cmd_line, ignore_status=True)
actual_time = time.time() - initial_time
......@@ -389,7 +389,7 @@ class PluginsXunitTest(PluginsTest):
def run_and_check(self, testname, e_rc, e_ntests, e_nerrors,
e_nnotfound, e_nfailures, e_nskip):
os.chdir(basedir)
cmd_line = './scripts/avocado run --xunit - %s' % testname
cmd_line = './scripts/avocado run --sysinfo=off --xunit - %s' % testname
result = process.run(cmd_line, ignore_status=True)
xml_output = result.stdout
self.assertEqual(result.exit_status, e_rc,
......@@ -458,7 +458,7 @@ class PluginsJSONTest(PluginsTest):
def run_and_check(self, testname, e_rc, e_ntests, e_nerrors, e_nnotfound,
e_nfailures, e_nskip):
os.chdir(basedir)
cmd_line = './scripts/avocado run --json - --archive %s' % testname
cmd_line = './scripts/avocado run --sysinfo=off --json - --archive %s' % testname
result = process.run(cmd_line, ignore_status=True)
json_output = result.stdout
self.assertEqual(result.exit_status, e_rc,
......
......@@ -47,7 +47,7 @@ class EnvironmentVariablesTest(unittest.TestCase):
def test_environment_vars(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run %s' % self.script.path
cmd_line = './scripts/avocado run --sysinfo=off %s' % self.script.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
......
......@@ -16,12 +16,12 @@ class GDBPluginTest(unittest.TestCase):
def test_gdb_prerun_commands(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --gdb-prerun-commands=/dev/null sleeptest'
cmd_line = './scripts/avocado run --sysinfo=off --gdb-prerun-commands=/dev/null sleeptest'
process.run(cmd_line)
def test_gdb_multiple_prerun_commands(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --gdb-prerun-commands=/dev/null '
cmd_line = ('./scripts/avocado run --sysinfo=off --gdb-prerun-commands=/dev/null '
'--gdb-prerun-commands=foo:/dev/null sleeptest')
process.run(cmd_line)
......
......@@ -17,7 +17,7 @@ class JournalPluginTests(unittest.TestCase):
def setUp(self):
os.chdir(basedir)
self.cmd_line = './scripts/avocado run --json - --journal examples/tests/passtest.py'
self.cmd_line = './scripts/avocado run --sysinfo=off --json - --journal examples/tests/passtest.py'
self.result = process.run(self.cmd_line, ignore_status=True)
data = json.loads(self.result.stdout)
self.job_id = data['job_id']
......
......@@ -61,7 +61,7 @@ class LoaderTestFunctional(unittest.TestCase):
simple_test = script.TemporaryScript('simpletest.sh', SIMPLE_TEST,
'avocado_loader_unittest')
simple_test.save()
cmd_line = './scripts/avocado run %s' % simple_test.path
cmd_line = './scripts/avocado run --sysinfo=off %s' % simple_test.path
process.run(cmd_line)
simple_test.remove()
......@@ -71,7 +71,7 @@ class LoaderTestFunctional(unittest.TestCase):
'avocado_loader_unittest',
mode=0664)
simple_test.save()
cmd_line = './scripts/avocado run %s' % simple_test.path
cmd_line = './scripts/avocado run --sysinfo=off %s' % simple_test.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
self.assertEqual(result.exit_status, expected_rc,
......@@ -85,7 +85,7 @@ class LoaderTestFunctional(unittest.TestCase):
AVOCADO_TEST_OK,
'avocado_loader_unittest')
avocado_pass_test.save()
cmd_line = './scripts/avocado run %s' % avocado_pass_test.path
cmd_line = './scripts/avocado run --sysinfo=off %s' % avocado_pass_test.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
......@@ -97,7 +97,7 @@ class LoaderTestFunctional(unittest.TestCase):
AVOCADO_TEST_BUGGY,
'avocado_loader_unittest')
avocado_buggy_test.save()
cmd_line = './scripts/avocado run %s' % avocado_buggy_test.path
cmd_line = './scripts/avocado run --sysinfo=off %s' % avocado_buggy_test.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
self.assertEqual(result.exit_status, expected_rc,
......@@ -110,7 +110,7 @@ class LoaderTestFunctional(unittest.TestCase):
'avocado_loader_unittest',
mode=0664)
avocado_buggy_test.save()
cmd_line = './scripts/avocado run %s' % avocado_buggy_test.path
cmd_line = './scripts/avocado run --sysinfo=off %s' % avocado_buggy_test.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
self.assertEqual(result.exit_status, expected_rc,
......@@ -122,7 +122,7 @@ class LoaderTestFunctional(unittest.TestCase):
avocado_not_a_test = script.TemporaryScript('notatest.py', NOT_A_TEST,
'avocado_loader_unittest')
avocado_not_a_test.save()
cmd_line = './scripts/avocado run %s' % avocado_not_a_test.path
cmd_line = './scripts/avocado run --sysinfo=off %s' % avocado_not_a_test.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
self.assertEqual(result.exit_status, expected_rc,
......@@ -135,7 +135,7 @@ class LoaderTestFunctional(unittest.TestCase):
'avocado_loader_unittest',
mode=0664)
avocado_not_a_test.save()
cmd_line = './scripts/avocado run %s' % avocado_not_a_test.path
cmd_line = './scripts/avocado run --sysinfo=off %s' % avocado_not_a_test.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
self.assertEqual(result.exit_status, expected_rc,
......
......@@ -69,12 +69,12 @@ class MultiplexTests(unittest.TestCase):
self.assertIn(DEBUG_OUT, out)
def test_run_mplex_noid(self):
cmd_line = './scripts/avocado run --multiplex examples/tests/sleeptest.py.data/sleeptest.yaml'
cmd_line = './scripts/avocado run --sysinfo=off --multiplex examples/tests/sleeptest.py.data/sleeptest.yaml'
expected_rc = 2
self.run_and_check(cmd_line, expected_rc)
def test_run_mplex_passtest(self):
cmd_line = './scripts/avocado run passtest --multiplex examples/tests/sleeptest.py.data/sleeptest.yaml'
cmd_line = './scripts/avocado run --sysinfo=off passtest --multiplex examples/tests/sleeptest.py.data/sleeptest.yaml'
expected_rc = 0
# A typical pass has about 14 lines of output,
# so we expect the full job log has at least 4 times
......@@ -83,17 +83,17 @@ class MultiplexTests(unittest.TestCase):
self.run_and_check(cmd_line, expected_rc, 14 * 4)
def test_run_mplex_doublepass(self):
cmd_line = './scripts/avocado run passtest passtest --multiplex examples/tests/sleeptest.py.data/sleeptest.yaml'
cmd_line = './scripts/avocado run --sysinfo=off passtest passtest --multiplex examples/tests/sleeptest.py.data/sleeptest.yaml'
# Should run 2-times 4 variants of pass test
self.run_and_check(cmd_line, expected_rc=0, expected_lines=2 * 4 * 14)
def test_run_mplex_failtest(self):
cmd_line = './scripts/avocado run passtest failtest --multiplex examples/tests/sleeptest.py.data/sleeptest.yaml'
cmd_line = './scripts/avocado run --sysinfo=off passtest failtest --multiplex examples/tests/sleeptest.py.data/sleeptest.yaml'
expected_rc = 1
self.run_and_check(cmd_line, expected_rc)
def test_run_double_mplex(self):
cmd_line = ('./scripts/avocado run passtest --multiplex '
cmd_line = ('./scripts/avocado run --sysinfo=off passtest --multiplex '
'examples/tests/sleeptest.py.data/sleeptest.yaml '
'examples/tests/sleeptest.py.data/sleeptest.yaml')
expected_rc = 0
......@@ -104,7 +104,7 @@ class MultiplexTests(unittest.TestCase):
self.run_and_check(cmd_line, expected_rc, 14 * 4)
def test_run_mplex_params(self):
cmd_line = ('./scripts/avocado run examples/tests/env_variables.sh '
cmd_line = ('./scripts/avocado run --sysinfo=off examples/tests/env_variables.sh '
'--multiplex examples/tests/env_variables.sh.data'
'/env_variables.yaml '
'--show-job-log')
......
......@@ -29,7 +29,7 @@ class RunnerSimpleTest(unittest.TestCase):
def test_output_record_none(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run %s --output-check-record none' % self.output_script.path
cmd_line = './scripts/avocado run --sysinfo=off %s --output-check-record none' % self.output_script.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
......@@ -42,7 +42,7 @@ class RunnerSimpleTest(unittest.TestCase):
def test_output_record_stdout(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run %s --output-check-record stdout' % self.output_script.path
cmd_line = './scripts/avocado run --sysinfo=off %s --output-check-record stdout' % self.output_script.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
......@@ -55,7 +55,7 @@ class RunnerSimpleTest(unittest.TestCase):
def test_output_record_all(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run %s --output-check-record all' % self.output_script.path
cmd_line = './scripts/avocado run --sysinfo=off %s --output-check-record all' % self.output_script.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
......@@ -68,7 +68,7 @@ class RunnerSimpleTest(unittest.TestCase):
def test_output_record_and_check(self):
self.test_output_record_all()
cmd_line = './scripts/avocado run %s' % self.output_script.path
cmd_line = './scripts/avocado run --sysinfo=off %s' % self.output_script.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
......@@ -81,7 +81,7 @@ class RunnerSimpleTest(unittest.TestCase):
stdout_file = os.path.join("%s.data/stdout.expected" % self.output_script.path)
with open(stdout_file, 'w') as stdout_file_obj:
stdout_file_obj.write(tampered_msg)
cmd_line = './scripts/avocado run %s --xunit -' % self.output_script.path
cmd_line = './scripts/avocado run --sysinfo=off %s --xunit -' % self.output_script.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
self.assertEqual(result.exit_status, expected_rc,
......@@ -95,7 +95,7 @@ class RunnerSimpleTest(unittest.TestCase):
stdout_file = os.path.join("%s.data/stdout.expected" % self.output_script.path)
with open(stdout_file, 'w') as stdout_file_obj:
stdout_file_obj.write(tampered_msg)
cmd_line = './scripts/avocado run %s --disable-output-check --xunit -' % self.output_script.path
cmd_line = './scripts/avocado run --sysinfo=off %s --disable-output-check --xunit -' % self.output_script.path
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
......
......@@ -20,7 +20,7 @@ class OutputTest(unittest.TestCase):
def test_output_doublefree(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run doublefree'
cmd_line = './scripts/avocado run --sysinfo=off doublefree'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 1
output = result.stdout + result.stderr
......@@ -47,7 +47,7 @@ class OutputPluginTest(unittest.TestCase):
def test_output_incompatible_setup(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --xunit - --json - passtest'
cmd_line = './scripts/avocado run --sysinfo=off --xunit - --json - passtest'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 2
output = result.stdout + result.stderr
......@@ -60,7 +60,7 @@ class OutputPluginTest(unittest.TestCase):
def test_output_incompatible_setup_2(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --vm --json - passtest'
cmd_line = './scripts/avocado run --sysinfo=off --vm --json - passtest'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 2
output = result.stdout + result.stderr
......@@ -73,7 +73,7 @@ class OutputPluginTest(unittest.TestCase):
def test_output_incompatible_setup_3(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run --html - sleeptest'
cmd_line = './scripts/avocado run --sysinfo=off --html - sleeptest'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 2
output = result.stdout + result.stderr
......@@ -87,7 +87,7 @@ class OutputPluginTest(unittest.TestCase):
def test_output_compatible_setup(self):
tmpfile = tempfile.mktemp()
os.chdir(basedir)
cmd_line = './scripts/avocado run --journal --xunit %s --json - passtest' % tmpfile
cmd_line = './scripts/avocado run --sysinfo=off --journal --xunit %s --json - passtest' % tmpfile
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = 0
......@@ -107,7 +107,7 @@ class OutputPluginTest(unittest.TestCase):
def test_output_compatible_setup_2(self):
tmpfile = tempfile.mktemp()
os.chdir(basedir)
cmd_line = './scripts/avocado run --xunit - --json %s passtest' % tmpfile
cmd_line = './scripts/avocado run --sysinfo=off --xunit - --json %s passtest' % tmpfile
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = 0
......@@ -133,7 +133,7 @@ class OutputPluginTest(unittest.TestCase):
tmpdir = tempfile.mkdtemp()
tmpfile3 = tempfile.mktemp(dir=tmpdir)
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --xunit %s --json %s --html %s passtest' %
cmd_line = ('./scripts/avocado run --sysinfo=off --xunit %s --json %s --html %s passtest' %
(tmpfile, tmpfile2, tmpfile3))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
......@@ -164,7 +164,7 @@ class OutputPluginTest(unittest.TestCase):
tmpfile = tempfile.mktemp()
tmpfile2 = tempfile.mktemp()
os.chdir(basedir)
cmd_line = './scripts/avocado run --silent --xunit %s --json %s passtest' % (tmpfile, tmpfile2)
cmd_line = './scripts/avocado run --sysinfo=off --silent --xunit %s --json %s passtest' % (tmpfile, tmpfile2)
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = 0
......@@ -188,7 +188,7 @@ class OutputPluginTest(unittest.TestCase):
def test_show_job_log(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run passtest --show-job-log'
cmd_line = './scripts/avocado run --sysinfo=off passtest --show-job-log'
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
......@@ -197,7 +197,7 @@ class OutputPluginTest(unittest.TestCase):
def test_silent_trumps_show_job_log(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run passtest --show-job-log --silent'
cmd_line = './scripts/avocado run --sysinfo=off passtest --show-job-log --silent'
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = 0
......@@ -208,7 +208,7 @@ class OutputPluginTest(unittest.TestCase):
def test_default_enabled_plugins(self):
os.chdir(basedir)
cmd_line = './scripts/avocado run passtest'
cmd_line = './scripts/avocado run --sysinfo=off passtest'
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = 0
......@@ -224,7 +224,7 @@ class OutputPluginTest(unittest.TestCase):
tmpfile = tempfile.mktemp()
try:
os.chdir(basedir)
cmd_line = './scripts/avocado run whiteboard --json %s' % tmpfile
cmd_line = './scripts/avocado run --sysinfo=off whiteboard --json %s' % tmpfile
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
self.assertEqual(result.exit_status, expected_rc,
......@@ -248,7 +248,7 @@ class OutputPluginTest(unittest.TestCase):
redirected_output_path = tempfile.mktemp()
try:
os.chdir(basedir)
cmd_line = './scripts/avocado run passtest > %s' % redirected_output_path
cmd_line = './scripts/avocado run --sysinfo=off passtest > %s' % redirected_output_path
result = process.run(cmd_line, ignore_status=True, shell=True)
output = result.stdout + result.stderr
expected_rc = 0
......
import os
import sys
import unittest

# simple magic for using scripts within a source tree
# Walk four levels up from this file to reach the repository root, and if it
# looks like an avocado source tree (contains an 'avocado' package dir),
# prepend it to sys.path so the in-tree package is importable without install.
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..')
basedir = os.path.abspath(basedir)
if os.path.isdir(os.path.join(basedir, 'avocado')):
    sys.path.append(basedir)

# NOTE: imported after the sys.path tweak so the in-tree copy wins
from avocado.utils import process
class SysInfoTest(unittest.TestCase):

    """
    Functional tests for the ``--sysinfo`` command line option.

    Runs the avocado command line app as a subprocess and inspects the
    job results directory for the presence/absence of sysinfo data.
    """

    def _job_sysinfo_dir(self, result):
        """
        Locate the job's sysinfo directory from the avocado run output.

        :param result: process result object whose combined stdout/stderr
                       is expected to contain the 'JOB LOG' line printed
                       by the avocado runner.
        :return: path of the 'sysinfo' directory sitting next to the job log.
        """
        output = result.stdout + result.stderr
        for line in output.splitlines():
            if 'JOB LOG' in line:
                # The job log path is the last whitespace-separated token
                job_log = line.split()[-1]
                return os.path.join(os.path.dirname(job_log), 'sysinfo')
        # Fail with a clear message instead of letting an unbound variable
        # raise NameError later on
        self.fail("Could not find the 'JOB LOG' line in the avocado "
                  "output:\n%s" % result)

    def test_sysinfo_enabled(self):
        """
        With --sysinfo=on the sysinfo dir must exist and have pre/post data.
        """
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=on passtest'
        result = process.run(cmd_line)
        expected_rc = 0
        self.assertEqual(result.exit_status, expected_rc,
                         'Avocado did not return rc %d:\n%s' %
                         (expected_rc, result))
        sysinfo_dir = self._job_sysinfo_dir(result)
        msg = ("Avocado didn't create sysinfo directory %s:\n%s" %
               (sysinfo_dir, result))
        self.assertTrue(os.path.isdir(sysinfo_dir), msg)
        msg = 'The sysinfo directory is empty:\n%s' % result
        self.assertGreater(len(os.listdir(sysinfo_dir)), 0, msg)
        for hook in ('pre', 'post'):
            sysinfo_subdir = os.path.join(sysinfo_dir, hook)
            msg = ('The sysinfo/%s subdirectory is empty:\n%s' %
                   (hook, result))
            self.assertGreater(len(os.listdir(sysinfo_subdir)), 0, msg)

    def test_sysinfo_disabled(self):
        """
        With --sysinfo=off no sysinfo directory may be created at all.
        """
        os.chdir(basedir)
        cmd_line = './scripts/avocado run --sysinfo=off passtest'
        result = process.run(cmd_line)
        expected_rc = 0
        self.assertEqual(result.exit_status, expected_rc,
                         'Avocado did not return rc %d:\n%s' %
                         (expected_rc, result))
        sysinfo_dir = self._job_sysinfo_dir(result)
        msg = ('Avocado created sysinfo directory %s:\n%s' %
               (sysinfo_dir, result))
        self.assertFalse(os.path.isdir(sysinfo_dir), msg)
# Allow running this functional test module directly from the command line
if __name__ == '__main__':
    unittest.main()
......@@ -40,7 +40,7 @@ class WrapperTest(unittest.TestCase):
def test_global_wrapper(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --wrapper %s '
cmd_line = ('./scripts/avocado run --sysinfo=off --wrapper %s '
'examples/tests/datadir.py' % self.script.path)
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
......@@ -54,7 +54,7 @@ class WrapperTest(unittest.TestCase):
def test_process_wrapper(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --wrapper %s:*/datadir '
cmd_line = ('./scripts/avocado run --sysinfo=off --wrapper %s:*/datadir '
'examples/tests/datadir.py' % self.script.path)
result = process.run(cmd_line, ignore_status=True)
expected_rc = 0
......@@ -68,7 +68,7 @@ class WrapperTest(unittest.TestCase):
def test_both_wrappers(self):
os.chdir(basedir)
cmd_line = ('./scripts/avocado run --wrapper %s --wrapper %s:*/datadir '
cmd_line = ('./scripts/avocado run --sysinfo=off --wrapper %s --wrapper %s:*/datadir '
'examples/tests/datadir.py' % (self.dummy.path,
self.script.path))
result = process.run(cmd_line, ignore_status=True)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册