Commit 7d0e6a44 authored by Cleber Rosa

avocado.utils.process: use bytes for raw stdout/stderr

This brings a change in behaviour: the stdout/stderr of the executed
process will now be of bytes type, instead of a string type.

Two new attributes, implemented as properties, have been added to the
CmdResult class: `stdout_text` and `stderr_text`.  These are
convenience properties that return the same content that is in
`stdout` and `stderr`, respectively, but decoded on the fly[1].
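
As an illustration, a minimal sketch of the resulting behaviour on
Python 3 (the command and its output here are hypothetical; on Python
2 the raw type is "str", which is the bytes type there):

   >>> result = process.run("echo -n hello")
   >>> type(result.stdout)
   <class 'bytes'>
   >>> result.stdout
   b'hello'
   >>> result.stdout_text  # decoded on access, using result.encoding
   'hello'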

With regard to encoding: if one is not provided, the result of
`sys.getdefaultencoding()` will be used ("utf-8" for Python 3 and
"ascii" for Python 2).

Applications and/or tests using the APIs that return a CmdResult
should, to the best of my knowledge, set an encoding themselves to
get stable behavior across Python versions.  But that is left to
users of this API.
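
For example, a test that wants version-independent behaviour could
decode explicitly (a sketch, not part of this change):

   result = process.run("command")
   stdout = result.stdout.decode("utf-8")  # explicit, stable on Python 2 and 3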

A different tradeoff/design decision has to do with the tests modified
here.  One option is to treat "text" (as in sequences of human-readable
glyphs) as being of Python type "str".  On Python 2, "str" can be
compared to "bytes" because a conversion happens on demand.  That
is, the following is fine on Python 2:

   >>> result = process.run("command")
   >>> "expected" in result.stdout

Where `"expected"` is of type "str" and `result.stdout` is of type
"bytes".  This is not true on Python 3, so either the types must match
or a conversion must be done explicitly.  The solutions to that are:

1) keep this "text" as (of type) "bytes" in the source code itself,
   and avoid the conversion whenever possible
2) keep "strings" in the source code itself, and use the conversion
   provided by `CmdResult.stdout_text` and `CmdResult.stderr_text`.

The approach chosen here is to avoid conversion where possible, that
is, to use "bytes", given that the source code encoding is by
default 'ascii' and most of the "text" dealt with here can be
represented in 'ascii' too.  This is equivalent to doing:

   result = process.run("command")
   b"expected" in result.stdout
   ("errors: %s" % 0) in result.stderr_text

[1] The obvious alternative, instead of decoding these on the fly,
    would be to keep multiple copies of the "same" data.  Decoding on
    the fly assumes that the binary data produced on stdout/stderr
    will usually be larger than its textual counterpart.
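
    Since the decoding happens on every property access, a caller
    that uses the text many times may want to decode once and reuse
    the result (a usage sketch):

       out = result.stdout_text  # decode once, reuse the str from here on
       found = "expected" in out and ("errors: %s" % 0) in out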
Signed-off-by: Cleber Rosa <crosa@redhat.com>
Parent 15d436d7
......@@ -349,7 +349,7 @@ class Job(object):
cmd = "%s show --summary --pretty='%%H'" % git
res = process.run(cmd, ignore_status=True, verbose=False)
if res.exit_status == 0:
top_commit = res.stdout.splitlines()[0][:8]
top_commit = res.stdout_text.splitlines()[0][:8]
return " (GIT commit %s)" % top_commit
finally:
os.chdir(olddir)
......
......@@ -1169,11 +1169,11 @@ class SimpleTest(Test):
if regex is not None:
re_warn = re.compile(regex)
if warn_location in ['all', 'stdout']:
if re_warn.search(result.stdout):
if re_warn.search(result.stdout_text):
raise exceptions.TestWarn(warn_msg % 'stdout')
if warn_location in ['all', 'stderr']:
if re_warn.search(result.stderr):
if re_warn.search(result.stderr_text):
raise exceptions.TestWarn(warn_msg % 'stderr')
if skip_regex is not None:
......@@ -1182,11 +1182,11 @@ class SimpleTest(Test):
"Check the log for details.")
if skip_location in ['all', 'stdout']:
if re_skip.search(result.stdout):
if re_skip.search(result.stdout_text):
raise exceptions.TestSkipError(skip_msg % 'stdout')
if warn_location in ['all', 'stderr']:
if re_skip.search(result.stderr):
if re_skip.search(result.stderr_text):
raise exceptions.TestSkipError(skip_msg % 'stderr')
def test(self):
......
......@@ -222,12 +222,11 @@ class Iso9660IsoInfo(MixInMntDirMount, BaseIso9660):
"""
cmd = 'isoinfo -i %s -d' % path
output = process.system_output(cmd)
if re.findall("\nJoliet", output):
if b"\nJoliet" in output:
self.joliet = True
if re.findall("\nRock Ridge signatures", output):
if b"\nRock Ridge signatures" in output:
self.rock_ridge = True
if re.findall("\nEl Torito", output):
if b"\nEl Torito" in output:
self.el_torito = True
@staticmethod
......
......@@ -27,6 +27,7 @@ import shutil
import signal
import stat
import subprocess
import sys
import threading
import time
......@@ -269,17 +270,35 @@ class CmdResult(object):
:type duration: float
:param pid: ID of the process
:type pid: int
:param encoding: the encoding to use for the text version
of stdout and stderr, with the default being
Python's own (:func:`sys.getdefaultencoding`).
:type encoding: str
"""
def __init__(self, command="", stdout="", stderr="",
exit_status=None, duration=0, pid=None):
exit_status=None, duration=0, pid=None,
encoding=None):
self.command = command
self.exit_status = exit_status
#: The raw stdout (bytes)
self.stdout = stdout
#: The raw stderr (bytes)
self.stderr = stderr
self.duration = duration
self.interrupted = False
self.pid = pid
if encoding is None:
encoding = sys.getdefaultencoding()
self.encoding = encoding
@property
def stdout_text(self):
return self.stdout.decode(self.encoding)
@property
def stderr_text(self):
return self.stderr.decode(self.encoding)
def __repr__(self):
cmd_rep = ("Command: %s\n"
......@@ -1312,12 +1331,12 @@ def system_output(cmd, timeout=None, verbose=True, ignore_status=False,
:type strip_trail_nl: bool
:return: Command output.
:rtype: str
:rtype: bytes
:raise: :class:`CmdError`, if ``ignore_status=False``.
"""
cmd_result = run(cmd=cmd, timeout=timeout, verbose=verbose, ignore_status=ignore_status,
allow_output_check=allow_output_check, shell=shell, env=env,
sudo=sudo, ignore_bg_processes=ignore_bg_processes)
if strip_trail_nl:
return cmd_result.stdout.rstrip('\n\r')
return cmd_result.stdout.rstrip(b'\n\r')
return cmd_result.stdout
......@@ -45,9 +45,9 @@ class DoubleFreeTest(Test):
self.log.info(cmd_result)
output = cmd_result.stdout + cmd_result.stderr
if sys.platform.startswith('darwin'):
pattern = 'pointer being freed was not allocated'
pattern = b'pointer being freed was not allocated'
else:
pattern = 'free(): invalid pointer'
pattern = b'free(): invalid pointer'
self.assertTrue(pattern in output,
msg='Could not find pattern %s in output %s' %
(pattern, output))
......
......@@ -12,7 +12,7 @@ basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
DEBUG_OUT = """
DEBUG_OUT = b"""
Variant mint-debug-amd-virtio-935e: amd@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml, virtio@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml, mint@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml, debug@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml
/distro/mint:init => systemv@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml:/distro/mint
/env/debug:opt_CFLAGS => -O0 -g@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml:/env/debug
......@@ -38,7 +38,7 @@ class MultiplexTests(unittest.TestCase):
if tests is not None:
exp = ("PASS %s | ERROR 0 | FAIL %s | SKIP 0 | WARN 0 | "
"INTERRUPT 0" % tests)
self.assertIn(exp, result.stdout, "%s not in stdout:\n%s"
self.assertIn(exp, result.stdout_text, "%s not in stdout:\n%s"
% (exp, result))
return result
......@@ -52,7 +52,7 @@ class MultiplexTests(unittest.TestCase):
cmd_line = '%s variants -m nonexist' % AVOCADO
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
self.assertIn('No such file or directory', result.stderr)
self.assertIn('No such file or directory', result.stderr_text)
def test_mplex_debug(self):
cmd_line = ('%s variants -c -d -m '
......@@ -106,9 +106,9 @@ class MultiplexTests(unittest.TestCase):
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
result = self.run_and_check(cmd_line, expected_rc, (4, 4))
self.assertIn("(1/8) passtest.py:PassTest.test;short", result.stdout)
self.assertIn("(2/8) passtest.py:PassTest.test;medium", result.stdout)
self.assertIn("(8/8) failtest.py:FailTest.test;longest",
self.assertIn(b"(1/8) passtest.py:PassTest.test;short", result.stdout)
self.assertIn(b"(2/8) passtest.py:PassTest.test;medium", result.stdout)
self.assertIn(b"(8/8) failtest.py:FailTest.test;longest",
result.stdout)
def test_run_mplex_failtest_tests_per_variant(self):
......@@ -119,9 +119,9 @@ class MultiplexTests(unittest.TestCase):
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
result = self.run_and_check(cmd_line, expected_rc, (4, 4))
self.assertIn("(1/8) passtest.py:PassTest.test;short", result.stdout)
self.assertIn("(2/8) failtest.py:FailTest.test;short", result.stdout)
self.assertIn("(8/8) failtest.py:FailTest.test;longest",
self.assertIn(b"(1/8) passtest.py:PassTest.test;short", result.stdout)
self.assertIn(b"(2/8) failtest.py:FailTest.test;short", result.stdout)
self.assertIn(b"(8/8) failtest.py:FailTest.test;longest",
result.stdout)
def test_run_double_mplex(self):
......@@ -155,15 +155,15 @@ class MultiplexTests(unittest.TestCase):
msg_lines = msg.splitlines()
msg_header = '[stdout] Custom variable: %s' % msg_lines[0]
self.assertIn(msg_header, result.stdout,
self.assertIn(msg_header, result.stdout_text,
"Multiplexed variable should produce:"
"\n %s\nwhich is not present in the output:\n %s"
% (msg_header, "\n ".join(result.stdout.splitlines())))
% (msg_header, "\n ".join(result.stdout_text.splitlines())))
for msg_remain in msg_lines[1:]:
self.assertIn('[stdout] %s' % msg_remain, result.stdout,
self.assertIn('[stdout] %s' % msg_remain, result.stdout_text,
"Multiplexed variable should produce:"
"\n %s\nwhich is not present in the output:\n %s"
% (msg_remain, "\n ".join(result.stdout.splitlines())))
% (msg_remain, "\n ".join(result.stdout_text.splitlines())))
def tearDown(self):
shutil.rmtree(self.tmpdir)
......
......@@ -39,8 +39,8 @@ class ArgumentParsingTest(unittest.TestCase):
expected_rc = exit_codes.AVOCADO_FAIL
self.assertEqual(result.exit_status, expected_rc,
'Avocado did not return rc %d:\n%s' % (expected_rc, result))
subcommand_error_msg = 'avocado run: error: unrecognized arguments: '\
'--whacky-argument'
subcommand_error_msg = (b'avocado run: error: unrecognized arguments: '
b'--whacky-argument')
self.assertIn(subcommand_error_msg, result.stderr)
......
This diff is collapsed.
......@@ -148,10 +148,10 @@ class InterruptTest(unittest.TestCase):
output = self.proc.stdout.read()
# Make sure the Interrupted requested sentence is there
self.assertIn('Interrupt requested. Waiting 2 seconds for test to '
'finish (ignoring new Ctrl+C until then)', output)
self.assertIn(b'Interrupt requested. Waiting 2 seconds for test to '
b'finish (ignoring new Ctrl+C until then)', output)
# Make sure the Killing test subprocess message did appear
self.assertIn('Killing test subprocess', output)
self.assertIn(b'Killing test subprocess', output)
@unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
"Skipping test that take a long time to run, are "
......@@ -191,7 +191,7 @@ class InterruptTest(unittest.TestCase):
timeout=10), 'Avocado left processes behind.')
# Make sure the Interrupted test sentence is there
self.assertIn('Terminated\n', self.proc.stdout.read())
self.assertIn(b'Terminated\n', self.proc.stdout.read())
@unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
"Skipping test that take a long time to run, are "
......@@ -232,10 +232,10 @@ class InterruptTest(unittest.TestCase):
output = self.proc.stdout.read()
# Make sure the Interrupted requested sentence is there
self.assertIn('Interrupt requested. Waiting 2 seconds for test to '
'finish (ignoring new Ctrl+C until then)', output)
self.assertIn(b'Interrupt requested. Waiting 2 seconds for test to '
b'finish (ignoring new Ctrl+C until then)', output)
# Make sure the Killing test subprocess message is not there
self.assertNotIn('Killing test subprocess', output)
self.assertNotIn(b'Killing test subprocess', output)
@unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
"Skipping test that take a long time to run, are "
......@@ -275,7 +275,7 @@ class InterruptTest(unittest.TestCase):
timeout=10), 'Avocado left processes behind.')
# Make sure the Interrupted test sentence is there
self.assertIn('Terminated\n', self.proc.stdout.read())
self.assertIn(b'Terminated\n', self.proc.stdout.read())
def tearDown(self):
shutil.rmtree(self.tmpdir)
......
......@@ -164,7 +164,7 @@ class LoaderTestFunctional(unittest.TestCase):
test_script.save()
cmd_line = ('%s list -V %s' % (AVOCADO, test_script.path))
result = process.run(cmd_line)
self.assertIn('%s: %s' % (exp_str, count), result.stdout)
self.assertIn('%s: %s' % (exp_str, count), result.stdout_text)
test_script.remove()
def _run_with_timeout(self, cmd_line, timeout):
......@@ -213,7 +213,7 @@ class LoaderTestFunctional(unittest.TestCase):
("Took more than 3 seconds to list tests. Loader "
"probably loaded/executed Python code and slept for "
"eleven seconds."))
self.assertIn('INSTRUMENTED: 2', result.stdout)
self.assertIn(b'INSTRUMENTED: 2', result.stdout)
def test_multiple_class(self):
self._test('multipleclasses.py', AVOCADO_TEST_MULTIPLE_CLASSES,
......@@ -246,7 +246,7 @@ class LoaderTestFunctional(unittest.TestCase):
mytest.save()
cmd_line = "%s list -V %s" % (AVOCADO, mytest)
result = process.run(cmd_line)
self.assertIn('SIMPLE: 1', result.stdout)
self.assertIn(b'SIMPLE: 1', result.stdout)
# job should be able to finish under 5 seconds. If this fails, it's
# possible that we hit the "simple test fork bomb" bug
cmd_line = ("%s run --sysinfo=off --job-results-dir '%s' -- '%s'"
......@@ -357,10 +357,10 @@ class LoaderTestFunctional(unittest.TestCase):
"""
cmd = "%s list examples/tests/:fail" % AVOCADO
result = process.run(cmd)
expected = ("INSTRUMENTED examples/tests/doublefail.py:DoubleFail.test\n"
"INSTRUMENTED examples/tests/fail_on_exception.py:FailOnException.test\n"
"INSTRUMENTED examples/tests/failtest.py:FailTest.test\n"
"SIMPLE examples/tests/failtest.sh\n")
expected = (b"INSTRUMENTED examples/tests/doublefail.py:DoubleFail.test\n"
b"INSTRUMENTED examples/tests/fail_on_exception.py:FailOnException.test\n"
b"INSTRUMENTED examples/tests/failtest.py:FailTest.test\n"
b"SIMPLE examples/tests/failtest.sh\n")
self.assertEqual(expected, result.stdout)
def tearDown(self):
......
......@@ -157,7 +157,7 @@ class OutputTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
bad_string = 'double free or corruption'
bad_string = b'double free or corruption'
self.assertNotIn(bad_string, output,
"Libc double free can be seen in avocado "
"doublefree output:\n%s" % output)
......@@ -313,7 +313,7 @@ class OutputPluginTest(unittest.TestCase):
'(--xunit)): Options ((--xunit --json)|'
'(--json --xunit)) are trying to use stdout '
'simultaneously\n')
self.assertIsNotNone(error_regex.match(result.stderr),
self.assertIsNotNone(error_regex.match(result.stderr_text),
"Missing error message from output:\n%s" %
result.stderr)
......@@ -328,7 +328,7 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
error_excerpt = "HTML to stdout not supported"
error_excerpt = b"HTML to stdout not supported"
self.assertIn(error_excerpt, output,
"Missing excerpt error message from output:\n%s" % output)
......@@ -427,7 +427,7 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, "", "Output is not empty:\n%s" % output)
self.assertEqual(output, b"", "Output is not empty:\n%s" % output)
# Check if we are producing valid outputs
with open(tmpfile2, 'r') as fp:
json_results = json.load(fp)
......@@ -447,7 +447,7 @@ class OutputPluginTest(unittest.TestCase):
"--job-results-dir %s --sysinfo=off"
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
output = result.stdout_text + result.stderr_text
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
......@@ -469,7 +469,7 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
job_id_list = re.findall('Job ID: (.*)', result.stdout,
job_id_list = re.findall('Job ID: (.*)', result.stdout_text,
re.MULTILINE)
self.assertTrue(job_id_list, 'No Job ID in stdout:\n%s' %
result.stdout)
......@@ -487,7 +487,7 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, "")
self.assertEqual(output, b"")
def test_default_enabled_plugins(self):
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
......@@ -588,7 +588,7 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, '',
self.assertEqual(output, b'',
'After redirecting to file, output is not empty: %s' % output)
with open(redirected_output_path, 'r') as redirected_output_file_obj:
redirected_output = redirected_output_file_obj.read()
......@@ -617,7 +617,7 @@ class OutputPluginTest(unittest.TestCase):
"--job-results-dir %s "
"--tap -" % (AVOCADO, self.tmpdir))
result = process.run(cmd_line)
expr = '1..4'
expr = b'1..4'
self.assertIn(expr, result.stdout, "'%s' not found in:\n%s"
% (expr, result.stdout))
......@@ -630,9 +630,9 @@ class OutputPluginTest(unittest.TestCase):
("avocado run to broken pipe did not return "
"rc %d:\n%s" % (expected_rc, result)))
self.assertEqual(len(result.stderr.splitlines()), 1)
self.assertIn("whacky-unknown-command", result.stderr)
self.assertIn("not found", result.stderr)
self.assertNotIn("Avocado crashed", result.stderr)
self.assertIn(b"whacky-unknown-command", result.stderr)
self.assertIn(b"not found", result.stderr)
self.assertNotIn(b"Avocado crashed", result.stderr)
def test_results_plugins_no_tests(self):
cmd_line = ("%s run UNEXISTING --job-results-dir %s"
......@@ -650,11 +650,11 @@ class OutputPluginTest(unittest.TestCase):
self.assertFalse(os.path.exists(tap_results))
# Check that no UI output was generated
self.assertNotIn("RESULTS : PASS ", result.stdout)
self.assertNotIn("JOB TIME :", result.stdout)
self.assertNotIn(b"RESULTS : PASS ", result.stdout)
self.assertNotIn(b"JOB TIME :", result.stdout)
# Check that plugins do not produce errors
self.assertNotIn("Error running method ", result.stderr)
self.assertNotIn(b"Error running method ", result.stderr)
def tearDown(self):
shutil.rmtree(self.tmpdir)
......
......@@ -50,20 +50,16 @@ class DiffTests(unittest.TestCase):
(AVOCADO, self.jobdir, self.jobdir2))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = "# COMMAND LINE"
self.assertIn(msg, result.stdout)
msg = "-./scripts/avocado run"
self.assertIn(msg, result.stdout)
msg = "+./scripts/avocado run"
self.assertIn(msg, result.stdout)
self.assertIn(b"# COMMAND LINE", result.stdout)
self.assertIn(b"-./scripts/avocado run", result.stdout)
self.assertIn(b"+./scripts/avocado run", result.stdout)
def test_diff_nocmdline(self):
cmd_line = ('%s diff %s %s --diff-filter nocmdline' %
(AVOCADO, self.jobdir, self.jobdir2))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = "# COMMAND LINE"
self.assertNotIn(msg, result.stdout)
self.assertNotIn(b"# COMMAND LINE", result.stdout)
def tearDown(self):
shutil.rmtree(self.tmpdir)
......
......@@ -79,9 +79,9 @@ class JobScriptsTest(unittest.TestCase):
# Pre/Post scripts failures do not (currently?) alter the exit status
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertNotIn('Pre job script "%s" exited with status "1"' % touch_script,
result.stderr)
result.stderr_text)
self.assertNotIn('Post job script "%s" exited with status "1"' % rm_script,
result.stderr)
result.stderr_text)
def test_status_non_zero(self):
"""
......@@ -102,7 +102,7 @@ class JobScriptsTest(unittest.TestCase):
# Pre/Post scripts failures do not (currently?) alter the exit status
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertEqual('Pre job script "%s" exited with status "1"\n' % non_zero_script,
result.stderr)
result.stderr_text)
def test_non_existing_dir(self):
"""
......@@ -124,9 +124,9 @@ class JobScriptsTest(unittest.TestCase):
# Pre/Post scripts failures do not (currently?) alter the exit status
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertIn('-job scripts has not been found', result.stderr)
self.assertIn(b'-job scripts has not been found', result.stderr)
self.assertNotIn('Pre job script "%s" exited with status "1"' % non_zero_script,
result.stderr)
result.stderr_text)
def tearDown(self):
shutil.rmtree(self.tmpdir)
......
......@@ -106,8 +106,8 @@ class ReplayTests(unittest.TestCase):
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Invalid --replay-ignore option. Valid options are ' \
'(more than one allowed): variants,config'
msg = (b'Invalid --replay-ignore option. Valid options are '
b'(more than one allowed): variants,config')
self.assertIn(msg, result.stderr)
def test_run_replay_ignorevariants(self):
......@@ -119,7 +119,7 @@ class ReplayTests(unittest.TestCase):
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Ignoring variants from source job with --replay-ignore.'
msg = b'Ignoring variants from source job with --replay-ignore.'
self.assertIn(msg, result.stderr)
def test_run_replay_invalidstatus(self):
......@@ -131,8 +131,8 @@ class ReplayTests(unittest.TestCase):
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Invalid --replay-test-status option. Valid options are (more ' \
'than one allowed): SKIP,ERROR,FAIL,WARN,PASS,INTERRUPTED'
msg = (b'Invalid --replay-test-status option. Valid options are (more '
b'than one allowed): SKIP,ERROR,FAIL,WARN,PASS,INTERRUPTED')
self.assertIn(msg, result.stderr)
def test_run_replay_statusfail(self):
......@@ -144,7 +144,8 @@ class ReplayTests(unittest.TestCase):
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = 'RESULTS : PASS 0 | ERROR 0 | FAIL 0 | SKIP 4 | WARN 0 | INTERRUPT 0'
msg = (b'RESULTS : PASS 0 | ERROR 0 | FAIL 0 | SKIP 4 | WARN 0 | '
b'INTERRUPT 0')
self.assertIn(msg, result.stdout)
def test_run_replay_remotefail(self):
......@@ -156,7 +157,7 @@ class ReplayTests(unittest.TestCase):
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = "Currently we don't replay jobs in remote hosts."
msg = b"Currently we don't replay jobs in remote hosts."
self.assertIn(msg, result.stderr)
def test_run_replay_status_and_variants(self):
......@@ -168,8 +169,8 @@ class ReplayTests(unittest.TestCase):
'--sysinfo=off' % (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = ("Option `--replay-test-status` is incompatible with "
"`--replay-ignore variants`")
msg = (b"Option `--replay-test-status` is incompatible with "
b"`--replay-ignore variants`")
self.assertIn(msg, result.stderr)
def test_run_replay_status_and_references(self):
......@@ -181,8 +182,8 @@ class ReplayTests(unittest.TestCase):
'--sysinfo=off' % (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = ("Option --replay-test-status is incompatible with "
"test references given on the command line.")
msg = (b"Option --replay-test-status is incompatible with "
b"test references given on the command line.")
self.assertIn(msg, result.stderr)
def test_run_replay_and_mux(self):
......
......@@ -47,8 +47,8 @@ class ReplayExtRunnerTests(unittest.TestCase):
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = "Overriding the replay external-runner with the "\
"--external-runner value given on the command line."
msg = (b"Overriding the replay external-runner with the "
b"--external-runner value given on the command line.")
self.assertIn(msg, result.stderr)
def tearDown(self):
......
......@@ -49,7 +49,8 @@ class ReplayFailfastTests(unittest.TestCase):
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Overriding the replay failfast with the --failfast value given on the command line.'
msg = (b'Overriding the replay failfast with the --failfast value '
b'given on the command line.')
self.assertIn(msg, result.stderr)
def tearDown(self):
......
......@@ -50,7 +50,7 @@ class StandaloneTests(unittest.TestCase):
exc = "errortest_nasty.NastyException: Nasty-string-like-exception"
else:
exc = "NastyException: Nasty-string-like-exception"
count = result.stdout.count("\n%s" % exc)
count = result.stdout_text.count("\n%s" % exc)
self.assertEqual(count, 2, "Exception \\n%s should be present twice in"
"the log (once from the log, second time when parsing"
"exception details." % (exc))
......@@ -59,17 +59,17 @@ class StandaloneTests(unittest.TestCase):
cmd_line = './examples/tests/errortest_nasty2.py -r'
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
result = self.run_and_check(cmd_line, expected_rc, 'errortest_nasty2')
self.assertIn("Exception: Unable to get exception, check the traceback"
" for details.", result.stdout)
self.assertIn(b"Exception: Unable to get exception, check the traceback"
b" for details.", result.stdout)
def test_errortest_nasty3(self):
cmd_line = './examples/tests/errortest_nasty3.py -r'
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
result = self.run_and_check(cmd_line, expected_rc, 'errortest_nasty3')
if sys.version_info[0] == 3:
exc = "TypeError: exceptions must derive from BaseException"
exc = b"TypeError: exceptions must derive from BaseException"
else:
exc = "TestError: <errortest_nasty3.NastyException instance at "
exc = b"TestError: <errortest_nasty3.NastyException instance at "
self.assertIn(exc, result.stdout)
def test_errortest(self):
......
......@@ -25,8 +25,8 @@ class StreamsTest(unittest.TestCase):
"""
result = process.run('%s distro' % AVOCADO)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertIn('Detected distribution', result.stdout)
self.assertEqual('', result.stderr)
self.assertIn(b'Detected distribution', result.stdout)
self.assertEqual(b'', result.stderr)
def test_app_error_stderr(self):
"""
......@@ -35,12 +35,12 @@ class StreamsTest(unittest.TestCase):
result = process.run('%s unknown-whacky-command' % AVOCADO,
ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_FAIL)
self.assertIn("invalid choice: 'unknown-whacky-command'",
self.assertIn(b"invalid choice: 'unknown-whacky-command'",
result.stderr)
self.assertNotIn("invalid choice: 'unknown-whacky-command'",
self.assertNotIn(b"invalid choice: 'unknown-whacky-command'",
result.stdout)
self.assertIn("Avocado Test Runner", result.stdout)
self.assertNotIn("Avocado Test Runner", result.stderr)
self.assertIn(b"Avocado Test Runner", result.stdout)
self.assertNotIn(b"Avocado Test Runner", result.stderr)
def test_other_stream_early_stdout(self):
"""
......@@ -58,11 +58,11 @@ class StreamsTest(unittest.TestCase):
for cmd, env in cmds:
result = process.run(cmd, env=env, shell=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertIn("stevedore.extension: found extension EntryPoint.parse",
self.assertIn(b"stevedore.extension: found extension EntryPoint.parse",
result.stdout)
self.assertIn("avocado.test: Command line: %s" % cmd,
result.stdout)
self.assertEqual('', result.stderr)
result.stdout_text)
self.assertEqual(b'', result.stderr)
def test_test(self):
"""
......@@ -76,16 +76,16 @@ class StreamsTest(unittest.TestCase):
' passtest.py' % (AVOCADO, self.tmpdir))):
result = process.run(cmd)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertNotIn("stevedore.extension: found extension EntryPoint.parse",
self.assertNotIn(b"stevedore.extension: found extension EntryPoint.parse",
result.stdout)
self.assertNotIn("stevedore.extension: found extension EntryPoint.parse",
self.assertNotIn(b"stevedore.extension: found extension EntryPoint.parse",
result.stderr)
self.assertIn("Command line: %s" % cmd,
result.stdout_text)
self.assertIn(b"\nSTART 1-passtest.py:PassTest.test",
result.stdout)
self.assertIn("\nSTART 1-passtest.py:PassTest.test",
result.stdout)
self.assertIn("PASS 1-passtest.py:PassTest.test", result.stdout)
self.assertEqual('', result.stderr)
self.assertIn(b"PASS 1-passtest.py:PassTest.test", result.stdout)
self.assertEqual(b'', result.stderr)
def test_none_success(self):
"""
......@@ -99,8 +99,8 @@ class StreamsTest(unittest.TestCase):
'passtest.py' % (AVOCADO, self.tmpdir))):
result = process.run(cmd)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertEqual('', result.stdout)
self.assertEqual('', result.stderr)
self.assertEqual(b'', result.stdout)
self.assertEqual(b'', result.stderr)
def test_none_error(self):
"""
......@@ -112,8 +112,8 @@ class StreamsTest(unittest.TestCase):
'%s --silent unknown-whacky-command' % AVOCADO):
result = process.run(cmd, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_FAIL)
self.assertEqual('', result.stdout)
self.assertNotEqual('', result.stderr)
self.assertEqual(b'', result.stdout)
self.assertNotEqual(b'', result.stderr)
def test_custom_stream_and_level(self):
"""
......
......@@ -35,7 +35,7 @@ class SysInfoTest(unittest.TestCase):
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
'Avocado did not return rc %d:\n%s' % (expected_rc, result))
output = result.stdout + result.stderr
output = result.stdout_text + result.stderr_text
sysinfo_dir = None
for line in output.splitlines():
if 'JOB LOG' in line:
......@@ -61,7 +61,7 @@ class SysInfoTest(unittest.TestCase):
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
'Avocado did not return rc %d:\n%s' % (expected_rc, result))
output = result.stdout + result.stderr
output = result.stdout_text + result.stderr_text
sysinfo_dir = None
for line in output.splitlines():
if 'JOB LOG' in line:
......
......@@ -91,23 +91,23 @@ class UnittestCompat(unittest.TestCase):
cmd_line = '%s %s' % (sys.executable, self.unittest_script_good)
result = process.run(cmd_line)
self.assertEqual(0, result.exit_status)
self.assertIn('Ran 1 test in', result.stderr)
self.assertIn(b'Ran 1 test in', result.stderr)
def test_run_fail(self):
cmd_line = '%s %s' % (sys.executable, self.unittest_script_fail)
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(1, result.exit_status)
self.assertIn('Ran 1 test in', result.stderr)
self.assertIn('This test is supposed to fail', result.stderr)
self.assertIn('FAILED (failures=1)', result.stderr)
self.assertIn(b'Ran 1 test in', result.stderr)
self.assertIn(b'This test is supposed to fail', result.stderr)
self.assertIn(b'FAILED (failures=1)', result.stderr)
def test_run_error(self):
cmd_line = '%s %s' % (sys.executable, self.unittest_script_error)
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(1, result.exit_status)
self.assertIn('Ran 1 test in', result.stderr)
self.assertIn('This test is supposed to error', result.stderr)
self.assertIn('FAILED (errors=1)', result.stderr)
self.assertIn(b'Ran 1 test in', result.stderr)
self.assertIn(b'This test is supposed to error', result.stderr)
self.assertIn(b'FAILED (errors=1)', result.stderr)
def tearDown(self):
self.unittest_script_error.remove()
......
......@@ -152,7 +152,7 @@ class ProcessTest(unittest.TestCase):
proc = process.SubProcess(self.fake_uptime)
result = proc.run()
self.assertEqual(result.exit_status, 0, 'result: %s' % result)
self.assertIn('load average', result.stdout)
self.assertIn(b'load average', result.stdout)
def tearDown(self):
shutil.rmtree(self.base_logdir)
......