Unverified commit de63af37, authored by Amador Pahim

Merge branch 'clebergnu-selftests_and_misc_fixes_4th_batch_v2'

Signed-off-by: Amador Pahim <apahim@redhat.com>
@@ -349,7 +349,7 @@ class Job(object):
cmd = "%s show --summary --pretty='%%H'" % git
res = process.run(cmd, ignore_status=True, verbose=False)
if res.exit_status == 0:
top_commit = res.stdout.splitlines()[0][:8]
top_commit = res.stdout_text.splitlines()[0][:8]
return " (GIT commit %s)" % top_commit
finally:
os.chdir(olddir)
@@ -1169,11 +1169,11 @@ class SimpleTest(Test):
if regex is not None:
re_warn = re.compile(regex)
if warn_location in ['all', 'stdout']:
if re_warn.search(result.stdout):
if re_warn.search(result.stdout_text):
raise exceptions.TestWarn(warn_msg % 'stdout')
if warn_location in ['all', 'stderr']:
if re_warn.search(result.stderr):
if re_warn.search(result.stderr_text):
raise exceptions.TestWarn(warn_msg % 'stderr')
if skip_regex is not None:
@@ -1182,11 +1182,11 @@ class SimpleTest(Test):
"Check the log for details.")
if skip_location in ['all', 'stdout']:
if re_skip.search(result.stdout):
if re_skip.search(result.stdout_text):
raise exceptions.TestSkipError(skip_msg % 'stdout')
if warn_location in ['all', 'stderr']:
if re_skip.search(result.stderr):
if re_skip.search(result.stderr_text):
raise exceptions.TestSkipError(skip_msg % 'stderr')
def test(self):
@@ -405,7 +405,7 @@ def tree_view(root, verbose=None, use_utf8=None):
down_right = charset['DownRight']
right = charset['Right']
out = [node.name]
if verbose >= 2 and node.is_leaf:
if verbose is not None and verbose >= 2 and node.is_leaf:
values = itertools.chain(iteritems(node.environment),
[("filter-only", _)
for _ in node.environment.filter_only],
@@ -466,7 +466,7 @@ def tree_view(root, verbose=None, use_utf8=None):
down_right = charset['DownRight']
right = charset['Right']
out = []
if (verbose >= 2) and root.is_leaf:
if verbose is not None and verbose >= 2 and root.is_leaf:
values = iteritems(root.environment)
elif verbose in (1, 3):
values = iteritems(root.value)
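A note on the two tree_view hunks above: the added `verbose is not None` guard is what makes the code safe on Python 3, where ordering comparisons against None raise TypeError (Python 2 silently evaluated `None >= 2` to False). A minimal sketch of the difference:

    verbose = None
    # Python 2: None >= 2 evaluates to False
    # Python 3: TypeError: '>=' not supported between instances of
    #           'NoneType' and 'int'
    if verbose is not None and verbose >= 2:  # safe on both versions
        print('very verbose output')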
@@ -18,7 +18,7 @@
import datetime
import os
import string
from xml.dom.minidom import Document, Element
from xml.dom.minidom import Document
from avocado.core.parser import FileOrStdoutAction
from avocado.core.output import LOG_UI
@@ -54,13 +54,13 @@ class XUnitResult(Result):
return testcase
def _create_failure_or_error(self, document, test, element_type):
element = Element(element_type)
element = document.createElement(element_type)
element.setAttribute('type', self._get_attr(test, 'fail_class'))
element.setAttribute('message', self._get_attr(test, 'fail_reason'))
traceback_content = self._escape_cdata(test.get('traceback', self.UNKNOWN))
traceback = document.createCDATASection(traceback_content)
element.appendChild(traceback)
system_out = Element('system-out')
system_out = document.createElement('system-out')
try:
with open(test.get("logfile"), "r") as logfile_obj:
text_output = logfile_obj.read()
@@ -88,7 +88,7 @@ class XUnitResult(Result):
if status in ('PASS', 'WARN'):
pass
elif status == 'SKIP':
testcase.appendChild(Element('skipped'))
testcase.appendChild(document.createElement('skipped'))
elif status == 'FAIL':
element, system_out = self._create_failure_or_error(document,
test,
@@ -96,7 +96,7 @@ class XUnitResult(Result):
testcase.appendChild(element)
testcase.appendChild(system_out)
elif status == 'CANCEL':
testcase.appendChild(Element('skipped'))
testcase.appendChild(document.createElement('skipped'))
else:
element, system_out = self._create_failure_or_error(document,
test,
@@ -123,7 +123,7 @@ class XUnitResult(Result):
xunit_path = getattr(job.args, 'xunit_output', 'None')
if xunit_path is not None:
if xunit_path == '-':
LOG_UI.debug(content)
LOG_UI.debug(content.decode('UTF-8'))
else:
with open(xunit_path, 'wb') as xunit_file:
xunit_file.write(content)
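Two Python 3 details drive the xunit hunks above: nodes are created with `document.createElement()` so each element is bound to its owning Document instead of being instantiated from the bare minidom `Element` class, and the serialized report is decoded before logging because minidom returns bytes when asked for an encoded serialization, while the UI logger expects text. A minimal sketch, assuming the report is serialized as UTF-8:

    from xml.dom.minidom import Document

    document = Document()
    document.appendChild(document.createElement('testsuite'))
    content = document.toxml(encoding='UTF-8')  # bytes on Python 3
    print(content.decode('UTF-8'))              # decoded text, safe to log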
@@ -222,12 +222,11 @@ class Iso9660IsoInfo(MixInMntDirMount, BaseIso9660):
"""
cmd = 'isoinfo -i %s -d' % path
output = process.system_output(cmd)
if re.findall("\nJoliet", output):
if b"\nJoliet" in output:
self.joliet = True
if re.findall("\nRock Ridge signatures", output):
if b"\nRock Ridge signatures" in output:
self.rock_ridge = True
if re.findall("\nEl Torito", output):
if b"\nEl Torito" in output:
self.el_torito = True
@staticmethod
@@ -287,8 +286,8 @@ class Iso9660IsoRead(MixInMntDirMount, BaseIso9660):
temp_path = os.path.join(self.temp_dir, path)
cmd = 'iso-read -i %s -e %s -o %s' % (self.path, path, temp_path)
process.run(cmd)
with open(temp_path) as temp_file:
return temp_file.read()
with open(temp_path, 'rb') as temp_file:
return bytes(temp_file.read())
def copy(self, src, dst):
cmd = 'iso-read -i %s -e %s -o %s' % (self.path, src, dst)
@@ -331,8 +330,8 @@ class Iso9660Mount(BaseIso9660):
:rtype: str
"""
full_path = os.path.join(self.mnt_dir, path)
with open(full_path) as file_to_read:
return file_to_read.read()
with open(full_path, 'rb') as file_to_read:
return bytes(file_to_read.read())
def copy(self, src, dst):
"""
@@ -27,6 +27,7 @@ import shutil
import signal
import stat
import subprocess
import sys
import threading
import time
@@ -269,17 +270,35 @@ class CmdResult(object):
:type duration: float
:param pid: ID of the process
:type pid: int
:param encoding: the encoding to use for the text version
of stdout and stderr, with the default being
Python's own (:func:`sys.getdefaultencoding`).
:type encoding: str
"""
def __init__(self, command="", stdout="", stderr="",
exit_status=None, duration=0, pid=None):
exit_status=None, duration=0, pid=None,
encoding=None):
self.command = command
self.exit_status = exit_status
#: The raw stdout (bytes)
self.stdout = stdout
#: The raw stderr (bytes)
self.stderr = stderr
self.duration = duration
self.interrupted = False
self.pid = pid
if encoding is None:
encoding = sys.getdefaultencoding()
self.encoding = encoding
@property
def stdout_text(self):
return self.stdout.decode(self.encoding)
@property
def stderr_text(self):
return self.stderr.decode(self.encoding)
def __repr__(self):
cmd_rep = ("Command: %s\n"
@@ -1312,12 +1331,12 @@ def system_output(cmd, timeout=None, verbose=True, ignore_status=False,
:type strip_trail_nl: bool
:return: Command output.
:rtype: str
:rtype: bytes
:raise: :class:`CmdError`, if ``ignore_status=False``.
"""
cmd_result = run(cmd=cmd, timeout=timeout, verbose=verbose, ignore_status=ignore_status,
allow_output_check=allow_output_check, shell=shell, env=env,
sudo=sudo, ignore_bg_processes=ignore_bg_processes)
if strip_trail_nl:
return cmd_result.stdout.rstrip('\n\r')
return cmd_result.stdout.rstrip(b'\n\r')
return cmd_result.stdout
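The `CmdResult` hunks above establish the bytes/text split used throughout the rest of this commit: `stdout` and `stderr` remain raw bytes, while the new `stdout_text`/`stderr_text` properties decode on access with a per-result encoding that defaults to `sys.getdefaultencoding()`; consistently, `system_output()` now documents a bytes return value and strips trailing newlines with a bytes pattern. A minimal usage sketch:

    from avocado.utils import process

    result = process.run('echo hello', ignore_status=True)
    assert isinstance(result.stdout, bytes)  # raw output, unchanged API
    assert 'hello' in result.stdout_text     # decoded lazily on access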
@@ -45,9 +45,9 @@ class DoubleFreeTest(Test):
self.log.info(cmd_result)
output = cmd_result.stdout + cmd_result.stderr
if sys.platform.startswith('darwin'):
pattern = 'pointer being freed was not allocated'
pattern = b'pointer being freed was not allocated'
else:
pattern = 'free(): invalid pointer'
pattern = b'free(): invalid pointer'
self.assertTrue(pattern in output,
msg='Could not find pattern %s in output %s' %
(pattern, output))
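Most of the selftest changes from here on are mechanical: literals compared against `CmdResult.stdout`/`stderr` become bytes literals, because on Python 3 mixing str and bytes in a containment check raises TypeError instead of coercing as Python 2 did. For example:

    output = b'... free(): invalid pointer ...'
    assert b'free(): invalid pointer' in output  # bytes in bytes: fine
    # 'free(): invalid pointer' in output        # Python 3: TypeError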
@@ -12,7 +12,7 @@ basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
DEBUG_OUT = """
DEBUG_OUT = b"""
Variant mint-debug-amd-virtio-935e: amd@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml, virtio@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml, mint@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml, debug@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml
/distro/mint:init => systemv@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml:/distro/mint
/env/debug:opt_CFLAGS => -O0 -g@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml:/env/debug
@@ -38,7 +38,7 @@ class MultiplexTests(unittest.TestCase):
if tests is not None:
exp = ("PASS %s | ERROR 0 | FAIL %s | SKIP 0 | WARN 0 | "
"INTERRUPT 0" % tests)
self.assertIn(exp, result.stdout, "%s not in stdout:\n%s"
self.assertIn(exp, result.stdout_text, "%s not in stdout:\n%s"
% (exp, result))
return result
@@ -52,7 +52,7 @@ class MultiplexTests(unittest.TestCase):
cmd_line = '%s variants -m nonexist' % AVOCADO
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
self.assertIn('No such file or directory', result.stderr)
self.assertIn('No such file or directory', result.stderr_text)
def test_mplex_debug(self):
cmd_line = ('%s variants -c -d -m '
@@ -106,9 +106,9 @@ class MultiplexTests(unittest.TestCase):
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
result = self.run_and_check(cmd_line, expected_rc, (4, 4))
self.assertIn("(1/8) passtest.py:PassTest.test;short", result.stdout)
self.assertIn("(2/8) passtest.py:PassTest.test;medium", result.stdout)
self.assertIn("(8/8) failtest.py:FailTest.test;longest",
self.assertIn(b"(1/8) passtest.py:PassTest.test;short", result.stdout)
self.assertIn(b"(2/8) passtest.py:PassTest.test;medium", result.stdout)
self.assertIn(b"(8/8) failtest.py:FailTest.test;longest",
result.stdout)
def test_run_mplex_failtest_tests_per_variant(self):
@@ -119,9 +119,9 @@ class MultiplexTests(unittest.TestCase):
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
result = self.run_and_check(cmd_line, expected_rc, (4, 4))
self.assertIn("(1/8) passtest.py:PassTest.test;short", result.stdout)
self.assertIn("(2/8) failtest.py:FailTest.test;short", result.stdout)
self.assertIn("(8/8) failtest.py:FailTest.test;longest",
self.assertIn(b"(1/8) passtest.py:PassTest.test;short", result.stdout)
self.assertIn(b"(2/8) failtest.py:FailTest.test;short", result.stdout)
self.assertIn(b"(8/8) failtest.py:FailTest.test;longest",
result.stdout)
def test_run_double_mplex(self):
@@ -155,15 +155,15 @@ class MultiplexTests(unittest.TestCase):
msg_lines = msg.splitlines()
msg_header = '[stdout] Custom variable: %s' % msg_lines[0]
self.assertIn(msg_header, result.stdout,
self.assertIn(msg_header, result.stdout_text,
"Multiplexed variable should produce:"
"\n %s\nwhich is not present in the output:\n %s"
% (msg_header, "\n ".join(result.stdout.splitlines())))
% (msg_header, "\n ".join(result.stdout_text.splitlines())))
for msg_remain in msg_lines[1:]:
self.assertIn('[stdout] %s' % msg_remain, result.stdout,
self.assertIn('[stdout] %s' % msg_remain, result.stdout_text,
"Multiplexed variable should produce:"
"\n %s\nwhich is not present in the output:\n %s"
% (msg_remain, "\n ".join(result.stdout.splitlines())))
% (msg_remain, "\n ".join(result.stdout_text.splitlines())))
def tearDown(self):
shutil.rmtree(self.tmpdir)
@@ -2,9 +2,10 @@ import copy
import itertools
import os
import pickle
import sys
import unittest
import yaml
import yaml
from six import iteritems
import avocado_varianter_yaml_to_mux as yaml_to_mux
@@ -420,8 +421,13 @@ class TestMultipleLoaders(unittest.TestCase):
debug = yaml_to_mux.create_from_yaml([yaml_url], debug=True)
self.assertEqual(type(debug), mux.MuxTreeNodeDebug)
# Debug nodes are of generated "NamedTreeNodeDebug" type
self.assertEqual("<class 'avocado_varianter_yaml_to_mux.NamedTreeNodeDebug'>",
str(type(debug.children[0])))
if sys.version_info[0] == 3:
children_type = ("<class 'avocado_varianter_yaml_to_mux."
"get_named_tree_cls.<locals>.NamedTreeNodeDebug'>")
else:
children_type = ("<class 'avocado_varianter_yaml_to_mux."
"NamedTreeNodeDebug'>")
self.assertEqual(children_type, str(type(debug.children[0])))
plain = yaml.load("foo: bar")
self.assertEqual(type(plain), dict)
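The version branch above is needed because, on Python 3, a class defined inside a function gets a qualified name that records the enclosing scope with a `<locals>` marker, while Python 2 reports only the module and class name. A minimal illustration:

    def get_named_tree_cls():
        class NamedTreeNodeDebug(object):
            pass
        return NamedTreeNodeDebug

    # Python 3: <class '__main__.get_named_tree_cls.<locals>.NamedTreeNodeDebug'>
    # Python 2: <class '__main__.NamedTreeNodeDebug'>
    print(type(get_named_tree_cls()()))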
@@ -39,8 +39,8 @@ class ArgumentParsingTest(unittest.TestCase):
expected_rc = exit_codes.AVOCADO_FAIL
self.assertEqual(result.exit_status, expected_rc,
'Avocado did not return rc %d:\n%s' % (expected_rc, result))
subcommand_error_msg = 'avocado run: error: unrecognized arguments: '\
'--whacky-argument'
subcommand_error_msg = (b'avocado run: error: unrecognized arguments: '
b'--whacky-argument')
self.assertIn(subcommand_error_msg, result.stderr)
@@ -140,7 +140,7 @@ GNU_ECHO_BINARY = probe_binary('echo')
if GNU_ECHO_BINARY is not None:
if probe_binary('man') is not None:
echo_manpage = process.run('man %s' % os.path.basename(GNU_ECHO_BINARY)).stdout
if '-e' not in echo_manpage:
if b'-e' not in echo_manpage:
GNU_ECHO_BINARY = probe_binary('gecho')
READ_BINARY = probe_binary('read')
SLEEP_BINARY = probe_binary('sleep')
@@ -163,9 +163,9 @@ class RunnerOperationTest(unittest.TestCase):
def test_show_version(self):
result = process.run('%s -v' % AVOCADO, ignore_status=True)
self.assertEqual(result.exit_status, 0)
self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr_text),
"Version string does not match 'Avocado \\d\\.\\d:'\n"
"%r" % (result.stderr))
"%r" % (result.stderr_text))
def test_alternate_config_datadir(self):
"""
@@ -195,9 +195,9 @@ class RunnerOperationTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertIn(' base ' + mapping['base_dir'], result.stdout)
self.assertIn(' data ' + mapping['data_dir'], result.stdout)
self.assertIn(' logs ' + mapping['logs_dir'], result.stdout)
self.assertIn(' base ' + mapping['base_dir'], result.stdout_text)
self.assertIn(' data ' + mapping['data_dir'], result.stdout_text)
self.assertIn(' logs ' + mapping['logs_dir'], result.stdout_text)
def test_runner_all_ok(self):
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
@@ -214,8 +214,8 @@ class RunnerOperationTest(unittest.TestCase):
'passtest.py failtest.py passtest.py --failfast on'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertIn('Interrupting job (failfast).', result.stdout)
self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
self.assertIn(b'Interrupting job (failfast).', result.stdout)
self.assertIn(b'PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" % (expected_rc, result))
@@ -225,8 +225,8 @@ class RunnerOperationTest(unittest.TestCase):
'passtest.py badtest.py --ignore-missing-references on'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertIn("Unable to resolve reference(s) 'badtest.py'", result.stderr)
self.assertIn('PASS 1 | ERROR 0 | FAIL 0 | SKIP 0', result.stdout)
self.assertIn(b"Unable to resolve reference(s) 'badtest.py'", result.stderr)
self.assertIn(b'PASS 1 | ERROR 0 | FAIL 0 | SKIP 0', result.stdout)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" % (expected_rc, result))
@@ -236,9 +236,9 @@ class RunnerOperationTest(unittest.TestCase):
'badtest.py badtest2.py --ignore-missing-references on'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertIn("Unable to resolve reference(s) 'badtest.py', 'badtest2.py'",
self.assertIn(b"Unable to resolve reference(s) 'badtest.py', 'badtest2.py'",
result.stderr)
self.assertEqual('', result.stdout)
self.assertEqual(b'', result.stdout)
expected_rc = exit_codes.AVOCADO_JOB_FAIL
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" % (expected_rc, result))
@@ -336,18 +336,16 @@ class RunnerOperationTest(unittest.TestCase):
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'--xunit - doublefail.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
unexpected_rc = exit_codes.AVOCADO_FAIL
self.assertNotEqual(result.exit_status, unexpected_rc,
"Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" % (expected_rc, result))
self.assertIn("TestError: Failing during tearDown. Yay!", output,
self.assertIn(b"TestError: Failing during tearDown. Yay!", result.stdout,
"Cleanup exception not printed to log output")
self.assertIn("TestFail: This test is supposed to fail",
output,
"Test did not fail with action exception:\n%s" % output)
self.assertIn(b"TestFail: This test is supposed to fail", result.stdout,
"Test did not fail with action exception:\n%s" % result.stdout)
def test_uncaught_exception(self):
cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
@@ -357,7 +355,7 @@ class RunnerOperationTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" % (expected_rc,
result))
self.assertIn('"status": "ERROR"', result.stdout)
self.assertIn(b'"status": "ERROR"', result.stdout)
def test_fail_on_exception(self):
cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
@@ -367,7 +365,7 @@ class RunnerOperationTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" % (expected_rc,
result))
self.assertIn('"status": "FAIL"', result.stdout)
self.assertIn(b'"status": "FAIL"', result.stdout)
def test_exception_not_in_path(self):
os.mkdir(os.path.join(self.tmpdir, "shared_lib"))
@@ -383,10 +381,10 @@ class RunnerOperationTest(unittest.TestCase):
result = process.run("%s --show test run --sysinfo=off "
"--job-results-dir %s %s"
% (AVOCADO, self.tmpdir, mytest))
self.assertIn("mytest.py:SharedLibTest.test -> CancelExc: This "
"should not crash on unpickling in runner",
self.assertIn(b"mytest.py:SharedLibTest.test -> CancelExc: This "
b"should not crash on unpickling in runner",
result.stdout)
self.assertNotIn("Failed to read queue", result.stdout)
self.assertNotIn(b"Failed to read queue", result.stdout)
def test_runner_timeout(self):
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
@@ -399,10 +397,10 @@ class RunnerOperationTest(unittest.TestCase):
"Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" % (expected_rc, result))
self.assertIn("Runner error occurred: Timeout reached", output,
self.assertIn(b"Runner error occurred: Timeout reached", output,
"Timeout reached message not found in the output:\n%s" % output)
# Ensure no test aborted error messages show up
self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)
self.assertNotIn(b"TestAbortedError: Test aborted unexpectedly", output)
@unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
"Skipping test that take a long time to run, are "
@@ -411,7 +409,7 @@ class RunnerOperationTest(unittest.TestCase):
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
'--xunit - abort.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
excerpt = 'Test died without reporting the status.'
excerpt = b'Test died without reporting the status.'
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
unexpected_rc = exit_codes.AVOCADO_FAIL
self.assertNotEqual(result.exit_status, unexpected_rc,
@@ -425,37 +423,37 @@ class RunnerOperationTest(unittest.TestCase):
'passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertEqual(result.stdout, '')
self.assertEqual(result.stdout, b'')
def test_empty_args_list(self):
cmd_line = AVOCADO
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_FAIL)
self.assertIn('error: too few arguments', result.stderr)
self.assertIn(b'error: too few arguments', result.stderr)
def test_empty_test_list(self):
cmd_line = '%s run --sysinfo=off --job-results-dir %s' % (AVOCADO,
self.tmpdir)
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_JOB_FAIL)
self.assertIn('No test references provided nor any other arguments '
'resolved into tests', result.stderr)
self.assertIn(b'No test references provided nor any other arguments '
b'resolved into tests', result.stderr)
def test_not_found(self):
cmd_line = ('%s run --sysinfo=off --job-results-dir %s sbrubles'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_JOB_FAIL)
self.assertIn('Unable to resolve reference', result.stderr)
self.assertNotIn('Unable to resolve reference', result.stdout)
self.assertIn(b'Unable to resolve reference', result.stderr)
self.assertNotIn(b'Unable to resolve reference', result.stdout)
def test_invalid_unique_id(self):
cmd_line = ('%s run --sysinfo=off --job-results-dir %s --force-job-id '
'foobar passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertIn('needs to be a 40 digit hex', result.stderr)
self.assertNotIn('needs to be a 40 digit hex', result.stdout)
self.assertIn(b'needs to be a 40 digit hex', result.stderr)
self.assertNotIn(b'needs to be a 40 digit hex', result.stdout)
def test_valid_unique_id(self):
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
@@ -463,8 +461,8 @@ class RunnerOperationTest(unittest.TestCase):
'passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertNotIn('needs to be a 40 digit hex', result.stderr)
self.assertIn('PASS', result.stdout)
self.assertNotIn(b'needs to be a 40 digit hex', result.stderr)
self.assertIn(b'PASS', result.stdout)
def test_automatic_unique_id(self):
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
@@ -530,7 +528,7 @@ class RunnerOperationTest(unittest.TestCase):
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
result.stdout)
result.stdout_text)
@unittest.skipIf(not READ_BINARY, "read binary not available.")
@unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
@@ -563,7 +561,7 @@ class RunnerHumanOutputTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertIn('passtest.py:PassTest.test: PASS', result.stdout)
self.assertIn(b'passtest.py:PassTest.test: PASS', result.stdout)
def test_output_fail(self):
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
@@ -573,7 +571,7 @@ class RunnerHumanOutputTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertIn('failtest.py:FailTest.test: FAIL', result.stdout)
self.assertIn(b'failtest.py:FailTest.test: FAIL', result.stdout)
def test_output_error(self):
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
@@ -583,7 +581,7 @@ class RunnerHumanOutputTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertIn('errortest.py:ErrorTest.test: ERROR', result.stdout)
self.assertIn(b'errortest.py:ErrorTest.test: ERROR', result.stdout)
def test_output_cancel(self):
cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
@@ -593,7 +591,8 @@ class RunnerHumanOutputTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertIn('PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1',
self.assertIn(b'PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | '
b'INTERRUPT 0 | CANCEL 1',
result.stdout)
@unittest.skipIf(not GNU_ECHO_BINARY,
@@ -608,10 +607,10 @@ class RunnerHumanOutputTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %s:\n%s" %
(expected_rc, result))
self.assertIn('[stdout] foo', result.stdout, result)
self.assertIn('[stdout] \'"', result.stdout, result)
self.assertIn('[stdout] bar/baz', result.stdout, result)
self.assertIn('PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
self.assertIn(b'[stdout] foo', result.stdout, result)
self.assertIn(b'[stdout] \'"', result.stdout, result)
self.assertIn(b'[stdout] bar/baz', result.stdout, result)
self.assertIn(b'PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
result.stdout, result)
# logdir name should escape special chars (/)
test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
@@ -729,12 +728,12 @@ class RunnerSimpleTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %s:\n%s" %
(expected_rc, result))
self.assertIn('DEBUG| Debug message', result.stdout, result)
self.assertIn('INFO | Info message', result.stdout, result)
self.assertIn('WARN | Warning message (should cause this test to '
'finish with warning)', result.stdout, result)
self.assertIn('ERROR| Error message (ordinary message not changing '
'the results)', result.stdout, result)
self.assertIn(b'DEBUG| Debug message', result.stdout, result)
self.assertIn(b'INFO | Info message', result.stdout, result)
self.assertIn(b'WARN | Warning message (should cause this test to '
b'finish with warning)', result.stdout, result)
self.assertIn(b'ERROR| Error message (ordinary message not changing '
b'the results)', result.stdout, result)
@unittest.skipIf(not GNU_ECHO_BINARY, "Uses echo as test")
def test_fs_unfriendly_run(self):
@@ -917,8 +916,8 @@ class ExternalRunnerTest(unittest.TestCase):
'--external-runner=/bin/sh --external-runner-chdir=test %s'
% (AVOCADO, self.tmpdir, self.pass_script.path))
result = process.run(cmd_line, ignore_status=True)
expected_output = ('Option "--external-runner-chdir=test" requires '
'"--external-runner-testdir" to be set')
expected_output = (b'Option "--external-runner-chdir=test" requires '
b'"--external-runner-testdir" to be set')
self.assertIn(expected_output, result.stderr)
expected_rc = exit_codes.AVOCADO_JOB_FAIL
self.assertEqual(result.exit_status, expected_rc,
@@ -929,8 +928,8 @@ class ExternalRunnerTest(unittest.TestCase):
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--external-runner=%s' % (AVOCADO, self.tmpdir, TRUE_CMD))
result = process.run(cmd_line, ignore_status=True)
expected_output = ('No test references provided nor any other '
'arguments resolved into tests')
expected_output = (b'No test references provided nor any other '
b'arguments resolved into tests')
self.assertIn(expected_output, result.stderr)
expected_rc = exit_codes.AVOCADO_JOB_FAIL
self.assertEqual(result.exit_status, expected_rc,
@@ -972,7 +971,8 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertNotIn('No tests were found on current tests dir', result.stdout)
self.assertNotIn(b'No tests were found on current tests dir',
result.stdout)
def test_list_error_output(self):
cmd_line = '%s list sbrubles' % AVOCADO
@@ -981,7 +981,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertIn("Unable to resolve reference", result.stderr)
self.assertIn(b"Unable to resolve reference", result.stderr)
def test_list_no_file_loader(self):
cmd_line = ("%s list --loaders external --verbose -- "
@@ -990,12 +990,12 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
"Avocado did not return rc %d:\n%s"
% (exit_codes.AVOCADO_ALL_OK, result))
exp = ("Type Test Tag(s)\n"
"MISSING this-wont-be-matched \n\n"
"TEST TYPES SUMMARY\n"
"==================\n"
"EXTERNAL: 0\n"
"MISSING: 1\n")
exp = (b"Type Test Tag(s)\n"
b"MISSING this-wont-be-matched \n\n"
b"TEST TYPES SUMMARY\n"
b"==================\n"
b"EXTERNAL: 0\n"
b"MISSING: 1\n")
self.assertEqual(exp, result.stdout, "Stdout mismatch:\n%s\n\n%s"
% (exp, result))
@@ -1011,7 +1011,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
"Avocado did not return rc %d:\n%s"
% (exit_codes.AVOCADO_ALL_OK, result))
stdout_lines = result.stdout.splitlines()
stdout_lines = result.stdout_text.splitlines()
self.assertIn("Tag(s)", stdout_lines[0])
full_test_name = "%s:MyTest.test" % test
self.assertEqual("INSTRUMENTED %s BIG_TAG_NAME" % full_test_name,
@@ -1029,7 +1029,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
if sys.version_info[:2] >= (2, 7, 0):
self.assertNotIn('Disabled', result.stdout)
self.assertNotIn(b'Disabled', result.stdout)
def test_config_plugin(self):
cmd_line = '%s config --paginator off' % AVOCADO
@@ -1038,7 +1038,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertNotIn('Disabled', result.stdout)
self.assertNotIn(b'Disabled', result.stdout)
def test_config_plugin_datadir(self):
cmd_line = '%s config --datadir --paginator off' % AVOCADO
@@ -1047,7 +1047,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertNotIn('Disabled', result.stdout)
self.assertNotIn(b'Disabled', result.stdout)
def test_disable_plugin(self):
cmd_line = '%s plugins' % AVOCADO
@@ -1056,7 +1056,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertIn("Collect system information", result.stdout)
self.assertIn(b"Collect system information", result.stdout)
config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
config = script.TemporaryScript("disable_sysinfo_cmd.conf",
@@ -1068,7 +1068,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertNotIn("Collect system information", result.stdout)
self.assertNotIn(b"Collect system information", result.stdout)
def test_plugin_order(self):
"""
@@ -1104,7 +1104,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
for result_plugin in result_plugins:
self.assertIn(result_plugin, result.stdout)
self.assertIn(result_plugin, result.stdout_text)
config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
config_zip_first = script.TemporaryScript("zip_first.conf",
@@ -1140,7 +1140,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertNotIn("'Namespace' object has no attribute", result.stderr)
self.assertNotIn(b"'Namespace' object has no attribute", result.stderr)
class ParseXMLError(Exception):
@@ -148,10 +148,10 @@ class InterruptTest(unittest.TestCase):
output = self.proc.stdout.read()
# Make sure the Interrupted requested sentence is there
self.assertIn('Interrupt requested. Waiting 2 seconds for test to '
'finish (ignoring new Ctrl+C until then)', output)
self.assertIn(b'Interrupt requested. Waiting 2 seconds for test to '
b'finish (ignoring new Ctrl+C until then)', output)
# Make sure the Killing test subprocess message did appear
self.assertIn('Killing test subprocess', output)
self.assertIn(b'Killing test subprocess', output)
@unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
"Skipping test that take a long time to run, are "
@@ -191,7 +191,7 @@ class InterruptTest(unittest.TestCase):
timeout=10), 'Avocado left processes behind.')
# Make sure the Interrupted test sentence is there
self.assertIn('Terminated\n', self.proc.stdout.read())
self.assertIn(b'Terminated\n', self.proc.stdout.read())
@unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
"Skipping test that take a long time to run, are "
@@ -232,10 +232,10 @@ class InterruptTest(unittest.TestCase):
output = self.proc.stdout.read()
# Make sure the Interrupted requested sentence is there
self.assertIn('Interrupt requested. Waiting 2 seconds for test to '
'finish (ignoring new Ctrl+C until then)', output)
self.assertIn(b'Interrupt requested. Waiting 2 seconds for test to '
b'finish (ignoring new Ctrl+C until then)', output)
# Make sure the Killing test subprocess message is not there
self.assertNotIn('Killing test subprocess', output)
self.assertNotIn(b'Killing test subprocess', output)
@unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
"Skipping test that take a long time to run, are "
@@ -275,7 +275,7 @@ class InterruptTest(unittest.TestCase):
timeout=10), 'Avocado left processes behind.')
# Make sure the Interrupted test sentence is there
self.assertIn('Terminated\n', self.proc.stdout.read())
self.assertIn(b'Terminated\n', self.proc.stdout.read())
def tearDown(self):
shutil.rmtree(self.tmpdir)
@@ -164,7 +164,7 @@ class LoaderTestFunctional(unittest.TestCase):
test_script.save()
cmd_line = ('%s list -V %s' % (AVOCADO, test_script.path))
result = process.run(cmd_line)
self.assertIn('%s: %s' % (exp_str, count), result.stdout)
self.assertIn('%s: %s' % (exp_str, count), result.stdout_text)
test_script.remove()
def _run_with_timeout(self, cmd_line, timeout):
@@ -213,7 +213,7 @@ class LoaderTestFunctional(unittest.TestCase):
("Took more than 3 seconds to list tests. Loader "
"probably loaded/executed Python code and slept for "
"eleven seconds."))
self.assertIn('INSTRUMENTED: 2', result.stdout)
self.assertIn(b'INSTRUMENTED: 2', result.stdout)
def test_multiple_class(self):
self._test('multipleclasses.py', AVOCADO_TEST_MULTIPLE_CLASSES,
@@ -246,7 +246,7 @@ class LoaderTestFunctional(unittest.TestCase):
mytest.save()
cmd_line = "%s list -V %s" % (AVOCADO, mytest)
result = process.run(cmd_line)
self.assertIn('SIMPLE: 1', result.stdout)
self.assertIn(b'SIMPLE: 1', result.stdout)
# job should be able to finish under 5 seconds. If this fails, it's
# possible that we hit the "simple test fork bomb" bug
cmd_line = ("%s run --sysinfo=off --job-results-dir '%s' -- '%s'"
@@ -357,10 +357,10 @@ class LoaderTestFunctional(unittest.TestCase):
"""
cmd = "%s list examples/tests/:fail" % AVOCADO
result = process.run(cmd)
expected = ("INSTRUMENTED examples/tests/doublefail.py:DoubleFail.test\n"
"INSTRUMENTED examples/tests/fail_on_exception.py:FailOnException.test\n"
"INSTRUMENTED examples/tests/failtest.py:FailTest.test\n"
"SIMPLE examples/tests/failtest.sh\n")
expected = (b"INSTRUMENTED examples/tests/doublefail.py:DoubleFail.test\n"
b"INSTRUMENTED examples/tests/fail_on_exception.py:FailOnException.test\n"
b"INSTRUMENTED examples/tests/failtest.py:FailTest.test\n"
b"SIMPLE examples/tests/failtest.sh\n")
self.assertEqual(expected, result.stdout)
def tearDown(self):
@@ -157,7 +157,7 @@ class OutputTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
bad_string = 'double free or corruption'
bad_string = b'double free or corruption'
self.assertNotIn(bad_string, output,
"Libc double free can be seen in avocado "
"doublefree output:\n%s" % output)
@@ -313,7 +313,7 @@ class OutputPluginTest(unittest.TestCase):
'(--xunit)): Options ((--xunit --json)|'
'(--json --xunit)) are trying to use stdout '
'simultaneously\n')
self.assertIsNotNone(error_regex.match(result.stderr),
self.assertIsNotNone(error_regex.match(result.stderr_text),
"Missing error message from output:\n%s" %
result.stderr)
@@ -328,7 +328,7 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
error_excerpt = "HTML to stdout not supported"
error_excerpt = b"HTML to stdout not supported"
self.assertIn(error_excerpt, output,
"Missing excerpt error message from output:\n%s" % output)
@@ -427,7 +427,7 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, "", "Output is not empty:\n%s" % output)
self.assertEqual(output, b"", "Output is not empty:\n%s" % output)
# Check if we are producing valid outputs
with open(tmpfile2, 'r') as fp:
json_results = json.load(fp)
@@ -447,7 +447,7 @@ class OutputPluginTest(unittest.TestCase):
"--job-results-dir %s --sysinfo=off"
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
output = result.stdout_text + result.stderr_text
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
@@ -469,7 +469,7 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
job_id_list = re.findall('Job ID: (.*)', result.stdout,
job_id_list = re.findall('Job ID: (.*)', result.stdout_text,
re.MULTILINE)
self.assertTrue(job_id_list, 'No Job ID in stdout:\n%s' %
result.stdout)
@@ -487,7 +487,7 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, "")
self.assertEqual(output, b"")
def test_default_enabled_plugins(self):
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
@@ -588,7 +588,7 @@ class OutputPluginTest(unittest.TestCase):
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, '',
self.assertEqual(output, b'',
'After redirecting to file, output is not empty: %s' % output)
with open(redirected_output_path, 'r') as redirected_output_file_obj:
redirected_output = redirected_output_file_obj.read()
@@ -617,7 +617,7 @@ class OutputPluginTest(unittest.TestCase):
"--job-results-dir %s "
"--tap -" % (AVOCADO, self.tmpdir))
result = process.run(cmd_line)
expr = '1..4'
expr = b'1..4'
self.assertIn(expr, result.stdout, "'%s' not found in:\n%s"
% (expr, result.stdout))
@@ -630,9 +630,9 @@ class OutputPluginTest(unittest.TestCase):
("avocado run to broken pipe did not return "
"rc %d:\n%s" % (expected_rc, result)))
self.assertEqual(len(result.stderr.splitlines()), 1)
self.assertIn("whacky-unknown-command", result.stderr)
self.assertIn("not found", result.stderr)
self.assertNotIn("Avocado crashed", result.stderr)
self.assertIn(b"whacky-unknown-command", result.stderr)
self.assertIn(b"not found", result.stderr)
self.assertNotIn(b"Avocado crashed", result.stderr)
def test_results_plugins_no_tests(self):
cmd_line = ("%s run UNEXISTING --job-results-dir %s"
@@ -650,11 +650,11 @@ class OutputPluginTest(unittest.TestCase):
self.assertFalse(os.path.exists(tap_results))
# Check that no UI output was generated
self.assertNotIn("RESULTS : PASS ", result.stdout)
self.assertNotIn("JOB TIME :", result.stdout)
self.assertNotIn(b"RESULTS : PASS ", result.stdout)
self.assertNotIn(b"JOB TIME :", result.stdout)
# Check that plugins do not produce errors
self.assertNotIn("Error running method ", result.stderr)
self.assertNotIn(b"Error running method ", result.stderr)
def tearDown(self):
shutil.rmtree(self.tmpdir)
@@ -50,20 +50,16 @@ class DiffTests(unittest.TestCase):
(AVOCADO, self.jobdir, self.jobdir2))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = "# COMMAND LINE"
self.assertIn(msg, result.stdout)
msg = "-./scripts/avocado run"
self.assertIn(msg, result.stdout)
msg = "+./scripts/avocado run"
self.assertIn(msg, result.stdout)
self.assertIn(b"# COMMAND LINE", result.stdout)
self.assertIn(b"-./scripts/avocado run", result.stdout)
self.assertIn(b"+./scripts/avocado run", result.stdout)
def test_diff_nocmdline(self):
cmd_line = ('%s diff %s %s --diff-filter nocmdline' %
(AVOCADO, self.jobdir, self.jobdir2))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = "# COMMAND LINE"
self.assertNotIn(msg, result.stdout)
self.assertNotIn(b"# COMMAND LINE", result.stdout)
def tearDown(self):
shutil.rmtree(self.tmpdir)
@@ -79,9 +79,9 @@ class JobScriptsTest(unittest.TestCase):
# Pre/Post scripts failures do not (currently?) alter the exit status
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertNotIn('Pre job script "%s" exited with status "1"' % touch_script,
result.stderr)
result.stderr_text)
self.assertNotIn('Post job script "%s" exited with status "1"' % rm_script,
result.stderr)
result.stderr_text)
def test_status_non_zero(self):
"""
@@ -102,7 +102,7 @@ class JobScriptsTest(unittest.TestCase):
# Pre/Post scripts failures do not (currently?) alter the exit status
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertEqual('Pre job script "%s" exited with status "1"\n' % non_zero_script,
result.stderr)
result.stderr_text)
def test_non_existing_dir(self):
"""
@@ -124,9 +124,9 @@ class JobScriptsTest(unittest.TestCase):
# Pre/Post scripts failures do not (currently?) alter the exit status
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertIn('-job scripts has not been found', result.stderr)
self.assertIn(b'-job scripts has not been found', result.stderr)
self.assertNotIn('Pre job script "%s" exited with status "1"' % non_zero_script,
result.stderr)
result.stderr_text)
def tearDown(self):
shutil.rmtree(self.tmpdir)
@@ -106,8 +106,8 @@ class ReplayTests(unittest.TestCase):
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Invalid --replay-ignore option. Valid options are ' \
'(more than one allowed): variants,config'
msg = (b'Invalid --replay-ignore option. Valid options are '
b'(more than one allowed): variants,config')
self.assertIn(msg, result.stderr)
def test_run_replay_ignorevariants(self):
@@ -119,7 +119,7 @@ class ReplayTests(unittest.TestCase):
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Ignoring variants from source job with --replay-ignore.'
msg = b'Ignoring variants from source job with --replay-ignore.'
self.assertIn(msg, result.stderr)
def test_run_replay_invalidstatus(self):
@@ -131,8 +131,8 @@ class ReplayTests(unittest.TestCase):
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Invalid --replay-test-status option. Valid options are (more ' \
'than one allowed): SKIP,ERROR,FAIL,WARN,PASS,INTERRUPTED'
msg = (b'Invalid --replay-test-status option. Valid options are (more '
b'than one allowed): SKIP,ERROR,FAIL,WARN,PASS,INTERRUPTED')
self.assertIn(msg, result.stderr)
def test_run_replay_statusfail(self):
@@ -144,7 +144,8 @@ class ReplayTests(unittest.TestCase):
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = 'RESULTS : PASS 0 | ERROR 0 | FAIL 0 | SKIP 4 | WARN 0 | INTERRUPT 0'
msg = (b'RESULTS : PASS 0 | ERROR 0 | FAIL 0 | SKIP 4 | WARN 0 | '
b'INTERRUPT 0')
self.assertIn(msg, result.stdout)
def test_run_replay_remotefail(self):
@@ -156,7 +157,7 @@ class ReplayTests(unittest.TestCase):
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = "Currently we don't replay jobs in remote hosts."
msg = b"Currently we don't replay jobs in remote hosts."
self.assertIn(msg, result.stderr)
def test_run_replay_status_and_variants(self):
@@ -168,8 +169,8 @@ class ReplayTests(unittest.TestCase):
'--sysinfo=off' % (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = ("Option `--replay-test-status` is incompatible with "
"`--replay-ignore variants`")
msg = (b"Option `--replay-test-status` is incompatible with "
b"`--replay-ignore variants`")
self.assertIn(msg, result.stderr)
def test_run_replay_status_and_references(self):
@@ -181,8 +182,8 @@ class ReplayTests(unittest.TestCase):
'--sysinfo=off' % (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = ("Option --replay-test-status is incompatible with "
"test references given on the command line.")
msg = (b"Option --replay-test-status is incompatible with "
b"test references given on the command line.")
self.assertIn(msg, result.stderr)
def test_run_replay_and_mux(self):
@@ -47,8 +47,8 @@ class ReplayExtRunnerTests(unittest.TestCase):
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = "Overriding the replay external-runner with the "\
"--external-runner value given on the command line."
msg = (b"Overriding the replay external-runner with the "
b"--external-runner value given on the command line.")
self.assertIn(msg, result.stderr)
def tearDown(self):
@@ -49,7 +49,8 @@ class ReplayFailfastTests(unittest.TestCase):
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Overriding the replay failfast with the --failfast value given on the command line.'
msg = (b'Overriding the replay failfast with the --failfast value '
b'given on the command line.')
self.assertIn(msg, result.stderr)
def tearDown(self):
@@ -50,7 +50,7 @@ class StandaloneTests(unittest.TestCase):
exc = "errortest_nasty.NastyException: Nasty-string-like-exception"
else:
exc = "NastyException: Nasty-string-like-exception"
count = result.stdout.count("\n%s" % exc)
count = result.stdout_text.count("\n%s" % exc)
self.assertEqual(count, 2, "Exception \\n%s should be present twice in"
"the log (once from the log, second time when parsing"
"exception details." % (exc))
@@ -59,17 +59,17 @@ class StandaloneTests(unittest.TestCase):
cmd_line = './examples/tests/errortest_nasty2.py -r'
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
result = self.run_and_check(cmd_line, expected_rc, 'errortest_nasty2')
self.assertIn("Exception: Unable to get exception, check the traceback"
" for details.", result.stdout)
self.assertIn(b"Exception: Unable to get exception, check the traceback"
b" for details.", result.stdout)
def test_errortest_nasty3(self):
cmd_line = './examples/tests/errortest_nasty3.py -r'
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
result = self.run_and_check(cmd_line, expected_rc, 'errortest_nasty3')
if sys.version_info[0] == 3:
exc = "TypeError: exceptions must derive from BaseException"
exc = b"TypeError: exceptions must derive from BaseException"
else:
exc = "TestError: <errortest_nasty3.NastyException instance at "
exc = b"TestError: <errortest_nasty3.NastyException instance at "
self.assertIn(exc, result.stdout)
def test_errortest(self):
@@ -25,8 +25,8 @@ class StreamsTest(unittest.TestCase):
"""
result = process.run('%s distro' % AVOCADO)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertIn('Detected distribution', result.stdout)
self.assertEqual('', result.stderr)
self.assertIn(b'Detected distribution', result.stdout)
self.assertEqual(b'', result.stderr)
def test_app_error_stderr(self):
"""
@@ -35,12 +35,12 @@ class StreamsTest(unittest.TestCase):
result = process.run('%s unknown-whacky-command' % AVOCADO,
ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_FAIL)
self.assertIn("invalid choice: 'unknown-whacky-command'",
self.assertIn(b"invalid choice: 'unknown-whacky-command'",
result.stderr)
self.assertNotIn("invalid choice: 'unknown-whacky-command'",
self.assertNotIn(b"invalid choice: 'unknown-whacky-command'",
result.stdout)
self.assertIn("Avocado Test Runner", result.stdout)
self.assertNotIn("Avocado Test Runner", result.stderr)
self.assertIn(b"Avocado Test Runner", result.stdout)
self.assertNotIn(b"Avocado Test Runner", result.stderr)
def test_other_stream_early_stdout(self):
"""
@@ -58,11 +58,11 @@ class StreamsTest(unittest.TestCase):
for cmd, env in cmds:
result = process.run(cmd, env=env, shell=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertIn("stevedore.extension: found extension EntryPoint.parse",
self.assertIn(b"stevedore.extension: found extension EntryPoint.parse",
result.stdout)
self.assertIn("avocado.test: Command line: %s" % cmd,
result.stdout)
self.assertEqual('', result.stderr)
result.stdout_text)
self.assertEqual(b'', result.stderr)
def test_test(self):
"""
@@ -76,16 +76,16 @@ class StreamsTest(unittest.TestCase):
' passtest.py' % (AVOCADO, self.tmpdir))):
result = process.run(cmd)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertNotIn("stevedore.extension: found extension EntryPoint.parse",
self.assertNotIn(b"stevedore.extension: found extension EntryPoint.parse",
result.stdout)
self.assertNotIn("stevedore.extension: found extension EntryPoint.parse",
self.assertNotIn(b"stevedore.extension: found extension EntryPoint.parse",
result.stderr)
self.assertIn("Command line: %s" % cmd,
result.stdout_text)
self.assertIn(b"\nSTART 1-passtest.py:PassTest.test",
result.stdout)
self.assertIn("\nSTART 1-passtest.py:PassTest.test",
result.stdout)
self.assertIn("PASS 1-passtest.py:PassTest.test", result.stdout)
self.assertEqual('', result.stderr)
self.assertIn(b"PASS 1-passtest.py:PassTest.test", result.stdout)
self.assertEqual(b'', result.stderr)
def test_none_success(self):
"""
@@ -99,8 +99,8 @@ class StreamsTest(unittest.TestCase):
'passtest.py' % (AVOCADO, self.tmpdir))):
result = process.run(cmd)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
self.assertEqual('', result.stdout)
self.assertEqual('', result.stderr)
self.assertEqual(b'', result.stdout)
self.assertEqual(b'', result.stderr)
def test_none_error(self):
"""
@@ -112,8 +112,8 @@ class StreamsTest(unittest.TestCase):
'%s --silent unknown-whacky-command' % AVOCADO):
result = process.run(cmd, ignore_status=True)
self.assertEqual(result.exit_status, exit_codes.AVOCADO_FAIL)
self.assertEqual('', result.stdout)
self.assertNotEqual('', result.stderr)
self.assertEqual(b'', result.stdout)
self.assertNotEqual(b'', result.stderr)
def test_custom_stream_and_level(self):
"""
@@ -35,7 +35,7 @@ class SysInfoTest(unittest.TestCase):
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
'Avocado did not return rc %d:\n%s' % (expected_rc, result))
output = result.stdout + result.stderr
output = result.stdout_text + result.stderr_text
sysinfo_dir = None
for line in output.splitlines():
if 'JOB LOG' in line:
@@ -61,7 +61,7 @@ class SysInfoTest(unittest.TestCase):
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
'Avocado did not return rc %d:\n%s' % (expected_rc, result))
output = result.stdout + result.stderr
output = result.stdout_text + result.stderr_text
sysinfo_dir = None
for line in output.splitlines():
if 'JOB LOG' in line:
@@ -91,23 +91,23 @@ class UnittestCompat(unittest.TestCase):
cmd_line = '%s %s' % (sys.executable, self.unittest_script_good)
result = process.run(cmd_line)
self.assertEqual(0, result.exit_status)
self.assertIn('Ran 1 test in', result.stderr)
self.assertIn(b'Ran 1 test in', result.stderr)
def test_run_fail(self):
cmd_line = '%s %s' % (sys.executable, self.unittest_script_fail)
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(1, result.exit_status)
self.assertIn('Ran 1 test in', result.stderr)
self.assertIn('This test is supposed to fail', result.stderr)
self.assertIn('FAILED (failures=1)', result.stderr)
self.assertIn(b'Ran 1 test in', result.stderr)
self.assertIn(b'This test is supposed to fail', result.stderr)
self.assertIn(b'FAILED (failures=1)', result.stderr)
def test_run_error(self):
cmd_line = '%s %s' % (sys.executable, self.unittest_script_error)
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(1, result.exit_status)
self.assertIn('Ran 1 test in', result.stderr)
self.assertIn('This test is supposed to error', result.stderr)
self.assertIn('FAILED (errors=1)', result.stderr)
self.assertIn(b'Ran 1 test in', result.stderr)
self.assertIn(b'This test is supposed to error', result.stderr)
self.assertIn(b'FAILED (errors=1)', result.stderr)
def tearDown(self):
self.unittest_script_error.remove()
@@ -152,7 +152,7 @@ class ProcessTest(unittest.TestCase):
proc = process.SubProcess(self.fake_uptime)
result = proc.run()
self.assertEqual(result.exit_status, 0, 'result: %s' % result)
self.assertIn('load average', result.stdout)
self.assertIn(b'load average', result.stdout)
def tearDown(self):
shutil.rmtree(self.base_logdir)
@@ -30,7 +30,7 @@ class BaseIso9660(unittest.TestCase):
due to ast loader we can't just define a base-class.
"""
self.assertEqual(self.iso.read("file"),
"file content\n")
b"file content\n")
dst = os.path.join(self.tmpdir, "file")
self.iso.copy(os.path.join("Dir", "in_dir_file"), dst)
self.assertEqual(open(dst).read(), "content of in-dir-file\n")
@@ -49,10 +49,11 @@ class BaseIso9660(unittest.TestCase):
base = self.iso.mnt_dir
dir_path = os.path.join(base, "Dir")
self.assertTrue(os.path.isdir(dir_path))
self.assertEqual(open(os.path.join(base, "file")).read(),
"file content\n")
self.assertEqual(open(os.path.join(base, "Dir", "in_dir_file")).read(),
"content of in-dir-file\n")
self.assertEqual(bytes(open(os.path.join(base, "file"), 'rb').read()),
b"file content\n")
in_dir_file_path = os.path.join(base, "Dir", "in_dir_file")
self.assertEqual(bytes(open(in_dir_file_path, 'rb').read()),
b"content of in-dir-file\n")
self.iso.close()
self.assertFalse(os.path.exists(base), "the mnt_dir is suppose to be "
"destroyed after iso.close()")