diff --git a/avocado/core/job.py b/avocado/core/job.py
index 64a0480bc2f1ca26a9ec06181fff59242d09cd1f..33b8ece96eb461d27df4d22928adb00194e1c5e2 100644
--- a/avocado/core/job.py
+++ b/avocado/core/job.py
@@ -349,7 +349,7 @@ class Job(object):
             cmd = "%s show --summary --pretty='%%H'" % git
             res = process.run(cmd, ignore_status=True, verbose=False)
             if res.exit_status == 0:
-                top_commit = res.stdout.splitlines()[0][:8]
+                top_commit = res.stdout_text.splitlines()[0][:8]
                 return " (GIT commit %s)" % top_commit
         finally:
             os.chdir(olddir)
diff --git a/avocado/core/test.py b/avocado/core/test.py
index 41e046fc74c91f996fcf8bf48c1287106b6a4862..43b0d60c77dfe7a6533abb38a1e605f258c0d469 100644
--- a/avocado/core/test.py
+++ b/avocado/core/test.py
@@ -1169,11 +1169,11 @@ class SimpleTest(Test):
         if regex is not None:
             re_warn = re.compile(regex)
             if warn_location in ['all', 'stdout']:
-                if re_warn.search(result.stdout):
+                if re_warn.search(result.stdout_text):
                     raise exceptions.TestWarn(warn_msg % 'stdout')
 
             if warn_location in ['all', 'stderr']:
-                if re_warn.search(result.stderr):
+                if re_warn.search(result.stderr_text):
                     raise exceptions.TestWarn(warn_msg % 'stderr')
 
         if skip_regex is not None:
@@ -1182,11 +1182,11 @@ class SimpleTest(Test):
                         "Check the log for details.")
             if skip_location in ['all', 'stdout']:
-                if re_skip.search(result.stdout):
+                if re_skip.search(result.stdout_text):
                     raise exceptions.TestSkipError(skip_msg % 'stdout')
 
             if warn_location in ['all', 'stderr']:
-                if re_skip.search(result.stderr):
+                if re_skip.search(result.stderr_text):
                     raise exceptions.TestSkipError(skip_msg % 'stderr')
 
     def test(self):
diff --git a/avocado/utils/iso9660.py b/avocado/utils/iso9660.py
index 852ff94a0c722ffe0421d51c884884e8fe63f7d4..de28006923b747651c6984d3e3a1d66149014357 100644
--- a/avocado/utils/iso9660.py
+++ b/avocado/utils/iso9660.py
@@ -222,12 +222,11 @@ class Iso9660IsoInfo(MixInMntDirMount, BaseIso9660):
         """
         cmd = 'isoinfo -i %s -d' % path
         output = process.system_output(cmd)
-
-        if re.findall("\nJoliet", output):
+        if b"\nJoliet" in output:
             self.joliet = True
-        if re.findall("\nRock Ridge signatures", output):
+        if b"\nRock Ridge signatures" in output:
             self.rock_ridge = True
-        if re.findall("\nEl Torito", output):
+        if b"\nEl Torito" in output:
             self.el_torito = True
 
     @staticmethod
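[review note] The iso9660 hunk above works because process.system_output() now returns bytes, so a plain containment test with a bytes literal replaces the regex search and avoids mixing str patterns with bytes data. A minimal standalone sketch of the Python 3 semantics (the sample output string is made up, not real isoinfo output):

    # Bytes containment: both operands must be bytes on Python 3.
    output = b"CD-ROM is in ISO 9660 format\nJoliet with UCS level 3 found\n"
    assert b"\nJoliet" in output      # bytes in bytes: fine
    # "\nJoliet" in output            # TypeError: str needle, bytes haystack
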
diff --git a/avocado/utils/process.py b/avocado/utils/process.py
index 16bc87e4c3c325d760c2a600216b845540ffc271..08c9ca8e59ccff538f29dbc0a890dceefcf7e478 100644
--- a/avocado/utils/process.py
+++ b/avocado/utils/process.py
@@ -27,6 +27,7 @@ import shutil
 import signal
 import stat
 import subprocess
+import sys
 import threading
 import time
 
@@ -269,17 +270,35 @@ class CmdResult(object):
     :type duration: float
     :param pid: ID of the process
     :type pid: int
+    :param encoding: the encoding to use for the text version
+                     of stdout and stderr, with the default being
+                     Python's own (:func:`sys.getdefaultencoding`).
+    :type encoding: str
     """
 
     def __init__(self, command="", stdout="", stderr="",
-                 exit_status=None, duration=0, pid=None):
+                 exit_status=None, duration=0, pid=None,
+                 encoding=None):
         self.command = command
         self.exit_status = exit_status
+        #: The raw stdout (bytes)
         self.stdout = stdout
+        #: The raw stderr (bytes)
         self.stderr = stderr
         self.duration = duration
         self.interrupted = False
         self.pid = pid
+        if encoding is None:
+            encoding = sys.getdefaultencoding()
+        self.encoding = encoding
+
+    @property
+    def stdout_text(self):
+        return self.stdout.decode(self.encoding)
+
+    @property
+    def stderr_text(self):
+        return self.stderr.decode(self.encoding)
 
     def __repr__(self):
         cmd_rep = ("Command: %s\n"
@@ -1312,12 +1331,12 @@ def system_output(cmd, timeout=None, verbose=True, ignore_status=False,
     :type strip_trail_nl: bool
 
     :return: Command output.
-    :rtype: str
+    :rtype: bytes
     :raise: :class:`CmdError`, if ``ignore_status=False``.
     """
     cmd_result = run(cmd=cmd, timeout=timeout, verbose=verbose,
                      ignore_status=ignore_status, allow_output_check=allow_output_check,
                      shell=shell, env=env, sudo=sudo, ignore_bg_processes=ignore_bg_processes)
     if strip_trail_nl:
-        return cmd_result.stdout.rstrip('\n\r')
+        return cmd_result.stdout.rstrip(b'\n\r')
     return cmd_result.stdout
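[review note] CmdResult keeps stdout/stderr as raw bytes and only decodes on demand, each time one of the new *_text properties is read. A usage sketch of the new API (assumes a POSIX echo that honors -n):

    from avocado.utils import process

    result = process.run("echo -n foo")
    raw = result.stdout          # b'foo', safe for binary output
    text = result.stdout_text    # 'foo', decoded with result.encoding
    # The encoding can also be set explicitly when building a CmdResult:
    accented = process.CmdResult(stdout=b'\xc3\xa9', encoding='utf-8')
    assert accented.stdout_text == u'\xe9'
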
diff --git a/examples/tests/doublefree.py b/examples/tests/doublefree.py
index 30c9024a2ae4b5fb18a8094c52fdc102b4290afd..d58189f1662f8a0ac4c769b81cfdea60d1d61e11 100755
--- a/examples/tests/doublefree.py
+++ b/examples/tests/doublefree.py
@@ -45,9 +45,9 @@ class DoubleFreeTest(Test):
         self.log.info(cmd_result)
         output = cmd_result.stdout + cmd_result.stderr
         if sys.platform.startswith('darwin'):
-            pattern = 'pointer being freed was not allocated'
+            pattern = b'pointer being freed was not allocated'
         else:
-            pattern = 'free(): invalid pointer'
+            pattern = b'free(): invalid pointer'
         self.assertTrue(pattern in output,
                         msg='Could not find pattern %s in output %s' %
                         (pattern, output))
diff --git a/optional_plugins/varianter_yaml_to_mux/tests/test_multiplex.py b/optional_plugins/varianter_yaml_to_mux/tests/test_multiplex.py
index 9fb490ef0e7bd83126c8276966df62e171db60b7..ba08dec536428680bc4b29e881788b2259b31962 100644
--- a/optional_plugins/varianter_yaml_to_mux/tests/test_multiplex.py
+++ b/optional_plugins/varianter_yaml_to_mux/tests/test_multiplex.py
@@ -12,7 +12,7 @@ basedir = os.path.abspath(basedir)
 
 AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
 
-DEBUG_OUT = """
+DEBUG_OUT = b"""
 Variant mint-debug-amd-virtio-935e: amd@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml, virtio@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml, mint@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml, debug@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml
     /distro/mint:init => systemv@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml:/distro/mint
     /env/debug:opt_CFLAGS => -O0 -g@optional_plugins/varianter_yaml_to_mux/tests/.data/mux-environment.yaml:/env/debug
@@ -38,7 +38,7 @@ class MultiplexTests(unittest.TestCase):
         if tests is not None:
             exp = ("PASS %s | ERROR 0 | FAIL %s | SKIP 0 | WARN 0 | "
                    "INTERRUPT 0" % tests)
-            self.assertIn(exp, result.stdout, "%s not in stdout:\n%s"
+            self.assertIn(exp, result.stdout_text, "%s not in stdout:\n%s"
                           % (exp, result))
         return result
 
@@ -52,7 +52,7 @@ class MultiplexTests(unittest.TestCase):
         cmd_line = '%s variants -m nonexist' % AVOCADO
         expected_rc = exit_codes.AVOCADO_FAIL
         result = self.run_and_check(cmd_line, expected_rc)
-        self.assertIn('No such file or directory', result.stderr)
+        self.assertIn('No such file or directory', result.stderr_text)
 
     def test_mplex_debug(self):
         cmd_line = ('%s variants -c -d -m '
@@ -106,9 +106,9 @@ class MultiplexTests(unittest.TestCase):
                     % (AVOCADO, self.tmpdir))
         expected_rc = exit_codes.AVOCADO_TESTS_FAIL
         result = self.run_and_check(cmd_line, expected_rc, (4, 4))
-        self.assertIn("(1/8) passtest.py:PassTest.test;short", result.stdout)
-        self.assertIn("(2/8) passtest.py:PassTest.test;medium", result.stdout)
-        self.assertIn("(8/8) failtest.py:FailTest.test;longest",
+        self.assertIn(b"(1/8) passtest.py:PassTest.test;short", result.stdout)
+        self.assertIn(b"(2/8) passtest.py:PassTest.test;medium", result.stdout)
+        self.assertIn(b"(8/8) failtest.py:FailTest.test;longest",
                       result.stdout)
 
     def test_run_mplex_failtest_tests_per_variant(self):
@@ -119,9 +119,9 @@ class MultiplexTests(unittest.TestCase):
                     % (AVOCADO, self.tmpdir))
         expected_rc = exit_codes.AVOCADO_TESTS_FAIL
         result = self.run_and_check(cmd_line, expected_rc, (4, 4))
-        self.assertIn("(1/8) passtest.py:PassTest.test;short", result.stdout)
-        self.assertIn("(2/8) failtest.py:FailTest.test;short", result.stdout)
-        self.assertIn("(8/8) failtest.py:FailTest.test;longest",
+        self.assertIn(b"(1/8) passtest.py:PassTest.test;short", result.stdout)
+        self.assertIn(b"(2/8) failtest.py:FailTest.test;short", result.stdout)
+        self.assertIn(b"(8/8) failtest.py:FailTest.test;longest",
                       result.stdout)
 
     def test_run_double_mplex(self):
@@ -155,15 +155,15 @@ class MultiplexTests(unittest.TestCase):
         msg_lines = msg.splitlines()
         msg_header = '[stdout] Custom variable: %s' % msg_lines[0]
-        self.assertIn(msg_header, result.stdout,
+        self.assertIn(msg_header, result.stdout_text,
                       "Multiplexed variable should produce:"
                       "\n %s\nwhich is not present in the output:\n %s"
-                      % (msg_header, "\n ".join(result.stdout.splitlines())))
+                      % (msg_header, "\n ".join(result.stdout_text.splitlines())))
         for msg_remain in msg_lines[1:]:
-            self.assertIn('[stdout] %s' % msg_remain, result.stdout,
+            self.assertIn('[stdout] %s' % msg_remain, result.stdout_text,
                           "Multiplexed variable should produce:"
                           "\n %s\nwhich is not present in the output:\n %s"
-                          % (msg_remain, "\n ".join(result.stdout.splitlines())))
+                          % (msg_remain, "\n ".join(result.stdout_text.splitlines())))
 
     def tearDown(self):
         shutil.rmtree(self.tmpdir)
diff --git a/selftests/functional/test_argument_parsing.py b/selftests/functional/test_argument_parsing.py
index 4f8fbb082e6d42426f88e987e13471685921ac55..2a2176c5ae94e032b6b2642fa1e254e98fd04893 100644
--- a/selftests/functional/test_argument_parsing.py
+++ b/selftests/functional/test_argument_parsing.py
@@ -39,8 +39,8 @@ class ArgumentParsingTest(unittest.TestCase):
         expected_rc = exit_codes.AVOCADO_FAIL
         self.assertEqual(result.exit_status, expected_rc,
                          'Avocado did not return rc %d:\n%s' % (expected_rc, result))
-        subcommand_error_msg = 'avocado run: error: unrecognized arguments: '\
-                               '--whacky-argument'
+        subcommand_error_msg = (b'avocado run: error: unrecognized arguments: '
+                                b'--whacky-argument')
         self.assertIn(subcommand_error_msg, result.stderr)
diff --git a/selftests/functional/test_basic.py b/selftests/functional/test_basic.py
index 1148381fe128d0eff8ab2b574c726d9dfb892ce1..9181a316afe266075b59ea6a95d40ea03ab1ff17 100644
--- a/selftests/functional/test_basic.py
+++ b/selftests/functional/test_basic.py
@@ -140,7 +140,7 @@ GNU_ECHO_BINARY = probe_binary('echo')
 if GNU_ECHO_BINARY is not None:
     if probe_binary('man') is not None:
         echo_manpage = process.run('man %s' % os.path.basename(GNU_ECHO_BINARY)).stdout
-        if '-e' not in echo_manpage:
+        if b'-e' not in echo_manpage:
             GNU_ECHO_BINARY = probe_binary('gecho')
 READ_BINARY = probe_binary('read')
 SLEEP_BINARY = probe_binary('sleep')
@@ -163,9 +163,9 @@ class RunnerOperationTest(unittest.TestCase):
     def test_show_version(self):
         result = process.run('%s -v' % AVOCADO, ignore_status=True)
         self.assertEqual(result.exit_status, 0)
-        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr),
+        self.assertTrue(re.match(r"^Avocado \d+\.\d+$", result.stderr_text),
                         "Version string does not match 'Avocado \\d\\.\\d:'\n"
-                        "%r" % (result.stderr))
+                        "%r" % (result.stderr_text))
 
     def test_alternate_config_datadir(self):
         """
@@ -195,9 +195,9 @@ class RunnerOperationTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s"
                          % (expected_rc, result))
-        self.assertIn(' base ' + mapping['base_dir'], result.stdout)
-        self.assertIn(' data ' + mapping['data_dir'], result.stdout)
-        self.assertIn(' logs ' + mapping['logs_dir'], result.stdout)
+        self.assertIn(' base ' + mapping['base_dir'], result.stdout_text)
+        self.assertIn(' data ' + mapping['data_dir'], result.stdout_text)
+        self.assertIn(' logs ' + mapping['logs_dir'], result.stdout_text)
 
     def test_runner_all_ok(self):
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
@@ -214,8 +214,8 @@ class RunnerOperationTest(unittest.TestCase):
                     'passtest.py failtest.py passtest.py --failfast on'
                     % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
-        self.assertIn('Interrupting job (failfast).', result.stdout)
-        self.assertIn('PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
+        self.assertIn(b'Interrupting job (failfast).', result.stdout)
+        self.assertIn(b'PASS 1 | ERROR 0 | FAIL 1 | SKIP 1', result.stdout)
         expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" % (expected_rc, result))
@@ -225,8 +225,8 @@ class RunnerOperationTest(unittest.TestCase):
                     'passtest.py badtest.py --ignore-missing-references on'
                     % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
-        self.assertIn("Unable to resolve reference(s) 'badtest.py'", result.stderr)
-        self.assertIn('PASS 1 | ERROR 0 | FAIL 0 | SKIP 0', result.stdout)
+        self.assertIn(b"Unable to resolve reference(s) 'badtest.py'", result.stderr)
+        self.assertIn(b'PASS 1 | ERROR 0 | FAIL 0 | SKIP 0', result.stdout)
         expected_rc = exit_codes.AVOCADO_ALL_OK
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" % (expected_rc, result))
@@ -236,9 +236,9 @@ class RunnerOperationTest(unittest.TestCase):
                     'badtest.py badtest2.py --ignore-missing-references on'
                     % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
-        self.assertIn("Unable to resolve reference(s) 'badtest.py', 'badtest2.py'",
+        self.assertIn(b"Unable to resolve reference(s) 'badtest.py', 'badtest2.py'",
                       result.stderr)
-        self.assertEqual('', result.stdout)
+        self.assertEqual(b'', result.stdout)
         expected_rc = exit_codes.AVOCADO_JOB_FAIL
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" % (expected_rc, result))
@@ -336,18 +336,16 @@ class RunnerOperationTest(unittest.TestCase):
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                     '--xunit - doublefail.py' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
-        output = result.stdout
         expected_rc = exit_codes.AVOCADO_TESTS_FAIL
         unexpected_rc = exit_codes.AVOCADO_FAIL
         self.assertNotEqual(result.exit_status, unexpected_rc,
                             "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" % (expected_rc, result))
-        self.assertIn("TestError: Failing during tearDown. Yay!", output,
+        self.assertIn(b"TestError: Failing during tearDown. Yay!", result.stdout,
                       "Cleanup exception not printed to log output")
-        self.assertIn("TestFail: This test is supposed to fail",
-                      output,
-                      "Test did not fail with action exception:\n%s" % output)
+        self.assertIn(b"TestFail: This test is supposed to fail", result.stdout,
+                      "Test did not fail with action exception:\n%s" % result.stdout)
 
     def test_uncaught_exception(self):
         cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
@@ -357,7 +355,7 @@ class RunnerOperationTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                 result))
-        self.assertIn('"status": "ERROR"', result.stdout)
+        self.assertIn(b'"status": "ERROR"', result.stdout)
 
     def test_fail_on_exception(self):
         cmd_line = ("%s run --sysinfo=off --job-results-dir %s "
@@ -367,7 +365,7 @@ class RunnerOperationTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" % (expected_rc,
                                                                 result))
-        self.assertIn('"status": "FAIL"', result.stdout)
+        self.assertIn(b'"status": "FAIL"', result.stdout)
 
     def test_exception_not_in_path(self):
         os.mkdir(os.path.join(self.tmpdir, "shared_lib"))
@@ -383,10 +381,10 @@ class RunnerOperationTest(unittest.TestCase):
         result = process.run("%s --show test run --sysinfo=off "
                              "--job-results-dir %s %s"
                              % (AVOCADO, self.tmpdir, mytest))
-        self.assertIn("mytest.py:SharedLibTest.test -> CancelExc: This "
-                      "should not crash on unpickling in runner",
+        self.assertIn(b"mytest.py:SharedLibTest.test -> CancelExc: This "
+                      b"should not crash on unpickling in runner",
                       result.stdout)
-        self.assertNotIn("Failed to read queue", result.stdout)
+        self.assertNotIn(b"Failed to read queue", result.stdout)
 
     def test_runner_timeout(self):
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
@@ -399,10 +397,10 @@ class RunnerOperationTest(unittest.TestCase):
                             "Avocado crashed (rc %d):\n%s" % (unexpected_rc, result))
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" % (expected_rc, result))
-        self.assertIn("Runner error occurred: Timeout reached", output,
+        self.assertIn(b"Runner error occurred: Timeout reached", output,
                       "Timeout reached message not found in the output:\n%s" % output)
         # Ensure no test aborted error messages show up
-        self.assertNotIn("TestAbortedError: Test aborted unexpectedly", output)
+        self.assertNotIn(b"TestAbortedError: Test aborted unexpectedly", output)
 
     @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                      "Skipping test that take a long time to run, are "
@@ -411,7 +409,7 @@ class RunnerOperationTest(unittest.TestCase):
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
                     '--xunit - abort.py' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
-        excerpt = 'Test died without reporting the status.'
+        excerpt = b'Test died without reporting the status.'
         expected_rc = exit_codes.AVOCADO_TESTS_FAIL
         unexpected_rc = exit_codes.AVOCADO_FAIL
         self.assertNotEqual(result.exit_status, unexpected_rc,
@@ -425,37 +423,37 @@ class RunnerOperationTest(unittest.TestCase):
                     'passtest.py' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
         self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
-        self.assertEqual(result.stdout, '')
+        self.assertEqual(result.stdout, b'')
 
     def test_empty_args_list(self):
         cmd_line = AVOCADO
         result = process.run(cmd_line, ignore_status=True)
         self.assertEqual(result.exit_status, exit_codes.AVOCADO_FAIL)
-        self.assertIn('error: too few arguments', result.stderr)
+        self.assertIn(b'error: too few arguments', result.stderr)
 
     def test_empty_test_list(self):
         cmd_line = '%s run --sysinfo=off --job-results-dir %s' % (AVOCADO,
                                                                   self.tmpdir)
         result = process.run(cmd_line, ignore_status=True)
         self.assertEqual(result.exit_status, exit_codes.AVOCADO_JOB_FAIL)
-        self.assertIn('No test references provided nor any other arguments '
-                      'resolved into tests', result.stderr)
+        self.assertIn(b'No test references provided nor any other arguments '
+                      b'resolved into tests', result.stderr)
 
     def test_not_found(self):
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s sbrubles'
                     % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
         self.assertEqual(result.exit_status, exit_codes.AVOCADO_JOB_FAIL)
-        self.assertIn('Unable to resolve reference', result.stderr)
-        self.assertNotIn('Unable to resolve reference', result.stdout)
+        self.assertIn(b'Unable to resolve reference', result.stderr)
+        self.assertNotIn(b'Unable to resolve reference', result.stdout)
 
     def test_invalid_unique_id(self):
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s --force-job-id '
                     'foobar passtest.py' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
         self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
-        self.assertIn('needs to be a 40 digit hex', result.stderr)
-        self.assertNotIn('needs to be a 40 digit hex', result.stdout)
+        self.assertIn(b'needs to be a 40 digit hex', result.stderr)
+        self.assertNotIn(b'needs to be a 40 digit hex', result.stdout)
 
     def test_valid_unique_id(self):
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
@@ -463,8 +461,8 @@ class RunnerOperationTest(unittest.TestCase):
                     'passtest.py' % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
         self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
-        self.assertNotIn('needs to be a 40 digit hex', result.stderr)
-        self.assertIn('PASS', result.stdout)
+        self.assertNotIn(b'needs to be a 40 digit hex', result.stderr)
+        self.assertIn(b'PASS', result.stdout)
 
     def test_automatic_unique_id(self):
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
@@ -530,7 +528,7 @@ class RunnerOperationTest(unittest.TestCase):
                          "Avocado did not return rc %d:\n%s"
                          % (expected_rc, result))
         self.assertIn('1-%s:MyTest.test_my_name -> TestError' % test,
-                      result.stdout)
+                      result.stdout_text)
 
     @unittest.skipIf(not READ_BINARY, "read binary not available.")
     @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
@@ -563,7 +561,7 @@ class RunnerHumanOutputTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" % (expected_rc, result))
-        self.assertIn('passtest.py:PassTest.test:  PASS', result.stdout)
+        self.assertIn(b'passtest.py:PassTest.test:  PASS', result.stdout)
 
     def test_output_fail(self):
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
@@ -573,7 +571,7 @@ class RunnerHumanOutputTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" % (expected_rc, result))
-        self.assertIn('failtest.py:FailTest.test:  FAIL', result.stdout)
+        self.assertIn(b'failtest.py:FailTest.test:  FAIL', result.stdout)
 
     def test_output_error(self):
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
@@ -583,7 +581,7 @@ class RunnerHumanOutputTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" % (expected_rc, result))
-        self.assertIn('errortest.py:ErrorTest.test:  ERROR', result.stdout)
+        self.assertIn(b'errortest.py:ErrorTest.test:  ERROR', result.stdout)
 
     def test_output_cancel(self):
         cmd_line = ('%s run --sysinfo=off --job-results-dir %s '
@@ -593,7 +591,8 @@ class RunnerHumanOutputTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" % (expected_rc, result))
-        self.assertIn('PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 1',
+        self.assertIn(b'PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | '
+                      b'INTERRUPT 0 | CANCEL 1',
                       result.stdout)
 
     @unittest.skipIf(not GNU_ECHO_BINARY,
@@ -608,10 +607,10 @@ class RunnerHumanOutputTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %s:\n%s" % (expected_rc, result))
-        self.assertIn('[stdout] foo', result.stdout, result)
-        self.assertIn('[stdout] \'"', result.stdout, result)
-        self.assertIn('[stdout] bar/baz', result.stdout, result)
-        self.assertIn('PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
+        self.assertIn(b'[stdout] foo', result.stdout, result)
+        self.assertIn(b'[stdout] \'"', result.stdout, result)
+        self.assertIn(b'[stdout] bar/baz', result.stdout, result)
+        self.assertIn(b'PASS 1-foo\\\\n\\\'\\"\\\\nbar/baz',
                       result.stdout, result)
         # logdir name should escape special chars (/)
         test_dirs = glob.glob(os.path.join(self.tmpdir, 'latest',
@@ -729,12 +728,12 @@ class RunnerSimpleTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %s:\n%s" %
                          (expected_rc, result))
-        self.assertIn('DEBUG| Debug message', result.stdout, result)
-        self.assertIn('INFO | Info message', result.stdout, result)
-        self.assertIn('WARN | Warning message (should cause this test to '
-                      'finish with warning)', result.stdout, result)
-        self.assertIn('ERROR| Error message (ordinary message not changing '
-                      'the results)', result.stdout, result)
+        self.assertIn(b'DEBUG| Debug message', result.stdout, result)
+        self.assertIn(b'INFO | Info message', result.stdout, result)
+        self.assertIn(b'WARN | Warning message (should cause this test to '
+                      b'finish with warning)', result.stdout, result)
+        self.assertIn(b'ERROR| Error message (ordinary message not changing '
+                      b'the results)', result.stdout, result)
 
     @unittest.skipIf(not GNU_ECHO_BINARY, "Uses echo as test")
     def test_fs_unfriendly_run(self):
@@ -917,8 +916,8 @@ class ExternalRunnerTest(unittest.TestCase):
                     '--external-runner=/bin/sh --external-runner-chdir=test %s'
                     % (AVOCADO, self.tmpdir, self.pass_script.path))
         result = process.run(cmd_line, ignore_status=True)
-        expected_output = ('Option "--external-runner-chdir=test" requires '
-                           '"--external-runner-testdir" to be set')
+        expected_output = (b'Option "--external-runner-chdir=test" requires '
+                           b'"--external-runner-testdir" to be set')
         self.assertIn(expected_output, result.stderr)
         expected_rc = exit_codes.AVOCADO_JOB_FAIL
         self.assertEqual(result.exit_status, expected_rc,
@@ -929,8 +928,8 @@ class ExternalRunnerTest(unittest.TestCase):
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
                     '--external-runner=%s' % (AVOCADO, self.tmpdir, TRUE_CMD))
         result = process.run(cmd_line, ignore_status=True)
-        expected_output = ('No test references provided nor any other '
-                           'arguments resolved into tests')
+        expected_output = (b'No test references provided nor any other '
+                           b'arguments resolved into tests')
         self.assertIn(expected_output, result.stderr)
         expected_rc = exit_codes.AVOCADO_JOB_FAIL
         self.assertEqual(result.exit_status, expected_rc,
@@ -972,7 +971,8 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
-        self.assertNotIn('No tests were found on current tests dir', result.stdout)
+        self.assertNotIn(b'No tests were found on current tests dir',
+                         result.stdout)
 
     def test_list_error_output(self):
         cmd_line = '%s list sbrubles' % AVOCADO
@@ -981,7 +981,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
-        self.assertIn("Unable to resolve reference", result.stderr)
+        self.assertIn(b"Unable to resolve reference", result.stderr)
 
     def test_list_no_file_loader(self):
         cmd_line = ("%s list --loaders external --verbose -- "
@@ -990,12 +990,12 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
                          "Avocado did not return rc %d:\n%s" %
                          (exit_codes.AVOCADO_ALL_OK, result))
-        exp = ("Type Test Tag(s)\n"
-               "MISSING this-wont-be-matched \n\n"
-               "TEST TYPES SUMMARY\n"
-               "==================\n"
-               "EXTERNAL: 0\n"
-               "MISSING: 1\n")
+        exp = (b"Type Test Tag(s)\n"
+               b"MISSING this-wont-be-matched \n\n"
+               b"TEST TYPES SUMMARY\n"
+               b"==================\n"
+               b"EXTERNAL: 0\n"
+               b"MISSING: 1\n")
         self.assertEqual(exp, result.stdout, "Stdout mismatch:\n%s\n\n%s"
                          % (exp, result))
 
@@ -1011,7 +1011,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK,
                          "Avocado did not return rc %d:\n%s" %
                          (exit_codes.AVOCADO_ALL_OK, result))
-        stdout_lines = result.stdout.splitlines()
+        stdout_lines = result.stdout_text.splitlines()
         self.assertIn("Tag(s)", stdout_lines[0])
         full_test_name = "%s:MyTest.test" % test
         self.assertEqual("INSTRUMENTED %s BIG_TAG_NAME" % full_test_name,
@@ -1029,7 +1029,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
         if sys.version_info[:2] >= (2, 7, 0):
-            self.assertNotIn('Disabled', result.stdout)
+            self.assertNotIn(b'Disabled', result.stdout)
 
     def test_config_plugin(self):
         cmd_line = '%s config --paginator off' % AVOCADO
@@ -1038,7 +1038,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
-        self.assertNotIn('Disabled', result.stdout)
+        self.assertNotIn(b'Disabled', result.stdout)
 
     def test_config_plugin_datadir(self):
         cmd_line = '%s config --datadir --paginator off' % AVOCADO
@@ -1047,7 +1047,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
-        self.assertNotIn('Disabled', result.stdout)
+        self.assertNotIn(b'Disabled', result.stdout)
 
     def test_disable_plugin(self):
         cmd_line = '%s plugins' % AVOCADO
@@ -1056,7 +1056,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
-        self.assertIn("Collect system information", result.stdout)
+        self.assertIn(b"Collect system information", result.stdout)
 
         config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
         config = script.TemporaryScript("disable_sysinfo_cmd.conf",
@@ -1068,7 +1068,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
-        self.assertNotIn("Collect system information", result.stdout)
+        self.assertNotIn(b"Collect system information", result.stdout)
 
     def test_plugin_order(self):
         """
@@ -1104,7 +1104,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
         for result_plugin in result_plugins:
-            self.assertIn(result_plugin, result.stdout)
+            self.assertIn(result_plugin, result.stdout_text)
 
         config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
         config_zip_first = script.TemporaryScript("zip_first.conf",
@@ -1140,7 +1140,7 @@ class PluginsTest(AbsPluginsTest, unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
-        self.assertNotIn("'Namespace' object has no attribute", result.stderr)
+        self.assertNotIn(b"'Namespace' object has no attribute", result.stderr)
 
 
 class ParseXMLError(Exception):
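[review note] The selftest conversions above settle on two idioms: a bytes literal against raw result.stdout/result.stderr when the expectation is fixed ASCII, and the decoded *_text view when the assertion needs str-only operations such as regex matching or joining splitlines() into a str message. A condensed, hypothetical test showing both (assumes an avocado binary in $PATH):

    import re
    import unittest

    from avocado.utils import process

    class OutputIdioms(unittest.TestCase):
        def test_version_output(self):
            result = process.run("avocado -v", ignore_status=True)
            # Fixed ASCII expectation: compare bytes to bytes.
            self.assertIn(b"Avocado", result.stderr)
            # Regex needs str, so match against the decoded view.
            self.assertTrue(re.match(r"^Avocado \d+\.\d+", result.stderr_text))
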
diff --git a/selftests/functional/test_interrupt.py b/selftests/functional/test_interrupt.py
index 5b1b6126b58671f6af921da9356b4051aebd0b29..3256924ee51f636d5900245a63c691a3c040ec8b 100644
--- a/selftests/functional/test_interrupt.py
+++ b/selftests/functional/test_interrupt.py
@@ -148,10 +148,10 @@ class InterruptTest(unittest.TestCase):
         output = self.proc.stdout.read()
         # Make sure the Interrupted requested sentence is there
-        self.assertIn('Interrupt requested. Waiting 2 seconds for test to '
-                      'finish (ignoring new Ctrl+C until then)', output)
+        self.assertIn(b'Interrupt requested. Waiting 2 seconds for test to '
+                      b'finish (ignoring new Ctrl+C until then)', output)
         # Make sure the Killing test subprocess message did appear
-        self.assertIn('Killing test subprocess', output)
+        self.assertIn(b'Killing test subprocess', output)
 
     @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 2,
                      "Skipping test that take a long time to run, are "
@@ -191,7 +191,7 @@ class InterruptTest(unittest.TestCase):
                                          timeout=10),
                         'Avocado left processes behind.')
         # Make sure the Interrupted test sentence is there
-        self.assertIn('Terminated\n', self.proc.stdout.read())
+        self.assertIn(b'Terminated\n', self.proc.stdout.read())
 
     @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                      "Skipping test that take a long time to run, are "
@@ -232,10 +232,10 @@ class InterruptTest(unittest.TestCase):
         output = self.proc.stdout.read()
         # Make sure the Interrupted requested sentence is there
-        self.assertIn('Interrupt requested. Waiting 2 seconds for test to '
-                      'finish (ignoring new Ctrl+C until then)', output)
+        self.assertIn(b'Interrupt requested. Waiting 2 seconds for test to '
+                      b'finish (ignoring new Ctrl+C until then)', output)
         # Make sure the Killing test subprocess message is not there
-        self.assertNotIn('Killing test subprocess', output)
+        self.assertNotIn(b'Killing test subprocess', output)
 
     @unittest.skipIf(int(os.environ.get("AVOCADO_CHECK_LEVEL", 0)) < 1,
                      "Skipping test that take a long time to run, are "
@@ -275,7 +275,7 @@ class InterruptTest(unittest.TestCase):
                                          timeout=10),
                         'Avocado left processes behind.')
         # Make sure the Interrupted test sentence is there
-        self.assertIn('Terminated\n', self.proc.stdout.read())
+        self.assertIn(b'Terminated\n', self.proc.stdout.read())
 
     def tearDown(self):
         shutil.rmtree(self.tmpdir)
diff --git a/selftests/functional/test_loader.py b/selftests/functional/test_loader.py
index d0f1c492a903c76f8177861bb9a9e94c7bb2049b..ae6427a7ba52008528df95072d15b99d5c72850d 100644
--- a/selftests/functional/test_loader.py
+++ b/selftests/functional/test_loader.py
@@ -164,7 +164,7 @@ class LoaderTestFunctional(unittest.TestCase):
         test_script.save()
         cmd_line = ('%s list -V %s' % (AVOCADO, test_script.path))
         result = process.run(cmd_line)
-        self.assertIn('%s: %s' % (exp_str, count), result.stdout)
+        self.assertIn('%s: %s' % (exp_str, count), result.stdout_text)
         test_script.remove()
 
     def _run_with_timeout(self, cmd_line, timeout):
@@ -213,7 +213,7 @@ class LoaderTestFunctional(unittest.TestCase):
                          ("Took more than 3 seconds to list tests. Loader "
                           "probably loaded/executed Python code and slept for "
                           "eleven seconds."))
-        self.assertIn('INSTRUMENTED: 2', result.stdout)
+        self.assertIn(b'INSTRUMENTED: 2', result.stdout)
 
     def test_multiple_class(self):
         self._test('multipleclasses.py', AVOCADO_TEST_MULTIPLE_CLASSES,
@@ -246,7 +246,7 @@ class LoaderTestFunctional(unittest.TestCase):
         mytest.save()
         cmd_line = "%s list -V %s" % (AVOCADO, mytest)
         result = process.run(cmd_line)
-        self.assertIn('SIMPLE: 1', result.stdout)
+        self.assertIn(b'SIMPLE: 1', result.stdout)
         # job should be able to finish under 5 seconds. If this fails, it's
         # possible that we hit the "simple test fork bomb" bug
         cmd_line = ("%s run --sysinfo=off --job-results-dir '%s' -- '%s'"
@@ -357,10 +357,10 @@ class LoaderTestFunctional(unittest.TestCase):
         """
         cmd = "%s list examples/tests/:fail" % AVOCADO
         result = process.run(cmd)
-        expected = ("INSTRUMENTED examples/tests/doublefail.py:DoubleFail.test\n"
-                    "INSTRUMENTED examples/tests/fail_on_exception.py:FailOnException.test\n"
-                    "INSTRUMENTED examples/tests/failtest.py:FailTest.test\n"
-                    "SIMPLE examples/tests/failtest.sh\n")
+        expected = (b"INSTRUMENTED examples/tests/doublefail.py:DoubleFail.test\n"
+                    b"INSTRUMENTED examples/tests/fail_on_exception.py:FailOnException.test\n"
+                    b"INSTRUMENTED examples/tests/failtest.py:FailTest.test\n"
+                    b"SIMPLE examples/tests/failtest.sh\n")
         self.assertEqual(expected, result.stdout)
 
     def tearDown(self):
diff --git a/selftests/functional/test_output.py b/selftests/functional/test_output.py
index 0bb60bbad82824cae0c507395325aa95f90357a6..2d64a3f46d718857d495dbcacc34b7bcf202739f 100644
--- a/selftests/functional/test_output.py
+++ b/selftests/functional/test_output.py
@@ -157,7 +157,7 @@ class OutputTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
-        bad_string = 'double free or corruption'
+        bad_string = b'double free or corruption'
         self.assertNotIn(bad_string, output,
                          "Libc double free can be seen in avocado "
                          "doublefree output:\n%s" % output)
@@ -313,7 +313,7 @@ class OutputPluginTest(unittest.TestCase):
                                  '(--xunit)): Options ((--xunit --json)|'
                                  '(--json --xunit)) are trying to use stdout '
                                  'simultaneously\n')
-        self.assertIsNotNone(error_regex.match(result.stderr),
+        self.assertIsNotNone(error_regex.match(result.stderr_text),
                              "Missing error message from output:\n%s" %
                              result.stderr)
 
@@ -328,7 +328,7 @@ class OutputPluginTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
-        error_excerpt = "HTML to stdout not supported"
+        error_excerpt = b"HTML to stdout not supported"
         self.assertIn(error_excerpt, output,
                       "Missing excerpt error message from output:\n%s" % output)
 
@@ -427,7 +427,7 @@ class OutputPluginTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
-        self.assertEqual(output, "", "Output is not empty:\n%s" % output)
+        self.assertEqual(output, b"", "Output is not empty:\n%s" % output)
         # Check if we are producing valid outputs
         with open(tmpfile2, 'r') as fp:
             json_results = json.load(fp)
@@ -447,7 +447,7 @@ class OutputPluginTest(unittest.TestCase):
                     "--job-results-dir %s --sysinfo=off"
                     % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line, ignore_status=True)
-        output = result.stdout + result.stderr
+        output = result.stdout_text + result.stderr_text
         expected_rc = exit_codes.AVOCADO_TESTS_FAIL
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
@@ -469,7 +469,7 @@ class OutputPluginTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
-        job_id_list = re.findall('Job ID: (.*)', result.stdout,
+        job_id_list = re.findall('Job ID: (.*)', result.stdout_text,
                                  re.MULTILINE)
         self.assertTrue(job_id_list, 'No Job ID in stdout:\n%s' %
                         result.stdout)
@@ -487,7 +487,7 @@ class OutputPluginTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
-        self.assertEqual(output, "")
+        self.assertEqual(output, b"")
 
     def test_default_enabled_plugins(self):
         cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
@@ -588,7 +588,7 @@ class OutputPluginTest(unittest.TestCase):
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
-        self.assertEqual(output, '',
+        self.assertEqual(output, b'',
                          'After redirecting to file, output is not empty: %s' % output)
         with open(redirected_output_path, 'r') as redirected_output_file_obj:
             redirected_output = redirected_output_file_obj.read()
@@ -617,7 +617,7 @@ class OutputPluginTest(unittest.TestCase):
                     "--job-results-dir %s "
                     "--tap -" % (AVOCADO, self.tmpdir))
         result = process.run(cmd_line)
-        expr = '1..4'
+        expr = b'1..4'
         self.assertIn(expr, result.stdout, "'%s' not found in:\n%s"
                       % (expr, result.stdout))
 
@@ -630,9 +630,9 @@ class OutputPluginTest(unittest.TestCase):
                          ("avocado run to broken pipe did not return "
                           "rc %d:\n%s" % (expected_rc, result)))
         self.assertEqual(len(result.stderr.splitlines()), 1)
-        self.assertIn("whacky-unknown-command", result.stderr)
-        self.assertIn("not found", result.stderr)
-        self.assertNotIn("Avocado crashed", result.stderr)
+        self.assertIn(b"whacky-unknown-command", result.stderr)
+        self.assertIn(b"not found", result.stderr)
+        self.assertNotIn(b"Avocado crashed", result.stderr)
 
     def test_results_plugins_no_tests(self):
         cmd_line = ("%s run UNEXISTING --job-results-dir %s"
@@ -650,11 +650,11 @@ class OutputPluginTest(unittest.TestCase):
         self.assertFalse(os.path.exists(tap_results))
 
         # Check that no UI output was generated
-        self.assertNotIn("RESULTS : PASS ", result.stdout)
-        self.assertNotIn("JOB TIME :", result.stdout)
+        self.assertNotIn(b"RESULTS : PASS ", result.stdout)
+        self.assertNotIn(b"JOB TIME :", result.stdout)
 
         # Check that plugins do not produce errors
-        self.assertNotIn("Error running method ", result.stderr)
+        self.assertNotIn(b"Error running method ", result.stderr)
 
     def tearDown(self):
         shutil.rmtree(self.tmpdir)
diff --git a/selftests/functional/test_plugin_diff.py b/selftests/functional/test_plugin_diff.py
index 564c06cf66e415c87f602a1d0f3d257a5ab0fb62..25ef7fc997cbcd350a5aa99f52d1c56a50c625f9 100644
--- a/selftests/functional/test_plugin_diff.py
+++ b/selftests/functional/test_plugin_diff.py
@@ -50,20 +50,16 @@ class DiffTests(unittest.TestCase):
                     (AVOCADO, self.jobdir, self.jobdir2))
         expected_rc = exit_codes.AVOCADO_ALL_OK
         result = self.run_and_check(cmd_line, expected_rc)
-        msg = "# COMMAND LINE"
-        self.assertIn(msg, result.stdout)
-        msg = "-./scripts/avocado run"
-        self.assertIn(msg, result.stdout)
-        msg = "+./scripts/avocado run"
-        self.assertIn(msg, result.stdout)
+        self.assertIn(b"# COMMAND LINE", result.stdout)
+        self.assertIn(b"-./scripts/avocado run", result.stdout)
+        self.assertIn(b"+./scripts/avocado run", result.stdout)
 
     def test_diff_nocmdline(self):
         cmd_line = ('%s diff %s %s --diff-filter nocmdline' %
                     (AVOCADO, self.jobdir, self.jobdir2))
         expected_rc = exit_codes.AVOCADO_ALL_OK
         result = self.run_and_check(cmd_line, expected_rc)
-        msg = "# COMMAND LINE"
-        self.assertNotIn(msg, result.stdout)
+        self.assertNotIn(b"# COMMAND LINE", result.stdout)
 
     def tearDown(self):
         shutil.rmtree(self.tmpdir)
diff --git a/selftests/functional/test_plugin_jobscripts.py b/selftests/functional/test_plugin_jobscripts.py
index 78407240d798dc4e5ed26bb0532b2571b7ac229f..1fd775f92b0d6b27a73d164d1a3578458c12507b 100644
--- a/selftests/functional/test_plugin_jobscripts.py
+++ b/selftests/functional/test_plugin_jobscripts.py
@@ -79,9 +79,9 @@ class JobScriptsTest(unittest.TestCase):
         # Pre/Post scripts failures do not (currently?) alter the exit status
         self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
         self.assertNotIn('Pre job script "%s" exited with status "1"' % touch_script,
-                         result.stderr)
+                         result.stderr_text)
         self.assertNotIn('Post job script "%s" exited with status "1"' % rm_script,
-                         result.stderr)
+                         result.stderr_text)
 
     def test_status_non_zero(self):
         """
@@ -102,7 +102,7 @@ class JobScriptsTest(unittest.TestCase):
         # Pre/Post scripts failures do not (currently?) alter the exit status
         self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
         self.assertEqual('Pre job script "%s" exited with status "1"\n' % non_zero_script,
-                         result.stderr)
+                         result.stderr_text)
 
     def test_non_existing_dir(self):
         """
@@ -124,9 +124,9 @@ class JobScriptsTest(unittest.TestCase):
 
         # Pre/Post scripts failures do not (currently?) alter the exit status
         self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
-        self.assertIn('-job scripts has not been found', result.stderr)
+        self.assertIn(b'-job scripts has not been found', result.stderr)
         self.assertNotIn('Pre job script "%s" exited with status "1"' % non_zero_script,
-                         result.stderr)
+                         result.stderr_text)
 
     def tearDown(self):
         shutil.rmtree(self.tmpdir)
diff --git a/selftests/functional/test_replay_basic.py b/selftests/functional/test_replay_basic.py
index a06d3801ce1c2019676927616c438b60852fd852..56f94d315f07d0d510495ce83ae86c526c74f4c8 100644
--- a/selftests/functional/test_replay_basic.py
+++ b/selftests/functional/test_replay_basic.py
@@ -106,8 +106,8 @@ class ReplayTests(unittest.TestCase):
                     % (AVOCADO, self.jobid, self.tmpdir))
         expected_rc = exit_codes.AVOCADO_FAIL
         result = self.run_and_check(cmd_line, expected_rc)
-        msg = 'Invalid --replay-ignore option. Valid options are ' \
-              '(more than one allowed): variants,config'
+        msg = (b'Invalid --replay-ignore option. Valid options are '
+               b'(more than one allowed): variants,config')
         self.assertIn(msg, result.stderr)
 
     def test_run_replay_ignorevariants(self):
@@ -119,7 +119,7 @@ class ReplayTests(unittest.TestCase):
                     % (AVOCADO, self.jobid, self.tmpdir))
         expected_rc = exit_codes.AVOCADO_ALL_OK
         result = self.run_and_check(cmd_line, expected_rc)
-        msg = 'Ignoring variants from source job with --replay-ignore.'
+        msg = b'Ignoring variants from source job with --replay-ignore.'
         self.assertIn(msg, result.stderr)
 
     def test_run_replay_invalidstatus(self):
@@ -131,8 +131,8 @@ class ReplayTests(unittest.TestCase):
                     % (AVOCADO, self.jobid, self.tmpdir))
         expected_rc = exit_codes.AVOCADO_FAIL
         result = self.run_and_check(cmd_line, expected_rc)
-        msg = 'Invalid --replay-test-status option. Valid options are (more ' \
-              'than one allowed): SKIP,ERROR,FAIL,WARN,PASS,INTERRUPTED'
+        msg = (b'Invalid --replay-test-status option. Valid options are (more '
+               b'than one allowed): SKIP,ERROR,FAIL,WARN,PASS,INTERRUPTED')
         self.assertIn(msg, result.stderr)
 
     def test_run_replay_statusfail(self):
@@ -144,7 +144,8 @@ class ReplayTests(unittest.TestCase):
                     % (AVOCADO, self.jobid, self.tmpdir))
         expected_rc = exit_codes.AVOCADO_ALL_OK
         result = self.run_and_check(cmd_line, expected_rc)
-        msg = 'RESULTS : PASS 0 | ERROR 0 | FAIL 0 | SKIP 4 | WARN 0 | INTERRUPT 0'
+        msg = (b'RESULTS : PASS 0 | ERROR 0 | FAIL 0 | SKIP 4 | WARN 0 | '
+               b'INTERRUPT 0')
         self.assertIn(msg, result.stdout)
 
     def test_run_replay_remotefail(self):
@@ -156,7 +157,7 @@ class ReplayTests(unittest.TestCase):
                     % (AVOCADO, self.jobid, self.tmpdir))
         expected_rc = exit_codes.AVOCADO_FAIL
         result = self.run_and_check(cmd_line, expected_rc)
-        msg = "Currently we don't replay jobs in remote hosts."
+        msg = b"Currently we don't replay jobs in remote hosts."
         self.assertIn(msg, result.stderr)
 
     def test_run_replay_status_and_variants(self):
@@ -168,8 +169,8 @@ class ReplayTests(unittest.TestCase):
                     '--sysinfo=off' % (AVOCADO, self.jobid, self.tmpdir))
         expected_rc = exit_codes.AVOCADO_FAIL
         result = self.run_and_check(cmd_line, expected_rc)
-        msg = ("Option `--replay-test-status` is incompatible with "
-               "`--replay-ignore variants`")
+        msg = (b"Option `--replay-test-status` is incompatible with "
+               b"`--replay-ignore variants`")
         self.assertIn(msg, result.stderr)
 
     def test_run_replay_status_and_references(self):
@@ -181,8 +182,8 @@ class ReplayTests(unittest.TestCase):
                     '--sysinfo=off' % (AVOCADO, self.jobid, self.tmpdir))
         expected_rc = exit_codes.AVOCADO_FAIL
         result = self.run_and_check(cmd_line, expected_rc)
-        msg = ("Option --replay-test-status is incompatible with "
-               "test references given on the command line.")
+        msg = (b"Option --replay-test-status is incompatible with "
+               b"test references given on the command line.")
         self.assertIn(msg, result.stderr)
 
     def test_run_replay_and_mux(self):
diff --git a/selftests/functional/test_replay_external_runner.py b/selftests/functional/test_replay_external_runner.py
index 4ee8053fd926536abef970e5e09a0420c876952c..6b7bb2a90ee5c80891def5bb9a80365948a7eac0 100644
--- a/selftests/functional/test_replay_external_runner.py
+++ b/selftests/functional/test_replay_external_runner.py
@@ -47,8 +47,8 @@ class ReplayExtRunnerTests(unittest.TestCase):
                     % (AVOCADO, self.jobid, self.tmpdir))
         expected_rc = exit_codes.AVOCADO_ALL_OK
         result = self.run_and_check(cmd_line, expected_rc)
-        msg = "Overriding the replay external-runner with the "\
-              "--external-runner value given on the command line."
+        msg = (b"Overriding the replay external-runner with the "
+               b"--external-runner value given on the command line.")
         self.assertIn(msg, result.stderr)
 
     def tearDown(self):
diff --git a/selftests/functional/test_replay_failfast.py b/selftests/functional/test_replay_failfast.py
index 014107275bd0ed8d4a425169d13bf78206ffd858..e58f1f204dd5906ff471060c32ec59b25aa2dc7d 100644
--- a/selftests/functional/test_replay_failfast.py
+++ b/selftests/functional/test_replay_failfast.py
@@ -49,7 +49,8 @@ class ReplayFailfastTests(unittest.TestCase):
                     % (AVOCADO, self.jobid, self.tmpdir))
         expected_rc = exit_codes.AVOCADO_TESTS_FAIL
         result = self.run_and_check(cmd_line, expected_rc)
-        msg = 'Overriding the replay failfast with the --failfast value given on the command line.'
+        msg = (b'Overriding the replay failfast with the --failfast value '
+               b'given on the command line.')
         self.assertIn(msg, result.stderr)
 
     def tearDown(self):
diff --git a/selftests/functional/test_standalone.py b/selftests/functional/test_standalone.py
index b6734a48b0492a024ef49f26b677903e9d983d13..3458eed47981d73bff238f3d161800d4766ec83d 100644
--- a/selftests/functional/test_standalone.py
+++ b/selftests/functional/test_standalone.py
@@ -50,7 +50,7 @@ class StandaloneTests(unittest.TestCase):
             exc = "errortest_nasty.NastyException: Nasty-string-like-exception"
         else:
             exc = "NastyException: Nasty-string-like-exception"
-        count = result.stdout.count("\n%s" % exc)
+        count = result.stdout_text.count("\n%s" % exc)
         self.assertEqual(count, 2, "Exception \\n%s should be present twice in"
                          "the log (once from the log, second time when parsing"
                          "exception details." % (exc))
@@ -59,17 +59,17 @@ class StandaloneTests(unittest.TestCase):
         cmd_line = './examples/tests/errortest_nasty2.py -r'
         expected_rc = exit_codes.AVOCADO_TESTS_FAIL
         result = self.run_and_check(cmd_line, expected_rc, 'errortest_nasty2')
-        self.assertIn("Exception: Unable to get exception, check the traceback"
-                      " for details.", result.stdout)
+        self.assertIn(b"Exception: Unable to get exception, check the traceback"
+                      b" for details.", result.stdout)
 
     def test_errortest_nasty3(self):
         cmd_line = './examples/tests/errortest_nasty3.py -r'
         expected_rc = exit_codes.AVOCADO_TESTS_FAIL
         result = self.run_and_check(cmd_line, expected_rc, 'errortest_nasty3')
         if sys.version_info[0] == 3:
-            exc = "TypeError: exceptions must derive from BaseException"
+            exc = b"TypeError: exceptions must derive from BaseException"
         else:
-            exc = "TestError: