From 5a57585905f5588d53b1e1cd2724b19b54810299 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Tue, 11 Oct 2022 16:49:24 +0800 Subject: [PATCH] [CodeStyle] remove compat module (long_type, int_type, get_exception_message, floor_division) (#46686) --- python/paddle/compat.py | 35 ------------------- python/paddle/fluid/core.py | 15 ++++---- .../fluid/tests/unittests/dist_transformer.py | 19 +++++----- .../fluid/tests/unittests/test_compat.py | 28 --------------- .../fluid/tests/unittests/test_dataset.py | 4 +-- .../fluid/tests/unittests/test_exception.py | 3 +- .../test_imperative_data_loader_exception.py | 6 ++-- .../test_imperative_data_loader_exit_func.py | 2 +- .../unittests/test_imperative_double_grad.py | 6 ++-- .../test_imperative_signal_handler.py | 7 ++-- .../tests/unittests/test_lookup_table_op.py | 6 ++-- .../unittests/test_lookup_table_v2_op.py | 2 +- .../tests/unittests/test_operator_desc.py | 5 ++- .../fluid/tests/unittests/test_optimizer.py | 2 +- .../fluid/tests/unittests/test_prune.py | 2 +- .../fluid/tests/unittests/test_zeros_op.py | 2 +- .../xpu/test_lookup_table_v2_op_xpu.py | 2 +- 17 files changed, 37 insertions(+), 109 deletions(-) diff --git a/python/paddle/compat.py b/python/paddle/compat.py index c94132e4398..c278041b461 100644 --- a/python/paddle/compat.py +++ b/python/paddle/compat.py @@ -17,9 +17,6 @@ import math __all__ = [] -int_type = int -long_type = int - # str and bytes related functions def to_text(obj, encoding='utf-8', inplace=False): @@ -227,35 +224,3 @@ def round(x, d=0): else: import __builtin__ return __builtin__.round(x, d) - - -def floor_division(x, y): - """ - Compatible division which act the same behaviour in Python3 and Python2, - whose result will be a int value of floor(x / y) in Python3 and value of - (x / y) in Python2. - - Args: - x(int|float) : The number to divide. - y(int|float) : The number to be divided - - Returns: - division result of x // y - """ - return x // y - - -# exception related functions -def get_exception_message(exc): - """ - Get the error message of a specific exception - - Args: - exec(Exception) : The exception to get error message. - - Returns: - the error message of exec - """ - assert exc is not None - - return str(exc) diff --git a/python/paddle/fluid/core.py b/python/paddle/fluid/core.py index 7c7fff32fb2..1654fd827e5 100644 --- a/python/paddle/fluid/core.py +++ b/python/paddle/fluid/core.py @@ -47,18 +47,17 @@ except ImportError as e: from .. import compat as cpt if os.name == 'nt': executable_path = os.path.abspath(os.path.dirname(sys.executable)) - raise ImportError( - """NOTE: You may need to run \"set PATH=%s;%%PATH%%\" + raise ImportError("""NOTE: You may need to run \"set PATH=%s;%%PATH%%\" if you encounters \"DLL load failed\" errors. If you have python installed in other directory, replace \"%s\" with your own directory. The original error is: \n %s""" % - (executable_path, executable_path, cpt.get_exception_message(e))) + (executable_path, executable_path, str(e))) else: raise ImportError( """NOTE: You may need to run \"export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH\" if you encounters \"libmkldnn.so not found\" errors. If you have python installed in other directory, replace \"/usr/local/lib\" with your own - directory. The original error is: \n""" + cpt.get_exception_message(e)) + directory. 
The original error is: \n""" + str(e)) except Exception as e: raise e @@ -75,8 +74,7 @@ def avx_supported(): has_avx = os.popen('cat /proc/cpuinfo | grep -i avx').read() != '' except Exception as e: sys.stderr.write('Can not get the AVX flag from /proc/cpuinfo.\n' - 'The original error is: %s\n' % - cpt.get_exception_message(e)) + 'The original error is: %s\n' % str(e)) return has_avx elif sysstr == 'darwin': try: @@ -85,7 +83,7 @@ def avx_supported(): except Exception as e: sys.stderr.write( 'Can not get the AVX flag from machdep.cpu.features.\n' - 'The original error is: %s\n' % cpt.get_exception_message(e)) + 'The original error is: %s\n' % str(e)) if not has_avx: import subprocess pipe = subprocess.Popen( @@ -155,8 +153,7 @@ def avx_supported(): ctypes.c_size_t(0), ONE_PAGE) except Exception as e: sys.stderr.write('Failed getting the AVX flag on Windows.\n' - 'The original error is: %s\n' % - cpt.get_exception_message(e)) + 'The original error is: %s\n' % str(e)) return (retval & (1 << avx_bit)) > 0 else: sys.stderr.write('Do not get AVX flag on %s\n' % sysstr) diff --git a/python/paddle/fluid/tests/unittests/dist_transformer.py b/python/paddle/fluid/tests/unittests/dist_transformer.py index b37b70cfeed..32478b8c5fa 100644 --- a/python/paddle/fluid/tests/unittests/dist_transformer.py +++ b/python/paddle/fluid/tests/unittests/dist_transformer.py @@ -27,7 +27,6 @@ import paddle.fluid as fluid import paddle.fluid.layers as layers from test_dist_base import TestDistRunnerBase, runtime_main, RUN_STEP import paddle.compat as cpt -from paddle.compat import long_type const_para_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant(0.001)) const_bias_attr = const_para_attr @@ -173,10 +172,10 @@ seq_len = ModelHyperParams.max_length input_descs = { # The actual data shape of src_word is: # [batch_size * max_src_len_in_batch, 1] - "src_word": [(batch_size, seq_len, long_type(1)), "int64", 2], + "src_word": [(batch_size, seq_len, 1), "int64", 2], # The actual data shape of src_pos is: # [batch_size * max_src_len_in_batch, 1] - "src_pos": [(batch_size, seq_len, long_type(1)), "int64"], + "src_pos": [(batch_size, seq_len, 1), "int64"], # This input is used to remove attention weights on paddings in the # encoder. # The actual data shape of src_slf_attn_bias is: @@ -185,11 +184,11 @@ input_descs = { [(batch_size, ModelHyperParams.n_head, seq_len, seq_len), "float32"], # The actual data shape of trg_word is: # [batch_size * max_trg_len_in_batch, 1] - "trg_word": [(batch_size, seq_len, long_type(1)), "int64", + "trg_word": [(batch_size, seq_len, 1), "int64", 2], # lod_level is only used in fast decoder. # The actual data shape of trg_pos is: # [batch_size * max_trg_len_in_batch, 1] - "trg_pos": [(batch_size, seq_len, long_type(1)), "int64"], + "trg_pos": [(batch_size, seq_len, 1), "int64"], # This input is used to remove attention weights on paddings and # subsequent words in the decoder. # The actual data shape of trg_slf_attn_bias is: @@ -208,15 +207,15 @@ input_descs = { "enc_output": [(batch_size, seq_len, ModelHyperParams.d_model), "float32"], # The actual data shape of label_word is: # [batch_size * max_trg_len_in_batch, 1] - "lbl_word": [(batch_size * seq_len, long_type(1)), "int64"], + "lbl_word": [(batch_size * seq_len, 1), "int64"], # This input is used to mask out the loss of padding tokens. 
# The actual data shape of label_weight is: # [batch_size * max_trg_len_in_batch, 1] - "lbl_weight": [(batch_size * seq_len, long_type(1)), "float32"], + "lbl_weight": [(batch_size * seq_len, 1), "float32"], # These inputs are used to change the shape tensor in beam-search decoder. - "trg_slf_attn_pre_softmax_shape_delta": [(long_type(2), ), "int32"], - "trg_slf_attn_post_softmax_shape_delta": [(long_type(4), ), "int32"], - "init_score": [(batch_size, long_type(1)), "float32"], + "trg_slf_attn_pre_softmax_shape_delta": [(2, ), "int32"], + "trg_slf_attn_post_softmax_shape_delta": [(4, ), "int32"], + "init_score": [(batch_size, 1), "float32"], } # Names of word embedding table which might be reused for weight sharing. diff --git a/python/paddle/fluid/tests/unittests/test_compat.py b/python/paddle/fluid/tests/unittests/test_compat.py index 7ff25fcfddd..e7ecdeda8b6 100644 --- a/python/paddle/fluid/tests/unittests/test_compat.py +++ b/python/paddle/fluid/tests/unittests/test_compat.py @@ -18,10 +18,6 @@ import paddle.compat as cpt class TestCompatible(unittest.TestCase): - def test_type(self): - self.assertEqual(cpt.int_type, int) - self.assertEqual(cpt.long_type, int) - def test_to_text(self): self.assertIsNone(cpt.to_text(None)) @@ -252,30 +248,6 @@ class TestCompatible(unittest.TestCase): self.assertEqual(5.0, cpt.round(5)) self.assertRaises(TypeError, cpt.round, None) - def test_floor_division(self): - self.assertEqual(0.0, cpt.floor_division(3, 4)) - self.assertEqual(1.0, cpt.floor_division(4, 3)) - self.assertEqual(2.0, cpt.floor_division(6, 3)) - self.assertEqual(-2.0, cpt.floor_division(-4, 3)) - self.assertEqual(-2.0, cpt.floor_division(-6, 3)) - self.assertRaises(ZeroDivisionError, cpt.floor_division, 3, 0) - self.assertRaises(TypeError, cpt.floor_division, None, None) - - def test_get_exception_message(self): - exception_message = "test_message" - self.assertRaises(AssertionError, cpt.get_exception_message, None) - try: - raise RuntimeError(exception_message) - except Exception as e: - self.assertEqual(exception_message, cpt.get_exception_message(e)) - self.assertIsNotNone(e) - - try: - raise Exception(exception_message) - except Exception as e: - self.assertEqual(exception_message, cpt.get_exception_message(e)) - self.assertIsNotNone(e) - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_dataset.py b/python/paddle/fluid/tests/unittests/test_dataset.py index 83a16e43f39..13b6b9838cc 100644 --- a/python/paddle/fluid/tests/unittests/test_dataset.py +++ b/python/paddle/fluid/tests/unittests/test_dataset.py @@ -904,7 +904,7 @@ class TestDatasetWithFetchHandler(unittest.TestCase): print("warning: we skip trainer_desc_pb2 import problem in windows") except RuntimeError as e: error_msg = "dataset is need and should be initialized" - self.assertEqual(error_msg, cpt.get_exception_message(e)) + self.assertEqual(error_msg, str(e)) except Exception as e: self.assertTrue(False) @@ -948,7 +948,7 @@ class TestDatasetWithFetchHandler(unittest.TestCase): print("warning: we skip trainer_desc_pb2 import problem in windows") except RuntimeError as e: error_msg = "dataset is need and should be initialized" - self.assertEqual(error_msg, cpt.get_exception_message(e)) + self.assertEqual(error_msg, str(e)) except Exception as e: self.assertTrue(False) diff --git a/python/paddle/fluid/tests/unittests/test_exception.py b/python/paddle/fluid/tests/unittests/test_exception.py index 266c83106f2..a3d9e744481 100644 --- 
a/python/paddle/fluid/tests/unittests/test_exception.py +++ b/python/paddle/fluid/tests/unittests/test_exception.py @@ -28,8 +28,7 @@ class TestException(unittest.TestCase): try: core.__unittest_throw_exception__() except RuntimeError as ex: - self.assertIn("This is a test of exception", - cpt.get_exception_message(ex)) + self.assertIn("This is a test of exception", str(ex)) exception = ex self.assertIsNotNone(exception) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exception.py b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exception.py index 4cc6d813bf8..a9fed3cae36 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exception.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exception.py @@ -67,8 +67,7 @@ class TestDygraphDataLoaderWithException(unittest.TestCase): for _ in loader(): print("test_single_process_with_thread_expection") except core.EnforceNotMet as ex: - self.assertIn("Blocking queue is killed", - cpt.get_exception_message(ex)) + self.assertIn("Blocking queue is killed", str(ex)) exception = ex self.assertIsNotNone(exception) @@ -130,8 +129,7 @@ class TestDygraphDataLoaderWithException(unittest.TestCase): for image, _ in loader(): fluid.layers.relu(image) except core.EnforceNotMet as ex: - self.assertIn("Blocking queue is killed", - cpt.get_exception_message(ex)) + self.assertIn("Blocking queue is killed", str(ex)) exception = ex self.assertIsNotNone(exception) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exit_func.py b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exit_func.py index 8c201d85a9c..b1c2f7715b6 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exit_func.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exit_func.py @@ -57,7 +57,7 @@ class TestRegisterExitFunc(unittest.TestCase): try: CleanupFuncRegistrar.register(5) except TypeError as ex: - self.assertIn("is not callable", cpt.get_exception_message(ex)) + self.assertIn("is not callable", str(ex)) exception = ex self.assertIsNotNone(exception) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py index ae1fe6d3706..5fb80c80d9d 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py @@ -109,7 +109,7 @@ class TestEagerGrad(TestCase): # allow_unused is false in default dx = fluid.dygraph.grad(out, [x, z]) except ValueError as e: - error_msg = cpt.get_exception_message(e) + error_msg = str(e) assert error_msg.find("allow_unused") > 0 def test_simple_example_eager_grad_not_allow_unused(self): @@ -133,7 +133,7 @@ class TestEagerGrad(TestCase): # duplicate input will arise RuntimeError errors dx = fluid.dygraph.grad(out, [x, x]) except RuntimeError as e: - error_msg = cpt.get_exception_message(e) + error_msg = str(e) assert error_msg.find("duplicate") > 0 def test_simple_example_eager_grad_duplicate_input(self): @@ -157,7 +157,7 @@ class TestEagerGrad(TestCase): # duplicate output will arise RuntimeError errors dx = fluid.dygraph.grad([out, out], [x]) except RuntimeError as e: - error_msg = cpt.get_exception_message(e) + error_msg = str(e) assert error_msg.find("duplicate") > 0 def test_simple_example_eager_grad_duplicate_output(self): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py 
b/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py index 3f2a897b6b3..72be7aafaf0 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py @@ -55,7 +55,7 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase): set_child_signal_handler(id(self), test_process.pid) time.sleep(5) except SystemError as ex: - self.assertIn("Fatal", cpt.get_exception_message(ex)) + self.assertIn("Fatal", str(ex)) exception = ex return exception @@ -88,8 +88,7 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase): set_child_signal_handler(id(self), test_process.pid) time.sleep(5) except SystemError as ex: - self.assertIn("Segmentation fault", - cpt.get_exception_message(ex)) + self.assertIn("Segmentation fault", str(ex)) exception = ex return exception @@ -122,7 +121,7 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase): set_child_signal_handler(id(self), test_process.pid) time.sleep(5) except SystemError as ex: - self.assertIn("Bus error", cpt.get_exception_message(ex)) + self.assertIn("Bus error", str(ex)) exception = ex return exception diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py index 1470ae4df56..ce79317866b 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py @@ -82,7 +82,7 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds): flatten_idx = ids.flatten() padding_idx = np.random.choice(flatten_idx, 1)[0] self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31) - self.attrs = {'padding_idx': cpt.long_type(padding_idx)} + self.attrs = {'padding_idx': padding_idx} self.check_output() @@ -250,7 +250,7 @@ class TestLookupTableOpWithTensorIdsAndPaddingInt8( flatten_idx = ids.flatten() padding_idx = np.random.choice(flatten_idx, 1)[0] self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31) - self.attrs = {'padding_idx': cpt.long_type(padding_idx)} + self.attrs = {'padding_idx': padding_idx} self.check_output() def test_check_grad(self): @@ -380,7 +380,7 @@ class TestLookupTableOpWithTensorIdsAndPaddingInt16( flatten_idx = ids.flatten() padding_idx = np.random.choice(flatten_idx, 1)[0] self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31) - self.attrs = {'padding_idx': cpt.long_type(padding_idx)} + self.attrs = {'padding_idx': padding_idx} self.check_output() diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py index 5552d2cd68f..3b0a0934260 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py @@ -123,7 +123,7 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds): flatten_idx = ids.flatten() padding_idx = np.random.choice(flatten_idx, 1)[0] self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31) - self.attrs = {'padding_idx': cpt.long_type(padding_idx)} + self.attrs = {'padding_idx': padding_idx} self.check_output() diff --git a/python/paddle/fluid/tests/unittests/test_operator_desc.py b/python/paddle/fluid/tests/unittests/test_operator_desc.py index 7cf5b36f764..2b2c7287014 100644 --- a/python/paddle/fluid/tests/unittests/test_operator_desc.py +++ b/python/paddle/fluid/tests/unittests/test_operator_desc.py @@ -31,15 +31,14 @@ 
class TestOperator(unittest.TestCase): self.assertFail() except ValueError as v_err: self.assertEqual( - cpt.get_exception_message(v_err), + str(v_err), "`type` to initialized an Operator can not be None.") try: block.append_op(type="no_such_op") self.assertFail() except ValueError as a_err: self.assertEqual( - cpt.get_exception_message(a_err), - "Operator \"no_such_op\" has not been registered.") + str(a_err), "Operator \"no_such_op\" has not been registered.") def test_op_desc_creation(self): program = Program() diff --git a/python/paddle/fluid/tests/unittests/test_optimizer.py b/python/paddle/fluid/tests/unittests/test_optimizer.py index 8e127f3c60c..52ce642c0e7 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer.py @@ -990,7 +990,7 @@ class TestRecomputeOptimizer(unittest.TestCase): except NotImplementedError as e: self.assertEqual( "load function is not supported by Recompute Optimizer for now", - cpt.get_exception_message(e)) + str(e)) def test_dropout(self): """ diff --git a/python/paddle/fluid/tests/unittests/test_prune.py b/python/paddle/fluid/tests/unittests/test_prune.py index 4899d5c4d48..dc44f99d670 100644 --- a/python/paddle/fluid/tests/unittests/test_prune.py +++ b/python/paddle/fluid/tests/unittests/test_prune.py @@ -100,7 +100,7 @@ class TestPrune(unittest.TestCase): except ValueError as e: self.assertIn( "All targets of Program._prune_with_input() can only be Variable or Operator", - cpt.get_exception_message(e)) + str(e)) def mock(self, program, feed, fetch, optimize_ops): diff --git a/python/paddle/fluid/tests/unittests/test_zeros_op.py b/python/paddle/fluid/tests/unittests/test_zeros_op.py index 84d83d6a7b5..3072a04f257 100644 --- a/python/paddle/fluid/tests/unittests/test_zeros_op.py +++ b/python/paddle/fluid/tests/unittests/test_zeros_op.py @@ -103,7 +103,7 @@ class ApiZerosError(unittest.TestCase): shape = [-1, 5] out = paddle.zeros(shape) except Exception as e: - error_msg = cpt.get_exception_message(e) + error_msg = str(e) assert error_msg.find("expected to be no less than 0") > 0 def test_eager(self): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py index 77f09fc9864..6bb54c8f74e 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py @@ -95,7 +95,7 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds): flatten_idx = ids.flatten() padding_idx = np.random.choice(flatten_idx, 1)[0] self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31) - self.attrs = {'padding_idx': cpt.long_type(padding_idx)} + self.attrs = {'padding_idx': padding_idx} self.check_output_with_place(place=paddle.XPUPlace(0)) -- GitLab
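
For reference, below is a minimal, illustrative sketch of the replacement patterns this patch applies. The removed paddle.compat helpers all had trivial Python 3 equivalents, which is why every call site above collapses to plain built-ins. The names in the sketch (migrated_examples, padding_idx, attrs, and the example exception) are illustrative only and are not identifiers taken from the patch or from Paddle itself.

    # Plain-Python-3 equivalents of the removed paddle.compat helpers,
    # shown as a small self-contained sketch (assumed names, not Paddle code).

    def migrated_examples():
        # before: error_msg = cpt.get_exception_message(e)
        try:
            raise RuntimeError("example error")
        except RuntimeError as e:
            error_msg = str(e)  # get_exception_message(e) simply returned str(e)

        # before: attrs = {'padding_idx': cpt.long_type(padding_idx)}
        padding_idx = 7                       # already an int on Python 3
        attrs = {'padding_idx': padding_idx}  # long_type/int_type were aliases of int

        # before: result = cpt.floor_division(6, 4)
        result = 6 // 4                       # floor_division(x, y) returned x // y

        return error_msg, attrs, result

As the hunks above show, where the old code wrapped a value that is already an int (for example cpt.long_type(1) or cpt.long_type(padding_idx)), the wrapper is dropped entirely rather than replaced with an int(...) call.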