diff --git a/python/paddle/compat.py b/python/paddle/compat.py
index c94132e43981f4f86fa0c084da38bbc19cfed8c2..c278041b4612626c1e9965b81a7f597e1f0c5a27 100644
--- a/python/paddle/compat.py
+++ b/python/paddle/compat.py
@@ -17,9 +17,6 @@ import math
 
 __all__ = []
 
-int_type = int
-long_type = int
-
 
 # str and bytes related functions
 def to_text(obj, encoding='utf-8', inplace=False):
@@ -227,35 +224,3 @@ def round(x, d=0):
     else:
         import __builtin__
         return __builtin__.round(x, d)
-
-
-def floor_division(x, y):
-    """
-    Compatible division which act the same behaviour in Python3 and Python2,
-    whose result will be a int value of floor(x / y) in Python3 and value of
-    (x / y) in Python2.
-
-    Args:
-        x(int|float) : The number to divide.
-        y(int|float) : The number to be divided
-
-    Returns:
-        division result of x // y
-    """
-    return x // y
-
-
-# exception related functions
-def get_exception_message(exc):
-    """
-    Get the error message of a specific exception
-
-    Args:
-        exec(Exception) : The exception to get error message.
-
-    Returns:
-        the error message of exec
-    """
-    assert exc is not None
-
-    return str(exc)
diff --git a/python/paddle/fluid/core.py b/python/paddle/fluid/core.py
index 7c7fff32fb268cadd06449327d542d9435d2ede5..1654fd827e5ec76c87ec310a0da34db6a6d201ad 100644
--- a/python/paddle/fluid/core.py
+++ b/python/paddle/fluid/core.py
@@ -47,18 +47,17 @@ except ImportError as e:
     from .. import compat as cpt
     if os.name == 'nt':
         executable_path = os.path.abspath(os.path.dirname(sys.executable))
-        raise ImportError(
-            """NOTE: You may need to run \"set PATH=%s;%%PATH%%\"
+        raise ImportError("""NOTE: You may need to run \"set PATH=%s;%%PATH%%\"
         if you encounters \"DLL load failed\" errors. If you have python
         installed in other directory, replace \"%s\" with your own
         directory. The original error is: \n %s""" %
-            (executable_path, executable_path, cpt.get_exception_message(e)))
+            (executable_path, executable_path, str(e)))
     else:
         raise ImportError(
             """NOTE: You may need to run \"export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH\"
         if you encounters \"libmkldnn.so not found\" errors. If you have python
         installed in other directory, replace \"/usr/local/lib\" with your own
-            directory. The original error is: \n""" + cpt.get_exception_message(e))
+            directory. The original error is: \n""" + str(e))
 except Exception as e:
     raise e
 
@@ -75,8 +74,7 @@ def avx_supported():
             has_avx = os.popen('cat /proc/cpuinfo | grep -i avx').read() != ''
         except Exception as e:
             sys.stderr.write('Can not get the AVX flag from /proc/cpuinfo.\n'
-                             'The original error is: %s\n' %
-                             cpt.get_exception_message(e))
+                             'The original error is: %s\n' % str(e))
         return has_avx
     elif sysstr == 'darwin':
         try:
@@ -85,7 +83,7 @@
         except Exception as e:
             sys.stderr.write(
                 'Can not get the AVX flag from machdep.cpu.features.\n'
-                'The original error is: %s\n' % cpt.get_exception_message(e))
+                'The original error is: %s\n' % str(e))
         if not has_avx:
             import subprocess
             pipe = subprocess.Popen(
@@ -155,8 +153,7 @@
                                     ctypes.c_size_t(0), ONE_PAGE)
         except Exception as e:
             sys.stderr.write('Failed getting the AVX flag on Windows.\n'
-                             'The original error is: %s\n' %
-                             cpt.get_exception_message(e))
+                             'The original error is: %s\n' % str(e))
         return (retval & (1 << avx_bit)) > 0
     else:
         sys.stderr.write('Do not get AVX flag on %s\n' % sysstr)
diff --git a/python/paddle/fluid/tests/unittests/dist_transformer.py b/python/paddle/fluid/tests/unittests/dist_transformer.py
index b37b70cfeed67fde49bb6f2bdcf28fce03f2030a..32478b8c5fada9bab5d87729327eb4172ad90b3a 100644
--- a/python/paddle/fluid/tests/unittests/dist_transformer.py
+++ b/python/paddle/fluid/tests/unittests/dist_transformer.py
@@ -27,7 +27,6 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from test_dist_base import TestDistRunnerBase, runtime_main, RUN_STEP
 import paddle.compat as cpt
-from paddle.compat import long_type
 
 const_para_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant(0.001))
 const_bias_attr = const_para_attr
@@ -173,10 +172,10 @@ seq_len = ModelHyperParams.max_length
 input_descs = {
     # The actual data shape of src_word is:
     # [batch_size * max_src_len_in_batch, 1]
-    "src_word": [(batch_size, seq_len, long_type(1)), "int64", 2],
+    "src_word": [(batch_size, seq_len, 1), "int64", 2],
     # The actual data shape of src_pos is:
     # [batch_size * max_src_len_in_batch, 1]
-    "src_pos": [(batch_size, seq_len, long_type(1)), "int64"],
+    "src_pos": [(batch_size, seq_len, 1), "int64"],
     # This input is used to remove attention weights on paddings in the
     # encoder.
     # The actual data shape of src_slf_attn_bias is:
@@ -185,11 +184,11 @@ input_descs = {
         [(batch_size, ModelHyperParams.n_head, seq_len, seq_len), "float32"],
     # The actual data shape of trg_word is:
     # [batch_size * max_trg_len_in_batch, 1]
-    "trg_word": [(batch_size, seq_len, long_type(1)), "int64",
+    "trg_word": [(batch_size, seq_len, 1), "int64",
                  2],  # lod_level is only used in fast decoder.
     # The actual data shape of trg_pos is:
     # [batch_size * max_trg_len_in_batch, 1]
-    "trg_pos": [(batch_size, seq_len, long_type(1)), "int64"],
+    "trg_pos": [(batch_size, seq_len, 1), "int64"],
     # This input is used to remove attention weights on paddings and
     # subsequent words in the decoder.
     # The actual data shape of trg_slf_attn_bias is:
@@ -208,15 +207,15 @@ input_descs = {
     "enc_output": [(batch_size, seq_len, ModelHyperParams.d_model), "float32"],
     # The actual data shape of label_word is:
     # [batch_size * max_trg_len_in_batch, 1]
-    "lbl_word": [(batch_size * seq_len, long_type(1)), "int64"],
+    "lbl_word": [(batch_size * seq_len, 1), "int64"],
     # This input is used to mask out the loss of padding tokens.
     # The actual data shape of label_weight is:
     # [batch_size * max_trg_len_in_batch, 1]
-    "lbl_weight": [(batch_size * seq_len, long_type(1)), "float32"],
+    "lbl_weight": [(batch_size * seq_len, 1), "float32"],
     # These inputs are used to change the shape tensor in beam-search decoder.
-    "trg_slf_attn_pre_softmax_shape_delta": [(long_type(2), ), "int32"],
-    "trg_slf_attn_post_softmax_shape_delta": [(long_type(4), ), "int32"],
-    "init_score": [(batch_size, long_type(1)), "float32"],
+    "trg_slf_attn_pre_softmax_shape_delta": [(2, ), "int32"],
+    "trg_slf_attn_post_softmax_shape_delta": [(4, ), "int32"],
+    "init_score": [(batch_size, 1), "float32"],
 }
 
 # Names of word embedding table which might be reused for weight sharing.
diff --git a/python/paddle/fluid/tests/unittests/test_compat.py b/python/paddle/fluid/tests/unittests/test_compat.py
index 7ff25fcfddd38cd1e65c5086e4df52c512167b9b..e7ecdeda8b6c8d3ac83a95e825f4d22b658315ed 100644
--- a/python/paddle/fluid/tests/unittests/test_compat.py
+++ b/python/paddle/fluid/tests/unittests/test_compat.py
@@ -18,10 +18,6 @@ import paddle.compat as cpt
 
 
 class TestCompatible(unittest.TestCase):
-    def test_type(self):
-        self.assertEqual(cpt.int_type, int)
-        self.assertEqual(cpt.long_type, int)
-
     def test_to_text(self):
         self.assertIsNone(cpt.to_text(None))
 
@@ -252,30 +248,6 @@ class TestCompatible(unittest.TestCase):
         self.assertEqual(5.0, cpt.round(5))
         self.assertRaises(TypeError, cpt.round, None)
 
-    def test_floor_division(self):
-        self.assertEqual(0.0, cpt.floor_division(3, 4))
-        self.assertEqual(1.0, cpt.floor_division(4, 3))
-        self.assertEqual(2.0, cpt.floor_division(6, 3))
-        self.assertEqual(-2.0, cpt.floor_division(-4, 3))
-        self.assertEqual(-2.0, cpt.floor_division(-6, 3))
-        self.assertRaises(ZeroDivisionError, cpt.floor_division, 3, 0)
-        self.assertRaises(TypeError, cpt.floor_division, None, None)
-
-    def test_get_exception_message(self):
-        exception_message = "test_message"
-        self.assertRaises(AssertionError, cpt.get_exception_message, None)
-        try:
-            raise RuntimeError(exception_message)
-        except Exception as e:
-            self.assertEqual(exception_message, cpt.get_exception_message(e))
-            self.assertIsNotNone(e)
-
-        try:
-            raise Exception(exception_message)
-        except Exception as e:
-            self.assertEqual(exception_message, cpt.get_exception_message(e))
-            self.assertIsNotNone(e)
-
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_dataset.py b/python/paddle/fluid/tests/unittests/test_dataset.py
index 83a16e43f391297126d6ace3b18e825c58e82d9d..13b6b9838ccdcbd83571fab1b71f819130a8194a 100644
--- a/python/paddle/fluid/tests/unittests/test_dataset.py
+++ b/python/paddle/fluid/tests/unittests/test_dataset.py
@@ -904,7 +904,7 @@ class TestDatasetWithFetchHandler(unittest.TestCase):
             print("warning: we skip trainer_desc_pb2 import problem in windows")
         except RuntimeError as e:
             error_msg = "dataset is need and should be initialized"
-            self.assertEqual(error_msg, cpt.get_exception_message(e))
+            self.assertEqual(error_msg, str(e))
         except Exception as e:
             self.assertTrue(False)
 
@@ -948,7 +948,7 @@ class TestDatasetWithFetchHandler(unittest.TestCase):
             print("warning: we skip trainer_desc_pb2 import problem in windows")
         except RuntimeError as e:
             error_msg = "dataset is need and should be initialized"
-            self.assertEqual(error_msg, cpt.get_exception_message(e))
+            self.assertEqual(error_msg, str(e))
         except Exception as e:
             self.assertTrue(False)
 
diff --git a/python/paddle/fluid/tests/unittests/test_exception.py b/python/paddle/fluid/tests/unittests/test_exception.py
index 266c83106f2eae05e30858505c246549a2153442..a3d9e744481b487d2f68625a074731b04b1fb908 100644
--- a/python/paddle/fluid/tests/unittests/test_exception.py
+++ b/python/paddle/fluid/tests/unittests/test_exception.py
@@ -28,8 +28,7 @@ class TestException(unittest.TestCase):
         try:
             core.__unittest_throw_exception__()
         except RuntimeError as ex:
-            self.assertIn("This is a test of exception",
-                          cpt.get_exception_message(ex))
+            self.assertIn("This is a test of exception", str(ex))
             exception = ex
 
         self.assertIsNotNone(exception)
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exception.py b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exception.py
index 4cc6d813bf871ce18c8424d1635cba97f2cd5e99..a9fed3cae36c6dc3f9e04308158235878d8028f1 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exception.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exception.py
@@ -67,8 +67,7 @@ class TestDygraphDataLoaderWithException(unittest.TestCase):
             for _ in loader():
                 print("test_single_process_with_thread_expection")
         except core.EnforceNotMet as ex:
-            self.assertIn("Blocking queue is killed",
-                          cpt.get_exception_message(ex))
+            self.assertIn("Blocking queue is killed", str(ex))
             exception = ex
 
         self.assertIsNotNone(exception)
@@ -130,8 +129,7 @@ class TestDygraphDataLoaderWithException(unittest.TestCase):
             for image, _ in loader():
                 fluid.layers.relu(image)
         except core.EnforceNotMet as ex:
-            self.assertIn("Blocking queue is killed",
-                          cpt.get_exception_message(ex))
+            self.assertIn("Blocking queue is killed", str(ex))
             exception = ex
 
         self.assertIsNotNone(exception)
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exit_func.py b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exit_func.py
index 8c201d85a9cdd82d63e6df021f24118ff0ce1bea..b1c2f7715b60bcbe3bc9724dc0de6b7f2456da1e 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exit_func.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exit_func.py
@@ -57,7 +57,7 @@ class TestRegisterExitFunc(unittest.TestCase):
         try:
             CleanupFuncRegistrar.register(5)
         except TypeError as ex:
-            self.assertIn("is not callable", cpt.get_exception_message(ex))
+            self.assertIn("is not callable", str(ex))
             exception = ex
 
         self.assertIsNotNone(exception)
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
index ae1fe6d3706663cdaa9912c3a91a37ee738b6992..5fb80c80d9d12eb2ac671aea65b50f27af715ca3 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
@@ -109,7 +109,7 @@ class TestEagerGrad(TestCase):
             # allow_unused is false in default
            dx = fluid.dygraph.grad(out, [x, z])
         except ValueError as e:
-            error_msg = cpt.get_exception_message(e)
+            error_msg = str(e)
             assert error_msg.find("allow_unused") > 0
 
     def test_simple_example_eager_grad_not_allow_unused(self):
@@ -133,7 +133,7 @@ class TestEagerGrad(TestCase):
             # duplicate input will arise RuntimeError errors
             dx = fluid.dygraph.grad(out, [x, x])
         except RuntimeError as e:
-            error_msg = cpt.get_exception_message(e)
+            error_msg = str(e)
             assert error_msg.find("duplicate") > 0
 
     def test_simple_example_eager_grad_duplicate_input(self):
@@ -157,7 +157,7 @@ class TestEagerGrad(TestCase):
             # duplicate output will arise RuntimeError errors
             dx = fluid.dygraph.grad([out, out], [x])
         except RuntimeError as e:
-            error_msg = cpt.get_exception_message(e)
+            error_msg = str(e)
             assert error_msg.find("duplicate") > 0
 
     def test_simple_example_eager_grad_duplicate_output(self):
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py b/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py
index 3f2a897b6b3e03fa6c7fe8e3f49b4623bb546467..72be7aafaf0d3229a2f21e5f5df2a9da8b4c6bc9 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py
@@ -55,7 +55,7 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase):
             set_child_signal_handler(id(self), test_process.pid)
             time.sleep(5)
         except SystemError as ex:
-            self.assertIn("Fatal", cpt.get_exception_message(ex))
+            self.assertIn("Fatal", str(ex))
             exception = ex
 
         return exception
@@ -88,8 +87,7 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase):
             set_child_signal_handler(id(self), test_process.pid)
             time.sleep(5)
         except SystemError as ex:
-            self.assertIn("Segmentation fault",
-                          cpt.get_exception_message(ex))
+            self.assertIn("Segmentation fault", str(ex))
             exception = ex
 
         return exception
@@ -122,7 +121,7 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase):
             set_child_signal_handler(id(self), test_process.pid)
             time.sleep(5)
         except SystemError as ex:
-            self.assertIn("Bus error", cpt.get_exception_message(ex))
+            self.assertIn("Bus error", str(ex))
             exception = ex
 
         return exception
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
index 1470ae4df56c1399ea180436a69e4d9c06731742..ce79317866bc8d052d5bbfde98cbf5f2757b6c7c 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
@@ -82,7 +82,7 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds):
         flatten_idx = ids.flatten()
         padding_idx = np.random.choice(flatten_idx, 1)[0]
         self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
-        self.attrs = {'padding_idx': cpt.long_type(padding_idx)}
+        self.attrs = {'padding_idx': padding_idx}
         self.check_output()
 
 
@@ -250,7 +250,7 @@ class TestLookupTableOpWithTensorIdsAndPaddingInt8(
         flatten_idx = ids.flatten()
         padding_idx = np.random.choice(flatten_idx, 1)[0]
         self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
-        self.attrs = {'padding_idx': cpt.long_type(padding_idx)}
+        self.attrs = {'padding_idx': padding_idx}
         self.check_output()
 
     def test_check_grad(self):
@@ -380,7 +380,7 @@ class TestLookupTableOpWithTensorIdsAndPaddingInt16(
         flatten_idx = ids.flatten()
         padding_idx = np.random.choice(flatten_idx, 1)[0]
         self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
-        self.attrs = {'padding_idx': cpt.long_type(padding_idx)}
+        self.attrs = {'padding_idx': padding_idx}
         self.check_output()
 
 
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
index 5552d2cd68f5d95bffebe5022bc9dad2d7439176..3b0a093426053165b1be4cdb5402e78637767161 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
@@ -123,7 +123,7 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds):
         flatten_idx = ids.flatten()
         padding_idx = np.random.choice(flatten_idx, 1)[0]
         self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
-        self.attrs = {'padding_idx': cpt.long_type(padding_idx)}
+        self.attrs = {'padding_idx': padding_idx}
         self.check_output()
 
 
diff --git a/python/paddle/fluid/tests/unittests/test_operator_desc.py b/python/paddle/fluid/tests/unittests/test_operator_desc.py
index 7cf5b36f764c021fb6904da6bc118d0edb8c5d37..2b2c7287014a90e3507bfbd2f151eb323a6ab2d6 100644
--- a/python/paddle/fluid/tests/unittests/test_operator_desc.py
+++ b/python/paddle/fluid/tests/unittests/test_operator_desc.py
@@ -31,15 +31,14 @@ class TestOperator(unittest.TestCase):
             self.assertFail()
         except ValueError as v_err:
             self.assertEqual(
-                cpt.get_exception_message(v_err),
+                str(v_err),
                 "`type` to initialized an Operator can not be None.")
         try:
             block.append_op(type="no_such_op")
             self.assertFail()
         except ValueError as a_err:
             self.assertEqual(
-                cpt.get_exception_message(a_err),
-                "Operator \"no_such_op\" has not been registered.")
+                str(a_err), "Operator \"no_such_op\" has not been registered.")
 
     def test_op_desc_creation(self):
         program = Program()
diff --git a/python/paddle/fluid/tests/unittests/test_optimizer.py b/python/paddle/fluid/tests/unittests/test_optimizer.py
index 8e127f3c60c0a4278edb43230f62554d3bb2b285..52ce642c0e7de3f8b5f0a1101f44ea0d0b16a5fd 100644
--- a/python/paddle/fluid/tests/unittests/test_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_optimizer.py
@@ -990,7 +990,7 @@ class TestRecomputeOptimizer(unittest.TestCase):
         except NotImplementedError as e:
             self.assertEqual(
                 "load function is not supported by Recompute Optimizer for now",
-                cpt.get_exception_message(e))
+                str(e))
 
     def test_dropout(self):
         """
diff --git a/python/paddle/fluid/tests/unittests/test_prune.py b/python/paddle/fluid/tests/unittests/test_prune.py
index 4899d5c4d48839bfabef0c1cb2190eaa9a212b84..dc44f99d670b6f0c8ecd0cc0b277018bc936ca99 100644
--- a/python/paddle/fluid/tests/unittests/test_prune.py
+++ b/python/paddle/fluid/tests/unittests/test_prune.py
@@ -100,7 +100,7 @@ class TestPrune(unittest.TestCase):
         except ValueError as e:
             self.assertIn(
                 "All targets of Program._prune_with_input() can only be Variable or Operator",
-                cpt.get_exception_message(e))
+                str(e))
 
 
 def mock(self, program, feed, fetch, optimize_ops):
diff --git a/python/paddle/fluid/tests/unittests/test_zeros_op.py b/python/paddle/fluid/tests/unittests/test_zeros_op.py
index 84d83d6a7b560b6ecca9ca299227607eb322f789..3072a04f257d4b34726c8bf6f7ff6236a05e7fa0 100644
--- a/python/paddle/fluid/tests/unittests/test_zeros_op.py
+++ b/python/paddle/fluid/tests/unittests/test_zeros_op.py
@@ -103,7 +103,7 @@ class ApiZerosError(unittest.TestCase):
             shape = [-1, 5]
             out = paddle.zeros(shape)
         except Exception as e:
-            error_msg = cpt.get_exception_message(e)
+            error_msg = str(e)
             assert error_msg.find("expected to be no less than 0") > 0
 
     def test_eager(self):
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py
index 77f09fc9864a6262aa355418a858d48cc43466db..6bb54c8f74edf63401abb236856e9a5f43783e53 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py
@@ -95,7 +95,7 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds):
         flatten_idx = ids.flatten()
         padding_idx = np.random.choice(flatten_idx, 1)[0]
         self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
-        self.attrs = {'padding_idx': cpt.long_type(padding_idx)}
+        self.attrs = {'padding_idx': padding_idx}
         self.check_output_with_place(place=paddle.XPUPlace(0))
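
A minimal sketch (not part of the patch) of the Python 3 semantics the replacements above rely on; it mirrors the deleted test_compat checks, and the compat.* names in the comments refer to the removed helpers:

# int is arbitrary-precision in Python 3, so compat.int_type and
# compat.long_type were both plain aliases of the builtin int.
assert isinstance(10**100, int)

# The builtin floor-division operator already returns floor(x / y),
# which is all compat.floor_division(x, y) did.
assert 3 // 4 == 0
assert -4 // 3 == -2

# str(exc) carries the exception message, matching the value that
# compat.get_exception_message(exc) used to return.
try:
    raise RuntimeError("test_message")
except RuntimeError as exc:
    assert str(exc) == "test_message"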