From 0a6fe6994afcaff7b3c25ff122ce73cbad4a1fe5 Mon Sep 17 00:00:00 2001
From: Aurelius84
Date: Fri, 8 Apr 2022 16:08:52 +0800
Subject: [PATCH] [Eager]Fix segment_pool/allclose/isclose/scale API bug
 (#41506)

* [Eager]Fix segment_pool/allclose/isclose/scale API bug

* fix kernel register problem
---
 paddle/fluid/operators/cast_op.cu          | 22 +++++++++++-----------
 python/paddle/incubate/tensor/math.py      |  2 +-
 python/paddle/tensor/logic.py              | 14 ++++++++++++--
 python/paddle/utils/code_gen/backward.yaml |  3 ++-
 4 files changed, 26 insertions(+), 15 deletions(-)

diff --git a/paddle/fluid/operators/cast_op.cu b/paddle/fluid/operators/cast_op.cu
index eb51215790..0afe09ec02 100644
--- a/paddle/fluid/operators/cast_op.cu
+++ b/paddle/fluid/operators/cast_op.cu
@@ -19,15 +19,15 @@ namespace ops = paddle::operators;
 namespace plat = paddle::platform;
 
 using CUDA = paddle::platform::CUDADeviceContext;
-#define REGISTER_CAST_CUDA_BASE(op_name, ...)                               \
-  REGISTER_OP_CUDA_KERNEL(                                                  \
-      op_name, ops::CastOpKernel<CUDA, float>,                              \
-      ops::CastOpKernel<CUDA, double>, ops::CastOpKernel<CUDA, int>,        \
-      ops::CastOpKernel<CUDA, int64_t>, ops::CastOpKernel<CUDA, int16_t>,   \
-      ops::CastOpKernel<CUDA, bool>, ops::CastOpKernel<CUDA, uint8_t>,      \
-      ops::CastOpKernel<CUDA, plat::float16>,                               \
-      ops::CastOpKernel<CUDA, plat::complex<float>>,                        \
-      ops::CastOpKernel<CUDA, plat::complex<double>>, ##__VA_ARGS__);
-
 // See [ why register transfer_dtype_op alias with cast_op? ] in cast_op.cc
-REGISTER_CAST_CUDA_BASE(transfer_dtype, ops::CastOpKernel<CUDA, plat::bfloat16>)
+REGISTER_OP_CUDA_KERNEL(transfer_dtype, ops::CastOpKernel<CUDA, float>,
+                        ops::CastOpKernel<CUDA, double>,
+                        ops::CastOpKernel<CUDA, int>,
+                        ops::CastOpKernel<CUDA, int64_t>,
+                        ops::CastOpKernel<CUDA, int16_t>,
+                        ops::CastOpKernel<CUDA, bool>,
+                        ops::CastOpKernel<CUDA, uint8_t>,
+                        ops::CastOpKernel<CUDA, plat::float16>,
+                        ops::CastOpKernel<CUDA, plat::complex<float>>,
+                        ops::CastOpKernel<CUDA, plat::complex<double>>,
+                        ops::CastOpKernel<CUDA, plat::bfloat16>);
diff --git a/python/paddle/incubate/tensor/math.py b/python/paddle/incubate/tensor/math.py
index da6eb4e17c..07dc7c1581 100644
--- a/python/paddle/incubate/tensor/math.py
+++ b/python/paddle/incubate/tensor/math.py
@@ -222,7 +222,7 @@ def segment_max(data, segment_ids, name=None):
 
     """
     if in_dygraph_mode():
-        out = _C_ops.final_state_segment_pool(data, segment_ids, "MAX")[0]
+        out, tmp = _C_ops.final_state_segment_pool(data, segment_ids, "MAX")
         return out
 
     if _non_static_mode():
diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py
index 27aa333b1a..636b2ef17c 100755
--- a/python/paddle/tensor/logic.py
+++ b/python/paddle/tensor/logic.py
@@ -127,7 +127,12 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
 
     """
     if in_dygraph_mode():
-        return _C_ops.final_state_allclose(x, y, rtol, atol, equal_nan)
+        # NOTE(dev): Pass tol as Tensor to fix precision loss problem, because
+        # C++ backend will cast it into float32 if passing float from python.
+        as_tensor = lambda x: paddle.to_tensor([x], dtype='float64', place='cpu')
+        return _C_ops.final_state_allclose(x, y,
+                                           as_tensor(rtol),
+                                           as_tensor(atol), equal_nan)
     if _in_legacy_dygraph():
         return _C_ops.allclose(x, y, 'rtol',
                                str(rtol), 'atol',
@@ -689,7 +694,12 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
 
     """
     if in_dygraph_mode():
-        return _C_ops.final_state_isclose(x, y, rtol, atol, equal_nan)
+        # NOTE(dev): Pass tol as Tensor to fix precision loss problem, because
+        # C++ backend will cast it into float32 if passing float from python.
+        as_tensor = lambda x: paddle.to_tensor([x], dtype='float64', place='cpu')
+        return _C_ops.final_state_isclose(x, y,
+                                          as_tensor(rtol),
+                                          as_tensor(atol), equal_nan)
     if _in_legacy_dygraph():
         return _C_ops.isclose(x, y, 'rtol',
                               str(rtol), 'atol',
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index 3456fe3260..602fecc83b 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -1217,7 +1217,7 @@
   forward : scale (Tensor x, Scalar scale, float bias, bool bias_after_scale) -> Tensor(out)
   args : (Tensor out_grad, Scalar scale=1.0, float bias=0.0, bool bias_after_scale=true)
   output : Tensor(x_grad)
-  invoke : scale(out_grad, scale, bias, bias_after_scale)
+  invoke : scale(out_grad, scale, 0.0, bias_after_scale)
 
 - backward_api : scatter_grad
   forward : scatter (Tensor x, Tensor index, Tensor updates, bool overwrite) -> Tensor(out)
@@ -1250,6 +1250,7 @@
     param : [x]
   kernel :
     func : segment_pool_grad
+    data_type : x
   optional : summed_ids
 
 - backward_api : selu_grad
-- 
GitLab
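
Note appended for context (not part of the patch above): the NOTE(dev) comments explain that a Python float tolerance loses precision once the C++ backend narrows it to float32, which is why rtol/atol are now forwarded as float64 CPU tensors. Below is a minimal Python sketch of that rationale, assuming paddle and numpy are installed; the tensor values are made up for illustration.

import numpy as np
import paddle

# Narrowing the default atol to float32 changes its value; this is the
# precision loss the patch works around.
atol = 1e-8
print(float(np.float32(atol)))   # about 9.9999999e-09, no longer exactly 1e-08
print(np.float32(atol) == atol)  # False

# The fixed dygraph path keeps tolerances in float64 on CPU, mirroring the
# as_tensor helper introduced in python/paddle/tensor/logic.py.
as_tensor = lambda v: paddle.to_tensor([v], dtype='float64', place='cpu')
print(as_tensor(atol))

# Public APIs that route through the fixed code path in dygraph mode.
x = paddle.to_tensor([10000.0, 1e-07])
y = paddle.to_tensor([10000.1, 1e-08])
print(paddle.allclose(x, y, rtol=1e-05, atol=1e-08))
print(paddle.isclose(x, y, rtol=1e-05, atol=1e-08))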