diff --git a/paddle/fluid/operators/cast_op.cu b/paddle/fluid/operators/cast_op.cu
index eb51215790bbcdbc9e7d0c3adad482d9a69324b9..0afe09ec028e348f87270afe577b56b84c130c13 100644
--- a/paddle/fluid/operators/cast_op.cu
+++ b/paddle/fluid/operators/cast_op.cu
@@ -19,15 +19,15 @@ namespace ops = paddle::operators;
 namespace plat = paddle::platform;

 using CUDA = paddle::platform::CUDADeviceContext;
-#define REGISTER_CAST_CUDA_BASE(op_name, ...)                               \
-  REGISTER_OP_CUDA_KERNEL(                                                  \
-      op_name, ops::CastOpKernel<CUDA, float>,                              \
-      ops::CastOpKernel<CUDA, double>, ops::CastOpKernel<CUDA, int>,        \
-      ops::CastOpKernel<CUDA, int64_t>, ops::CastOpKernel<CUDA, int16_t>,   \
-      ops::CastOpKernel<CUDA, bool>, ops::CastOpKernel<CUDA, uint8_t>,      \
-      ops::CastOpKernel<CUDA, plat::float16>,                               \
-      ops::CastOpKernel<CUDA, plat::complex<float>>,                        \
-      ops::CastOpKernel<CUDA, plat::complex<double>>, ##__VA_ARGS__);
-
 // See [ why register transfer_dtype_op alias with cast_op? ] in cast_op.cc
-REGISTER_CAST_CUDA_BASE(transfer_dtype, ops::CastOpKernel<CUDA, plat::bfloat16>)
+REGISTER_OP_CUDA_KERNEL(transfer_dtype, ops::CastOpKernel<CUDA, float>,
+                        ops::CastOpKernel<CUDA, double>,
+                        ops::CastOpKernel<CUDA, int>,
+                        ops::CastOpKernel<CUDA, int64_t>,
+                        ops::CastOpKernel<CUDA, int16_t>,
+                        ops::CastOpKernel<CUDA, bool>,
+                        ops::CastOpKernel<CUDA, uint8_t>,
+                        ops::CastOpKernel<CUDA, plat::float16>,
+                        ops::CastOpKernel<CUDA, plat::complex<float>>,
+                        ops::CastOpKernel<CUDA, plat::complex<double>>,
+                        ops::CastOpKernel<CUDA, plat::bfloat16>);
diff --git a/python/paddle/incubate/tensor/math.py b/python/paddle/incubate/tensor/math.py
index da6eb4e17c7fb207e3f00aff2e1b39bfdaebe019..07dc7c1581fc49e1f9f33e31292c9b8125834489 100644
--- a/python/paddle/incubate/tensor/math.py
+++ b/python/paddle/incubate/tensor/math.py
@@ -222,7 +222,7 @@ def segment_max(data, segment_ids, name=None):

     """
     if in_dygraph_mode():
-        out = _C_ops.final_state_segment_pool(data, segment_ids, "MAX")[0]
+        out, tmp = _C_ops.final_state_segment_pool(data, segment_ids, "MAX")
         return out

     if _non_static_mode():
diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py
index 27aa333b1a54650de487553dbf534a28f44af4ae..636b2ef17c6a0eeb166cdaf338ce2405ee5619f1 100755
--- a/python/paddle/tensor/logic.py
+++ b/python/paddle/tensor/logic.py
@@ -127,7 +127,12 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     """

     if in_dygraph_mode():
-        return _C_ops.final_state_allclose(x, y, rtol, atol, equal_nan)
+        # NOTE(dev): Pass tol as Tensor to fix precision loss problem, because
+        # C++ backend will cast it into float32 if passing float from python.
+        as_tensor = lambda x: paddle.to_tensor([x], dtype='float64', place='cpu')
+        return _C_ops.final_state_allclose(x, y,
+                                           as_tensor(rtol),
+                                           as_tensor(atol), equal_nan)
     if _in_legacy_dygraph():
         return _C_ops.allclose(x, y, 'rtol', str(rtol), 'atol',
@@ -689,7 +694,12 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     """

     if in_dygraph_mode():
-        return _C_ops.final_state_isclose(x, y, rtol, atol, equal_nan)
+        # NOTE(dev): Pass tol as Tensor to fix precision loss problem, because
+        # C++ backend will cast it into float32 if passing float from python.
+        as_tensor = lambda x: paddle.to_tensor([x], dtype='float64', place='cpu')
+        return _C_ops.final_state_isclose(x, y,
+                                          as_tensor(rtol),
+                                          as_tensor(atol), equal_nan)
     if _in_legacy_dygraph():
         return _C_ops.isclose(x, y, 'rtol', str(rtol), 'atol',
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index 3456fe3260abcccd61eec9d307068dde8481d51b..602fecc83b8f786e7105384b23023a470e7cef12 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -1217,7 +1217,7 @@
   forward : scale (Tensor x, Scalar scale, float bias, bool bias_after_scale) -> Tensor(out)
   args : (Tensor out_grad, Scalar scale=1.0, float bias=0.0, bool bias_after_scale=true)
   output : Tensor(x_grad)
-  invoke : scale(out_grad, scale, bias, bias_after_scale)
+  invoke : scale(out_grad, scale, 0.0, bias_after_scale)

 - backward_api : scatter_grad
   forward : scatter (Tensor x, Tensor index, Tensor updates, bool overwrite) -> Tensor(out)
@@ -1250,6 +1250,7 @@
     param : [x]
   kernel :
     func : segment_pool_grad
+    data_type : x
   optional : summed_ids

 - backward_api : selu_grad
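The `allclose`/`isclose` hunks above wrap `rtol` and `atol` in float64 CPU tensors because, per the NOTE(dev) comments, the C++ backend narrows a plain Python float attribute to float32. The sketch below is illustrative only and not part of the patch; it uses NumPy as a stand-in for that backend cast to show why the narrowing loses precision.

```python
# Illustrative sketch: NumPy stands in for the C++ float32 attribute cast
# mentioned in the NOTE(dev) comments of the patch above.
import numpy as np

rtol, atol = 1e-05, 1e-08

# Narrowing the default tolerances to float32 perturbs their values.
print(np.float64(np.float32(atol)) - atol)   # ~ -6.1e-17, not 0.0
print(np.float64(np.float32(rtol)) - rtol)   # ~ -2.5e-13, not 0.0

# Keeping the tolerance in a float64 container, which is what the patch's
# lambda paddle.to_tensor([x], dtype='float64', place='cpu') does, preserves
# the Python float (itself a float64) exactly.
print(float(np.float64(atol)) == atol)       # True
```

A perturbation of that size can change the outcome for inputs sitting right at the rtol/atol boundary, which is the precision loss the NOTE(dev) comments refer to.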