From b75d8c7ec535bb26d0d40ad56336d70e53452453 Mon Sep 17 00:00:00 2001
From: xiaoguoguo626807 <100397923+xiaoguoguo626807@users.noreply.github.com>
Date: Sat, 13 May 2023 09:13:54 +0800
Subject: [PATCH] Revert elementwise add (#53745)

* modify concat_grad add sum comp rule

* delete default mul_double_grad

* delete high grad test

* recover yaml

* modify yaml

* recover add_double_grad prim

---
 .../eager/auto_code_generator/generator/eager_gen.py |  1 -
 paddle/phi/api/yaml/legacy_backward.yaml             | 12 ++++++++++++
 test/prim/prim/vjp/test_comp_high_grad.py            |  3 ++-
 3 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
index 08a5f57d293..8d28fa2438f 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -67,7 +67,6 @@ black_ops_list = [
 prim_white_list = [
     "matmul_double_grad",
     "tanh_double_grad",
-    "add_double_grad",
     "subtract_double_grad",
 ]
 
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 3eaafe2b407..758a2ac3147 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -30,6 +30,7 @@
   kernel :
     func : add_double_grad
   optional : grad_x_grad, grad_y_grad
+  backward : add_triple_grad
   inplace : (grad_x_grad -> grad_out_grad)
   composite : add_double_grad(y, grad_out, grad_x_grad, grad_y_grad, axis, grad_out_grad)
 
@@ -47,6 +48,17 @@
   backward : add_double_grad
   inplace : (out_grad -> x_grad)
 
+- backward_op : add_triple_grad
+  forward : add_double_grad (Tensor y, Tensor grad_out, Tensor grad_grad_x, Tensor grad_grad_y, int axis = -1) -> Tensor(grad_grad_out)
+  args : (Tensor grad_grad_x, Tensor grad_grad_y, Tensor grad_grad_out_grad, int axis = -1)
+  output : Tensor(grad_grad_x_grad), Tensor(grad_grad_y_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param : [grad_grad_x, grad_grad_y]
+  kernel :
+    func : add_triple_grad
+  inplace : (grad_grad_out_grad -> grad_grad_x_grad)
+
 - backward_op : amax_grad
   forward: amax (Tensor x, int64_t[] axis={}, bool keepdim=false) -> Tensor(out)
   args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis={}, bool keepdim=false, bool reduce_all=false)
diff --git a/test/prim/prim/vjp/test_comp_high_grad.py b/test/prim/prim/vjp/test_comp_high_grad.py
index 87b3c8f300e..99268b1b58e 100644
--- a/test/prim/prim/vjp/test_comp_high_grad.py
+++ b/test/prim/prim/vjp/test_comp_high_grad.py
@@ -25,7 +25,7 @@ import paddle
 from paddle import fluid
 from paddle.fluid import core
 
-
+'''
 @param.parameterized_class(
     ('shape1', 'shape2'),
     [
@@ -120,6 +120,7 @@ class TestAddHighGradCheck(unittest.TestCase):
         for p in places:
             self.func_double(p)
             self.func_triple(p)
+'''
 
 
 @param.parameterized_class(
-- 
GitLab
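
For context, a minimal sketch of the nested paddle.grad calls that the temporarily commented-out TestAddHighGradCheck covered and that the re-registered add_triple_grad op serves. The shapes, the tanh wrapper, and the expectation that the second- and third-order passes hit the add_double_grad / add_triple_grad paths are illustrative assumptions, not something stated in the diff:

    # Hedged sketch, not part of the patch above.
    import paddle

    x = paddle.randn([2, 3])
    y = paddle.randn([2, 3])
    x.stop_gradient = False
    y.stop_gradient = False

    # Wrap the add in a nonlinearity so higher-order gradients are non-trivial.
    out = paddle.tanh(paddle.add(x, y)).sum()

    # First-order gradient; create_graph=True keeps the graph for further orders.
    (dx,) = paddle.grad(out, [x], create_graph=True)
    # Second-order gradient (assumed to exercise the add_double_grad path).
    (dxx,) = paddle.grad(dx.sum(), [x], create_graph=True)
    # Third-order gradient (assumed to exercise the restored add_triple_grad path).
    (dxxx,) = paddle.grad(dxx.sum(), [x])

    print(dxxx.shape)  # [2, 3]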