From b4024aaffa79134d826b0cecb58fe83ea3f7b512 Mon Sep 17 00:00:00 2001
From: xiaoguoguo626807 <100397923+xiaoguoguo626807@users.noreply.github.com>
Date: Thu, 11 May 2023 16:12:48 +0800
Subject: [PATCH] Revert elementwise (#53663)

* modify concat_grad add sum comp rule

* delete default mul_double_grad

* delete high grad test

* recover yaml

* modify yaml
---
 .../eager/auto_code_generator/generator/eager_gen.py |  1 -
 paddle/phi/api/yaml/legacy_backward.yaml             | 12 ++++++++++++
 test/prim/prim/vjp/test_comp_high_grad.py            |  4 ++--
 3 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
index ed21e1171c1..08a5f57d293 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -68,7 +68,6 @@ prim_white_list = [
     "matmul_double_grad",
     "tanh_double_grad",
     "add_double_grad",
-    "multiply_double_grad",
     "subtract_double_grad",
 ]
 
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 9d1b2ce5b49..3eaafe2b407 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -617,6 +617,7 @@
     func : multiply_double_grad
   optional : grad_x_grad, grad_y_grad
   inplace : (grad_x_grad -> grad_out_grad)
+  backward : multiply_triple_grad
   composite : multiply_double_grad(x, y, grad_out, grad_x_grad, grad_y_grad, axis, x_grad, y_grad, grad_out_grad)
 
 - backward_op : multiply_grad
@@ -631,6 +632,17 @@
   composite: multiply_grad(x, y, out_grad, axis, x_grad, y_grad)
   backward : multiply_double_grad
 
+- backward_op : multiply_triple_grad
+  forward : multiply_double_grad (Tensor x, Tensor y, Tensor fwd_grad_out, Tensor fwd_grad_grad_x, Tensor fwd_grad_grad_y, int aixs = -1) -> Tensor(grad_x), Tensor(grad_y), Tensor(grad_grad_out)
+  args : (Tensor x, Tensor y, Tensor fwd_grad_out, Tensor fwd_grad_grad_x, Tensor fwd_grad_grad_y, Tensor grad_x_grad, Tensor grad_y_grad, Tensor grad_grad_out_grad, int axis = -1)
+  output : Tensor(x_grad), Tensor(y_grad), Tensor(fwd_grad_out_grad), Tensor(fwd_grad_grad_x_grad), Tensor(fwd_grad_grad_y_grad)
+  infer_meta :
+    func : GeneralQuinaryGradInferMeta
+    param : [x, y, fwd_grad_out, fwd_grad_grad_x, fwd_grad_grad_y]
+  kernel :
+    func : multiply_triple_grad
+  optional : fwd_grad_grad_x, fwd_grad_grad_y, grad_x_grad, grad_y_grad, grad_grad_out_grad
+
 - backward_op : norm_grad
   forward : norm (Tensor x, int axis, float epsilon, bool is_test) -> Tensor(out), Tensor(norm)
   args : (Tensor x, Tensor norm, Tensor out_grad, int axis, float epsilon, bool is_test)
diff --git a/test/prim/prim/vjp/test_comp_high_grad.py b/test/prim/prim/vjp/test_comp_high_grad.py
index 76283528e24..87b3c8f300e 100644
--- a/test/prim/prim/vjp/test_comp_high_grad.py
+++ b/test/prim/prim/vjp/test_comp_high_grad.py
@@ -226,6 +226,7 @@ class TestSubtractHighGradCheck(unittest.TestCase):
             self.func_triple(p)
 
 
+'''
 @param.parameterized_class(
     ('shape1', 'shape2'),
     [
@@ -328,7 +329,6 @@ class TestMultiplyHighGradCheck(unittest.TestCase):
         for p in places:
            self.func_double(p)
            self.func_triple(p)
-
-
+'''
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab
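
Not part of the patch above, but for context: a minimal sketch of the kind of higher-order gradient computation that the restored multiply_double_grad / multiply_triple_grad yaml entries support, assuming a recent Paddle release in dynamic-graph mode. Shapes and variable names here are illustrative only; the disabled TestMultiplyHighGradCheck class appears to perform an analogous (numeric, static-mode) check.

import paddle

# Illustrative inputs; any broadcast-compatible shapes would do.
x = paddle.randn([3, 4])
y = paddle.randn([3, 4])
x.stop_gradient = False
y.stop_gradient = False

out = paddle.multiply(x, y)

# First-order gradient d(out)/dx; create_graph=True keeps the backward graph
# so a second differentiation (the double-grad path) can run on top of it.
(dx,) = paddle.grad(out, [x], create_graph=True)

# Second-order cross term d(dx)/dy; for out = x * y this is a tensor of ones.
(dxy,) = paddle.grad(dx, [y])
print(dxy)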