From 1eefd66abee1db18c8a344a66210048e8890a3fb Mon Sep 17 00:00:00 2001
From: Charles-hit <56987902+Charles-hit@users.noreply.github.com>
Date: Tue, 13 Sep 2022 20:33:53 +0800
Subject: [PATCH] support expand_v2 op backward reuse forward (#45941)

---
 paddle/phi/api/yaml/legacy_backward.yaml |  5 +-
 .../tests/unittests/test_expand_v2_op.py | 79 ++++++++++++++++++-
 2 files changed, 79 insertions(+), 5 deletions(-)

diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 3028e0c6c7..cef9e44eea 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -797,10 +797,7 @@
   forward : expand_grad (Tensor x, Tensor grad_out, IntArray shape) -> Tensor(grad_x)
   args : (Tensor grad_x_grad, IntArray shape)
   output : Tensor(grad_out_grad)
-  infer_meta :
-    func : ExpandInferMeta
-  kernel :
-    func : expand
+  invoke : expand(grad_x_grad, shape)
 
 - backward_api : expand_grad
   forward : expand (Tensor x, IntArray shape) -> Tensor(out)
diff --git a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
index 6fc6fc8f7e..82fb8284fe 100644
--- a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
@@ -18,9 +18,12 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
-from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid import compiler, Program, program_guard, core
 import paddle
 from paddle.fluid.framework import _test_eager_guard
+import gradient_checker
+from decorator_helper import prog_scope
+import paddle.fluid.layers as layers
 
 
 # Situation 1: shape is a list(without tensor)
@@ -284,6 +287,80 @@ class TestExpandV2DygraphAPI(unittest.TestCase):
                                           egr_expand_1.numpy())
 
 
+class TestExpandDoubleGradCheck(unittest.TestCase):
+
+    def expand_wrapper(self, x):
+        return paddle.expand(x[0], [2, 3])
+
+    @prog_scope()
+    def func(self, place):
+        # the shape of the input variable should be clearly specified and must not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data = layers.data('data', [2, 3], False, dtype)
+        data.persistable = True
+        out = paddle.expand(data, [2, 3])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.double_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(self.expand_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestExpandTripleGradCheck(unittest.TestCase):
+
+    def expand_wrapper(self, x):
+        return paddle.expand(x[0], [2, 3])
+
+    @prog_scope()
+    def func(self, place):
+        # the shape of the input variable should be clearly specified and must not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data = layers.data('data', [2, 3], False, dtype)
+        data.persistable = True
+        out = paddle.expand(data, [2, 3])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.triple_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.triple_grad_check_for_dygraph(self.expand_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 if __name__ == "__main__":
     paddle.enable_static()
     unittest.main()
--
GitLab
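
The YAML change composes expand_double_grad out of the forward op itself: `invoke : expand(grad_x_grad, shape)` re-dispatches to the forward expand kernel, so no separate infer_meta/kernel registration is needed. What the new tests exercise can also be reproduced with public APIs; the following is a minimal dygraph sketch (not part of the patch), assuming a Paddle 2.x eager-mode build in which paddle.grad supports create_graph for second-order differentiation:

    # Minimal second-order gradient sketch for paddle.expand; runs in the
    # default dygraph (eager) mode.
    import numpy as np
    import paddle

    x = paddle.to_tensor(
        np.random.uniform(-1, 1, [2, 3]).astype('float32'),
        stop_gradient=False)
    out = paddle.expand(x, [2, 3])

    # Seed gradient for the first backward pass, marked differentiable so
    # the backward graph itself can be differentiated again.
    grad_out = paddle.ones_like(out)
    grad_out.stop_gradient = False

    # First-order gradient; create_graph=True retains the backward graph.
    (dx,) = paddle.grad([out], [x], grad_outputs=[grad_out], create_graph=True)

    # Differentiating dx w.r.t. grad_out triggers expand_double_grad, which
    # this patch computes as expand(grad_x_grad, shape).
    (ddout,) = paddle.grad([dx], [grad_out], grad_outputs=[paddle.ones_like(dx)])
    print(ddout.shape)  # [2, 3]

gradient_checker.double_grad_check automates the same comparison against finite differences, and triple_grad_check repeats it one derivative order higher.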