diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index cef9e44eeaf7f977871e2e012c5d4663cce8ef28..a284f4304e655d24dc07a9472110ac9b1c2fe2cf 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -2530,10 +2530,7 @@
   forward : tile_grad (Tensor x, Tensor grad_out, IntArray repeat_times) -> Tensor(grad_x)
   args : (Tensor grad_x_grad, IntArray repeat_times)
   output : Tensor(grad_out_grad)
-  infer_meta :
-    func : TileInferMeta
-  kernel :
-    func : tile
+  invoke : tile(grad_x_grad, repeat_times)
 
 - backward_api : tile_grad
   forward : tile (Tensor x, IntArray repeat_times) -> Tensor(out)
diff --git a/python/paddle/fluid/tests/unittests/test_tile_op.py b/python/paddle/fluid/tests/unittests/test_tile_op.py
index c1c6820d9c17e0244f455fba65df859a7ea34da5..9f694ab3319f304949dd0c3e2924ac5cb440a772 100644
--- a/python/paddle/fluid/tests/unittests/test_tile_op.py
+++ b/python/paddle/fluid/tests/unittests/test_tile_op.py
@@ -19,7 +19,10 @@
 import numpy as np
 from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid import compiler, Program, program_guard, core
+import gradient_checker
+from decorator_helper import prog_scope
+import paddle.fluid.layers as layers
 
 #Situation 1: repeat_times is a list (without tensor)
@@ -263,6 +266,80 @@ class TestTileAPI(unittest.TestCase):
         assert np.array_equal(out_3.numpy(), np.tile(np_x, (2, 3)))
 
 
+class TestTileDoubleGradCheck(unittest.TestCase):
+
+    def tile_wrapper(self, x):
+        return paddle.tile(x[0], [2, 1])
+
+    @prog_scope()
+    def func(self, place):
+        # the shape of the input variable should be clearly specified, not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data = layers.data('data', [1, 2], False, dtype)
+        data.persistable = True
+        out = paddle.tile(data, [2, 1])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.double_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(self.tile_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestTileTripleGradCheck(unittest.TestCase):
+
+    def tile_wrapper(self, x):
+        return paddle.tile(x[0], [2, 1])
+
+    @prog_scope()
+    def func(self, place):
+        # the shape of the input variable should be clearly specified, not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data = layers.data('data', [1, 2], False, dtype)
+        data.persistable = True
+        out = paddle.tile(data, [2, 1])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.triple_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.triple_grad_check_for_dygraph(self.tile_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 if __name__ == "__main__":
     paddle.enable_static()
     unittest.main()
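
Note on the YAML change: replacing the explicit `infer_meta`/`kernel` pair with `invoke : tile(grad_x_grad, repeat_times)` works because `tile_grad` is linear in `grad_out` (it sums `grad_out` over the repeated tiles), so the gradient it propagates back to `grad_out` is the forward `tile` itself. The sketch below is a minimal numpy illustration of that adjoint relationship, not part of the patch; the `tile_grad` helper is a hypothetical reimplementation for demonstration, not Paddle's kernel.

```python
import numpy as np

def tile_grad(grad_out, x_shape, repeat_times):
    # Reduce grad_out back to x's shape by summing over every repeat axis:
    # each size s_i * r_i axis is split into (r_i, s_i) and r_i is summed out.
    interleaved = [n for pair in zip(repeat_times, x_shape) for n in pair]
    return grad_out.reshape(interleaved).sum(
        axis=tuple(range(0, 2 * len(x_shape), 2)))

# Same shapes as the tests above: x is (1, 2), tiled by (2, 1).
x_shape, repeat_times = (1, 2), (2, 1)
g = np.random.rand(*(s * r for s, r in zip(x_shape, repeat_times)))
v = np.random.rand(*x_shape)

# Adjoint identity <tile_grad(g), v> == <g, tile(v)>: the operator mapping
# grad_x_grad to grad_out_grad is exactly the forward tile, which is what
# the `invoke` line in the YAML relies on.
lhs = (tile_grad(g, x_shape, repeat_times) * v).sum()
rhs = (g * np.tile(v, repeat_times)).sum()
assert np.isclose(lhs, rhs)
```

Reusing the forward op via `invoke` also avoids duplicating the `TileInferMeta`/kernel registration for the double-grad entry.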