diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 27738c49bae713a0f62fd642e79e50c07f56bacb..3028e0c6c73c63855de19799d51ea6265e1311df 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -592,9 +592,6 @@
 
 - backward_api : cumsum_grad
   forward : cumsum(Tensor x, Scalar axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [x]
   args : (Tensor out_grad, Scalar axis, bool flatten, bool exclusive, bool reverse)
   output : Tensor(x_grad)
   invoke : cumsum(out_grad, axis, flatten, exclusive, !reverse)
@@ -884,11 +881,7 @@
   forward : flip (Tensor x, int[] axis) -> Tensor(out)
   args : (Tensor out_grad, int[] axis)
   output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param: [out_grad]
-  kernel :
-    func : flip
+  invoke : flip(out_grad, axis)
 
 - backward_api : floor_grad
   forward : floor(Tensor x) -> Tensor(out)
@@ -1971,8 +1964,6 @@
   forward : reverse (Tensor x, IntArray axis) -> Tensor(out)
   args : (Tensor out_grad, IntArray axis)
   output : Tensor(x_grad)
-  infer_meta :
-    func : ReverseInferMeta
   invoke : reverse(out_grad, axis)
 
 - backward_api : roi_align_grad
diff --git a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py
index 2f66cd80ddef6a9f309f255df6af403d847c8a50..42def391ac17af70559c67cae34381a592ccb8a6 100644
--- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py
@@ -24,6 +24,9 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
 import paddle.inference as paddle_infer
+import gradient_checker
+from decorator_helper import prog_scope
+import paddle.fluid.layers as layers
 
 
 class TestCumsumOp(unittest.TestCase):
@@ -380,5 +383,79 @@ class TestTensorAxis(unittest.TestCase):
         np.testing.assert_allclose(static_out[0], infer_out)
 
 
+class TestCumsumDoubleGradCheck(unittest.TestCase):
+
+    def cumsum_wrapper(self, x):
+        return paddle.cumsum(x[0], 0)
+
+    @prog_scope()
+    def func(self, place):
+        # the shape of the input variable should be clearly specified, not include -1.
+        eps = 0.005
+        dtype = np.float64
+
+        data = layers.data('data', [3, 4], False, dtype)
+        data.persistable = True
+        out = paddle.cumsum(data, 0)
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.double_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(self.cumsum_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestCumsumTripleGradCheck(unittest.TestCase):
+
+    def cumsum_wrapper(self, x):
+        return paddle.cumsum(x[0], 0)
+
+    @prog_scope()
+    def func(self, place):
+        # the shape of the input variable should be clearly specified, not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data = layers.data('data', [2, 3], False, dtype)
+        data.persistable = True
+        out = paddle.cumsum(data, 0)
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.triple_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.triple_grad_check_for_dygraph(self.cumsum_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_flip.py b/python/paddle/fluid/tests/unittests/test_flip.py
index a933595be878397af5f8c640096919333a8b5023..774978f753935e51ade5f462b057a3f914631f97 100644
--- a/python/paddle/fluid/tests/unittests/test_flip.py
+++ b/python/paddle/fluid/tests/unittests/test_flip.py
@@ -21,6 +21,9 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid import Program, program_guard
 from op_test import OpTest
+import gradient_checker
+from decorator_helper import prog_scope
+import paddle.fluid.layers as layers
 
 
 class TestFlipOp_API(unittest.TestCase):
@@ -137,6 +140,80 @@ class TestFlipOpNegAxis(TestFlipOp):
         self.axis = [-1]
 
 
+class TestFlipDoubleGradCheck(unittest.TestCase):
+
+    def flip_wrapper(self, x):
+        return paddle.flip(x[0], [0, 1])
+
+    @prog_scope()
+    def func(self, place):
+        # the shape of the input variable should be clearly specified, not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data = layers.data('data', [3, 2, 2], False, dtype)
+        data.persistable = True
+        out = paddle.flip(data, [0, 1])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.double_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(self.flip_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestFlipTripleGradCheck(unittest.TestCase):
+
+    def flip_wrapper(self, x):
+        return paddle.flip(x[0], [0, 1])
+
+    @prog_scope()
+    def func(self, place):
+        # the shape of the input variable should be clearly specified, not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data = layers.data('data', [3, 2, 2], False, dtype)
+        data.persistable = True
+        out = paddle.flip(data, [0, 1])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.triple_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.triple_grad_check_for_dygraph(self.flip_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 if __name__ == "__main__":
     paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_reverse_op.py b/python/paddle/fluid/tests/unittests/test_reverse_op.py
index 7f09d9b70631dcbbbb541f7c6eb379f868678d4e..9896a7bccf686518e007165e5569e23d117eb2de 100644
--- a/python/paddle/fluid/tests/unittests/test_reverse_op.py
+++ b/python/paddle/fluid/tests/unittests/test_reverse_op.py
@@ -21,6 +21,9 @@ from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
+import gradient_checker
+from decorator_helper import prog_scope
+import paddle.fluid.layers as layers
 from paddle.fluid.framework import program_guard, Program
 from test_attribute_var import UnittestBase
@@ -267,6 +270,80 @@ class TestReverseAxisListTensor(TestReverseAxisTensor):
         return out
 
 
+class TestReverseDoubleGradCheck(unittest.TestCase):
+
+    def reverse_wrapper(self, x):
+        return fluid.layers.reverse(x[0], [0, 1])
+
+    @prog_scope()
+    def func(self, place):
+        # the shape of the input variable should be clearly specified, not include -1.
+        eps = 0.005
+        dtype = np.float64
+
+        data = layers.data('data', [3, 4], False, dtype)
+        data.persistable = True
+        out = fluid.layers.reverse(data, [0, 1])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.double_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(self.reverse_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestReverseTripleGradCheck(unittest.TestCase):
+
+    def reverse_wrapper(self, x):
+        return fluid.layers.reverse(x[0], [0, 1])
+
+    @prog_scope()
+    def func(self, place):
+        # the shape of the input variable should be clearly specified, not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data = layers.data('data', [2, 3], False, dtype)
+        data.persistable = True
+        out = fluid.layers.reverse(data, [0, 1])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.triple_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.triple_grad_check_for_dygraph(self.reverse_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 if __name__ == '__main__':
     paddle.enable_static()
     unittest.main()
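Note (not part of the patch): a minimal standalone sketch of the adjoint identities behind the new `invoke` entries in legacy_backward.yaml — flip/reverse are their own adjoint, and the adjoint of a forward cumsum is a reversed cumsum. It uses only numpy; every name below is local to this example and assumed, not taken from the patch.

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((3, 4))   # example input
g = rng.standard_normal((3, 4))   # upstream gradient d(loss)/d(out)

# flip_grad / reverse_grad: flipping is an involution, so its adjoint is itself;
# <flip(x), g> == <x, flip(g)> is what `invoke : flip(out_grad, axis)` relies on.
axes = (0, 1)
assert np.allclose(np.sum(np.flip(x, axes) * g), np.sum(x * np.flip(g, axes)))

# cumsum_grad: the adjoint of a forward cumsum is a reversed cumsum, which is
# what `invoke : cumsum(out_grad, axis, flatten, exclusive, !reverse)` expresses.
fwd = np.cumsum(x, axis=0)                           # forward cumsum
rev = np.flip(np.cumsum(np.flip(g, 0), axis=0), 0)   # reversed cumsum of g
assert np.allclose(np.sum(fwd * g), np.sum(x * rev))

print("adjoint identities hold")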