From d9fac780fb096dfd8d0b3845656c83ac8f1f6332 Mon Sep 17 00:00:00 2001
From: Charles-hit <56987902+Charles-hit@users.noreply.github.com>
Date: Wed, 14 Sep 2022 14:17:13 +0800
Subject: [PATCH] support slice op backward refuse forward and add high level
 unit test (#45960)

---
 paddle/phi/api/yaml/legacy_backward.yaml      |  6 +-
 .../fluid/tests/unittests/test_slice_op.py    | 89 +++++++++++++++++++
 2 files changed, 90 insertions(+), 5 deletions(-)

diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index d22ebf60f5..7471d310a6 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -2138,11 +2138,7 @@
   forward : slice_grad (Tensor input, Tensor grad_out, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis) -> Tensor(grad_input)
   args : (Tensor grad_input_grad, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis)
   output : Tensor(grad_out_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [grad_input_grad]
-  kernel :
-    func : slice
+  invoke : slice(grad_input_grad, axes, starts, ends, infer_flags, decrease_axis)
 
 - backward_api : slice_grad
   forward : slice (Tensor input, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis) -> Tensor(out)
diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py
index 57864c62e4..fee36c0d42 100644
--- a/python/paddle/fluid/tests/unittests/test_slice_op.py
+++ b/python/paddle/fluid/tests/unittests/test_slice_op.py
@@ -22,6 +22,9 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 import paddle
 from paddle.fluid.framework import _test_eager_guard, _enable_legacy_dygraph
+import gradient_checker
+from decorator_helper import prog_scope
+import paddle.fluid.layers as layers
 
 paddle.enable_static()
 
@@ -840,6 +843,92 @@ class TestImperativeCUDAPinnedInput(unittest.TestCase):
         self.assertEqual(sliced.shape, [2, 70, 80])
 
 
+class TestSliceDoubleGradCheck(unittest.TestCase):
+
+    def slice_wrapper(self, x):
+        return paddle.slice(x[0],
+                            axes=[0, 1, 2],
+                            starts=[-3, 0, 2],
+                            ends=[3, 2, 4])
+
+    @prog_scope()
+    def func(self, place):
+        # The shape of the input variable should be specified explicitly and must not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data = layers.data('data', [4, 5, 6], False, dtype)
+        data.persistable = True
+        out = paddle.slice(data,
+                           axes=[0, 1, 2],
+                           starts=[-3, 0, 2],
+                           ends=[3, 2, 4])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.double_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(self.slice_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestSliceTripleGradCheck(unittest.TestCase):
+
+    def slice_wrapper(self, x):
+        return paddle.slice(x[0],
+                            axes=[0, 1, 2],
+                            starts=[-3, 0, 2],
+                            ends=[3, 2, 4])
+
+    @prog_scope()
+    def func(self, place):
+        # The shape of the input variable should be specified explicitly and must not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data = layers.data('data', [4, 5, 6], False, dtype)
+        data.persistable = True
+        out = paddle.slice(data,
+                           axes=[0, 1, 2],
+                           starts=[-3, 0, 2],
+                           ends=[3, 2, 4])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.triple_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.triple_grad_check_for_dygraph(self.slice_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 if __name__ == '__main__':
     paddle.enable_static()
     unittest.main()
--
GitLab