From ff1da1880ece95db066de737d117a1a671385286 Mon Sep 17 00:00:00 2001
From: Charles-hit <56987902+Charles-hit@users.noreply.github.com>
Date: Tue, 13 Sep 2022 20:40:12 +0800
Subject: [PATCH] support concat backward refuse forward (#45940)

---
 paddle/phi/api/yaml/legacy_backward.yaml      |  6 +-
 .../fluid/tests/unittests/test_concat_op.py   | 81 +++++++++++++++++++
 2 files changed, 82 insertions(+), 5 deletions(-)

diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index a284f4304e6..090283877ff 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -430,11 +430,7 @@
   forward : concat_grad (Tensor[] x, Tensor grad_out, Scalar axis) -> Tensor[](grad_x)
   args : (Tensor[] grad_x_grad, Scalar axis = 0)
   output : Tensor(grad_out_grad)
-  infer_meta :
-    func : ConcatInferMeta
-    param : [grad_x_grad, axis]
-  kernel :
-    func : concat
+  invoke : concat(grad_x_grad, axis)
 
 - backward_api : concat_grad
   forward : concat (Tensor[] x, Scalar axis) -> Tensor(out)
diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py
index 0bf3d6230d8..10c74107083
--- a/python/paddle/fluid/tests/unittests/test_concat_op.py
+++ b/python/paddle/fluid/tests/unittests/test_concat_op.py
@@ -21,6 +21,9 @@ import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard, core
 from paddle.fluid.framework import _test_eager_guard
 import paddle
+import gradient_checker
+from decorator_helper import prog_scope
+import paddle.fluid.layers as layers
 
 
 class TestConcatOp(OpTest):
@@ -451,5 +454,83 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
             res[0], np.concatenate([self.x] * self.iter_num, axis=self.axis))
 
 
+class TestConcatDoubleGradCheck(unittest.TestCase):
+
+    def concat_wrapper(self, x):
+        return paddle.concat(x)
+
+    @prog_scope()
+    def func(self, place):
+        # the shape of the input variables should be clearly specified and must not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data1 = layers.data('data1', [2, 3], False, dtype)
+        data1.persistable = True
+        data2 = layers.data('data2', [2, 3], False, dtype)
+        data2.persistable = True
+        out = paddle.concat([data1, data2])
+        data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype)
+        data2_arr = np.random.uniform(-1, 1, data2.shape).astype(dtype)
+        gradient_checker.double_grad_check([data1, data2],
+                                           out,
+                                           x_init=[data1_arr, data2_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(
+            self.concat_wrapper, [data1, data2],
+            out,
+            x_init=[data1_arr, data2_arr],
+            place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestConcatTripleGradCheck(unittest.TestCase):
+
+    def concat_wrapper(self, x):
+        return paddle.concat(x, 1)
+
+    @prog_scope()
+    def func(self, place):
+        # the shape of the input variables should be clearly specified and must not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data1 = layers.data('data1', [2, 3, 4], False, dtype)
+        data1.persistable = True
+        data2 = layers.data('data2', [2, 3, 4], False, dtype)
+        data2.persistable = True
+        out = paddle.concat([data1, data2], 1)
+        data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype)
+        data2_arr = np.random.uniform(-1, 1, data2.shape).astype(dtype)
+        gradient_checker.double_grad_check([data1, data2],
+                                           out,
+                                           x_init=[data1_arr, data2_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(
+            self.concat_wrapper, [data1, data2],
+            out,
+            x_init=[data1_arr, data2_arr],
+            place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab
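
For context, the new `invoke : concat(grad_x_grad, axis)` rule composes concat's double grad from the forward `concat` API itself: the first-order backward of concat is a split, so the gradient flowing back to grad_out is simply the concatenation of the incoming grad_x_grad tensors along the same axis. A minimal eager-mode sketch of that identity (illustrative only; the shapes and axis below are made up, not taken from the patch):

    import numpy as np
    import paddle

    # Hypothetical grad-grad inputs; any tensors that concatenate along `axis` work.
    axis = 0
    grad_x_grad = [
        paddle.to_tensor(np.random.rand(2, 3).astype('float32')),
        paddle.to_tensor(np.random.rand(2, 3).astype('float32')),
    ]

    # What concat_double_grad now computes via the `invoke` rule.
    grad_out_grad = paddle.concat(grad_x_grad, axis)
    assert grad_out_grad.shape == [4, 3]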