From 65dd828e6491692916052bfd288487db3296dbf8 Mon Sep 17 00:00:00 2001 From: Charles-hit <56987902+Charles-hit@users.noreply.github.com> Date: Wed, 14 Sep 2022 10:55:17 +0800 Subject: [PATCH] support assign op backward refuse forward (#45879) --- paddle/phi/api/yaml/legacy_backward.yaml | 28 +------ .../fluid/tests/unittests/test_assign_op.py | 77 +++++++++++++++++++ 2 files changed, 78 insertions(+), 27 deletions(-) diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml index 090283877ff..d22ebf60f56 100755 --- a/paddle/phi/api/yaml/legacy_backward.yaml +++ b/paddle/phi/api/yaml/legacy_backward.yaml @@ -182,27 +182,11 @@ func : asinh_grad inplace : (out_grad -> x_grad) -- backward_api : assign_double_grad - forward : assign_grad (Tensor grad_out) -> Tensor(grad_x) - args : (Tensor grad_x_grad) - output : Tensor(grad_out_grad) - infer_meta : - func : UnchangedInferMeta - kernel : - func : assign - backward: assign_triple_grad - inplace : (grad_x_grad -> grad_out_grad) - - backward_api : assign_grad forward : assign (Tensor x) -> Tensor(out) args : (Tensor out_grad) output : Tensor(x_grad) - infer_meta : - func : UnchangedInferMeta - kernel : - func : assign - backward: assign_double_grad - inplace : (out_grad -> x_grad) + invoke : assign(out_grad) - backward_api : assign_out__grad forward : assign_out_ (Tensor x, Tensor output) -> Tensor(out) @@ -214,16 +198,6 @@ func : assign inplace : (out_grad -> x_grad) -- backward_api : assign_triple_grad - forward : assign_double_grad (Tensor grad_out) -> Tensor(grad_x) - args : (Tensor grad_x_grad) - output : Tensor(grad_out_grad) - infer_meta : - func : UnchangedInferMeta - kernel : - func : assign - inplace : (grad_x_grad -> grad_out_grad) - - backward_api : atan_grad forward : atan (Tensor x) -> Tensor(out) args : (Tensor x, Tensor out_grad) diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py index 
fa902542f16..9721cca5bf9 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_op.py @@ -24,6 +24,9 @@ import paddle.fluid as fluid from paddle.fluid import compiler, Program, program_guard from paddle.fluid.backward import append_backward import paddle.fluid.framework as framework +import gradient_checker +from decorator_helper import prog_scope +import paddle.fluid.layers as layers class TestAssignOp(op_test.OpTest): @@ -258,5 +261,79 @@ class TestAssignOpErrorApi(unittest.TestCase): paddle.disable_static() +class TestAssignDoubleGradCheck(unittest.TestCase): + + def assign_wrapper(self, x): + return paddle.fluid.layers.assign(x[0]) + + @prog_scope() + def func(self, place): + # the shape of input variable should be clearly specified, not include -1. + eps = 0.005 + dtype = np.float32 + + data = layers.data('data', [3, 4, 5], False, dtype) + data.persistable = True + out = paddle.fluid.layers.assign(data) + data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) + + gradient_checker.double_grad_check([data], + out, + x_init=[data_arr], + place=place, + eps=eps) + fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) + gradient_checker.double_grad_check_for_dygraph(self.assign_wrapper, + [data], + out, + x_init=[data_arr], + place=place) + + def test_grad(self): + paddle.enable_static() + places = [fluid.CPUPlace()] + if core.is_compiled_with_cuda(): + places.append(fluid.CUDAPlace(0)) + for p in places: + self.func(p) + + +class TestAssignTripleGradCheck(unittest.TestCase): + + def assign_wrapper(self, x): + return paddle.fluid.layers.assign(x[0]) + + @prog_scope() + def func(self, place): + # the shape of input variable should be clearly specified, not include -1. 
+ eps = 0.005 + dtype = np.float32 + + data = layers.data('data', [3, 4, 5], False, dtype) + data.persistable = True + out = paddle.fluid.layers.assign(data) + data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) + + gradient_checker.triple_grad_check([data], + out, + x_init=[data_arr], + place=place, + eps=eps) + fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) + gradient_checker.triple_grad_check_for_dygraph(self.assign_wrapper, + [data], + out, + x_init=[data_arr], + place=place) + + def test_grad(self): + paddle.enable_static() + places = [fluid.CPUPlace()] + if core.is_compiled_with_cuda(): + places.append(fluid.CUDAPlace(0)) + for p in places: + self.func(p) + + if __name__ == '__main__': unittest.main() -- GitLab