diff --git a/python/paddle/fluid/tests/unittests/test_nn_grad.py b/python/paddle/fluid/tests/unittests/test_nn_grad.py
index d89465c5aecab546e54d5c8312f838e1da34a902..3a100cd321e036422b30c72757059ce017163f7d 100644
--- a/python/paddle/fluid/tests/unittests/test_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_nn_grad.py
@@ -157,6 +157,9 @@ class TestExpandDoubleGradCheck(unittest.TestCase):
 
 
 class TestTileDoubleGradCheck(unittest.TestCase):
+    def tile_wrapper(self, x):
+        return paddle.tile(x[0], [4, 9])
+
     @prog_scope()
     def func(self, place):
         x_shape = [3, 12]
@@ -171,6 +174,8 @@ class TestTileDoubleGradCheck(unittest.TestCase):
 
         gradient_checker.double_grad_check(
             [x], out, x_init=x_arr, place=place, eps=eps)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.tile_wrapper, [x], out, x_init=x_arr, place=place)
 
     def test_grad(self):
         places = [fluid.CPUPlace()]
@@ -181,6 +186,9 @@ class TestTileDoubleGradCheck(unittest.TestCase):
 
 
 class TestExpandV2DoubleGradCheck(unittest.TestCase):
+    def expand_wrapper(self, x):
+        return paddle.expand(x[0], [4, 12])
+
     @prog_scope()
     def func(self, place):
         x_shape = [1, 12]
@@ -195,6 +203,8 @@ class TestExpandV2DoubleGradCheck(unittest.TestCase):
 
         gradient_checker.double_grad_check(
             [x], out, x_init=x_arr, place=place, eps=eps)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.expand_wrapper, [x], out, x_init=x_arr, place=place)
 
     def test_grad(self):
         places = [fluid.CPUPlace()]
@@ -253,6 +263,9 @@ class TestUnsqueezeDoubleGradCheck(unittest.TestCase):
 
 
 class TestClipDoubleGradCheck(unittest.TestCase):
+    def clip_wrapper(self, x):
+        return paddle.clip(x[0], min=-1., max=1.)
+
     @prog_scope()
     def func(self, place):
         x_shape = [2, 4, 10]
@@ -264,6 +277,8 @@ class TestClipDoubleGradCheck(unittest.TestCase):
         x_arr = np.random.uniform(-5., 5., x_shape).astype(dtype)
 
         gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.clip_wrapper, [x], out, x_init=x_arr, place=place)
 
     def test_grad(self):
         places = [fluid.CPUPlace()]
@@ -357,6 +372,9 @@ class TestConstantPadDoubleGradCheckCase1(TestConstantPadDoubleGradCheck):
 
 
 class TestConcatDoubleGradCheck(unittest.TestCase):
+    def concat_wrapper(self, x):
+        return paddle.concat(x, axis=0)
+
     @prog_scope()
     def func(self, place):
         x_shape = [2, 3, 4, 5]
@@ -373,6 +391,11 @@ class TestConcatDoubleGradCheck(unittest.TestCase):
 
         gradient_checker.double_grad_check(
             [x1, x2], out, x_init=[x1_arr, x2_arr], place=place)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.concat_wrapper, [x1, x2],
+            out,
+            x_init=[x1_arr, x2_arr],
+            place=place)
 
     def test_grad(self):
         places = [fluid.CPUPlace()]
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index 7c68829c0959f603a284b309bd771bf516eb8fa7..1d27473d5c25c40aa87a211b593f45cb6ca839a3 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -251,6 +251,16 @@
   kernel :
     func : cholesky_solve_grad
 
+- backward_api : clip_double_grad
+  forward : clip_grad (Tensor x, Tensor grad_out, Scalar min = 0., Scalar max = 0.) -> Tensor(grad_x)
+  args : (Tensor x, Tensor grad_x_grad, Scalar min = 0., Scalar max = 0.)
+  output : Tensor(grad_out_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : clip_grad
+
 - backward_api : clip_grad
   forward : clip (Tensor x, Scalar min, Scalar max) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, Scalar min = 0., Scalar max = 0.)
@@ -260,6 +270,18 @@
     param : [x]
   kernel :
     func : clip_grad
+  backward : clip_double_grad
+
+- backward_api : concat_double_grad
+  forward : concat_grad (Tensor[] x, Tensor grad_out, Scalar axis) -> Tensor[](grad_x)
+  args : (Tensor[] grad_x_grad, Scalar axis = 0)
+  output : Tensor(grad_out_grad)
+  infer_meta :
+    func : ConcatInferMeta
+    param : [grad_x_grad, axis]
+  kernel :
+    func : concat
+  no_need_buffer : x
 
 - backward_api : concat_grad
   forward : concat (Tensor[] x, Scalar axis) -> Tensor(out)
@@ -271,6 +293,7 @@
   kernel :
     func : concat_grad
   no_need_buffer : x
+  backward : concat_double_grad
 
 - backward_api : conj_grad
   forward : conj (Tensor x) -> Tensor(out)
@@ -582,6 +605,15 @@
     func : expand_as_grad
   no_need_buffer : x
 
+- backward_api : expand_double_grad
+  forward : expand_grad (Tensor x, Tensor grad_out, IntArray shape) -> Tensor(grad_x)
+  args : (Tensor grad_x_grad, IntArray shape)
+  output : Tensor(grad_out_grad)
+  infer_meta :
+    func : ExpandInferMeta
+  kernel :
+    func : expand
+
 - backward_api : expand_grad
   forward : expand (Tensor x, IntArray shape) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, IntArray shape)
@@ -592,6 +624,7 @@
   kernel :
     func : expand_grad
   no_need_buffer : x
+  backward : expand_double_grad
 
 - backward_api : expm1_grad
   forward : expm1 (Tensor x) -> Tensor(out)
@@ -1881,6 +1914,15 @@
   kernel :
     func : thresholded_relu_grad
 
+- backward_api : tile_double_grad
+  forward : tile_grad (Tensor x, Tensor grad_out, IntArray repeat_times) -> Tensor(grad_x)
+  args : (Tensor grad_x_grad, IntArray repeat_times)
+  output : Tensor(grad_out_grad)
+  infer_meta :
+    func : TileInferMeta
+  kernel :
+    func : tile
+
 - backward_api : tile_grad
   forward : tile (Tensor x, IntArray repeat_times) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, IntArray repeat_times)
@@ -1891,6 +1933,7 @@
   kernel :
     func : tile_grad
   no_need_buffer : x
+  backward : tile_double_grad
 
 - backward_api : top_k_grad
   forward : top_k (Tensor x, Scalar k, int axis = -1, bool largest = true, bool sorted = true) -> Tensor(out), Tensor(indices)
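For reference, a minimal dygraph sketch of what the clip_double_grad registration above enables: differentiating clip's first-order gradient with respect to the incoming output gradient via two nested paddle.grad calls. This is not part of the patch; the input shape and clip bounds below are arbitrary and only mirror the values used in TestClipDoubleGradCheck.

import numpy as np
import paddle

# Arbitrary input, mirroring the shapes used in TestClipDoubleGradCheck.
x = paddle.to_tensor(
    np.random.uniform(-5., 5., [2, 4, 10]).astype('float64'),
    stop_gradient=False)
y = paddle.clip(x, min=-1., max=1.)

# Treat the incoming output-gradient as a differentiable input so that the
# second paddle.grad call goes through the newly registered double-grad rule.
dy = paddle.ones_like(y)
dy.stop_gradient = False

# create_graph=True keeps the backward graph so it can be differentiated again.
(dx,) = paddle.grad([y], [x], grad_outputs=[dy], create_graph=True)
(ddy,) = paddle.grad([dx], [dy], grad_outputs=[paddle.ones_like(dx)])

gradient_checker.double_grad_check_for_dygraph in the test file performs essentially this construction and compares the result against the static-graph double gradient.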