diff --git a/python/paddle/fluid/tests/unittests/test_conv_transpose_nn_grad.py b/python/paddle/fluid/tests/unittests/test_conv_transpose_nn_grad.py
index a4ef15b1f0db3c3737c063d4471cc54ef24ea074..b9e9224b9e4029d0d9a70b75a1c30c5a8278a6e6 100644
--- a/python/paddle/fluid/tests/unittests/test_conv_transpose_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_conv_transpose_nn_grad.py
@@ -27,6 +27,9 @@ from decorator_helper import prog_scope
 
 
 class TestConvTransposeDoubleGradCheck(unittest.TestCase):
+    def conv_transpose_wrapper(self, x):
+        return paddle.nn.functional.conv2d_transpose(x[0], x[1], groups=1)
+
     @prog_scope()
     def func(self, place):
         shape = [2, 4, 3, 3]
@@ -55,6 +58,11 @@ class TestConvTransposeDoubleGradCheck(unittest.TestCase):
         else:
             gradient_checker.double_grad_check(
                 [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.conv_transpose_wrapper, [x] + w,
+            y,
+            x_init=[x_arr] + w_arr,
+            place=place)
 
     def test_grad(self):
         places = []
@@ -67,6 +75,10 @@ class TestConvTransposeDoubleGradCheck(unittest.TestCase):
 
 class TestConvTranspose2DoubleGradCheck_AsyPadding(
         TestConvTransposeDoubleGradCheck):
+    def conv_transpose_wrapper(self, x):
+        return paddle.nn.functional.conv2d_transpose(
+            x[0], x[1], groups=1, padding=[1, 0, 0, 1])
+
     @prog_scope()
     def func(self, place):
         shape = [2, 2, 3, 3]
@@ -100,10 +112,19 @@ class TestConvTranspose2DoubleGradCheck_AsyPadding(
         else:
             gradient_checker.double_grad_check(
                 [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.conv_transpose_wrapper, [x] + w,
+            y,
+            x_init=[x_arr] + w_arr,
+            place=place)
 
 
 class TestConvTranspose2DoubleGradCheck_PaddingSAME(
         TestConvTransposeDoubleGradCheck):
+    def conv_transpose_wrapper(self, x):
+        return paddle.nn.functional.conv2d_transpose(
+            x[0], x[1], groups=1, padding="SAME")
+
     @prog_scope()
     def func(self, place):
         shape = [2, 2, 3, 3]
@@ -137,10 +158,19 @@ class TestConvTranspose2DoubleGradCheck_PaddingSAME(
         else:
             gradient_checker.double_grad_check(
                 [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.conv_transpose_wrapper, [x] + w,
+            y,
+            x_init=[x_arr] + w_arr,
+            place=place)
 
 
 class TestConvTranspose2DoubleGradCheck_PaddingVALID(
         TestConvTransposeDoubleGradCheck):
+    def conv_transpose_wrapper(self, x):
+        return paddle.nn.functional.conv2d_transpose(
+            x[0], x[1], groups=1, padding="VALID")
+
     @prog_scope()
     def func(self, place):
         shape = [2, 2, 3, 3]
@@ -174,10 +204,19 @@ class TestConvTranspose2DoubleGradCheck_PaddingVALID(
         else:
             gradient_checker.double_grad_check(
                 [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.conv_transpose_wrapper, [x] + w,
+            y,
+            x_init=[x_arr] + w_arr,
+            place=place)
 
 
 class TestConvTranspose2DoubleGradCheck_ChannelLast(
         TestConvTransposeDoubleGradCheck):
+    def conv_transpose_wrapper(self, x):
+        return paddle.nn.functional.conv2d_transpose(
+            x[0], x[1], groups=1, padding=[1, 1], data_format="NHWC")
+
     @prog_scope()
     def func(self, place):
         shape = [2, 3, 3, 2]
@@ -213,6 +252,11 @@ class TestConvTranspose2DoubleGradCheck_ChannelLast(
         else:
             gradient_checker.double_grad_check(
                 [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.conv_transpose_wrapper, [x] + w,
+            y,
+            x_init=[x_arr] + w_arr,
+            place=place)
 
 
 if __name__ == "__main__":
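For context, the dygraph path these tests exercise can also be triggered directly with paddle.grad: taking a first-order gradient with create_graph=True and then differentiating it again reaches the new double-grad op. Below is a minimal sketch, not part of the patch; the shapes and the 1x1 filter are illustrative assumptions, chosen only so the channel counts line up.

import numpy as np
import paddle

# Illustrative shapes: x has 4 input channels; conv2d_transpose weights are
# laid out as [in_channels, out_channels // groups, kH, kW].
x = paddle.to_tensor(
    np.random.rand(2, 4, 3, 3).astype("float32"), stop_gradient=False)
w = paddle.to_tensor(
    np.random.rand(4, 2, 1, 1).astype("float32"), stop_gradient=False)

y = paddle.nn.functional.conv2d_transpose(x, w, groups=1)

# First-order gradient, kept on the graph (create_graph=True) so it can be
# differentiated a second time.
(dx,) = paddle.grad(y, x, create_graph=True)

# y is bilinear in (x, w), so dx depends on w but not on x; differentiating
# dx again with respect to w is the second-order cross term computed by the
# conv2d_transpose double-grad kernel registered in this patch.
(ddw,) = paddle.grad(dx.sum(), w)
print(ddw.shape)  # same shape as w: [4, 2, 1, 1]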
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index 3de9e323c2ed98365c2e8085cdba1a1f52db28dc..ff49fd426146b261bc5dc5e692cbcf14b9f99756 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -301,6 +301,16 @@
     use_gpudnn : true
   optional : grad_input_grad, grad_filter_grad
 
+- backward_api : conv2d_transpose_double_grad
+  forward : conv2d_transpose_grad(Tensor x, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(grad_x), Tensor(grad_filter)
+  args : (Tensor x, Tensor filter, Tensor grad_out, Tensor grad_x_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
+  output : Tensor(x_grad), Tensor(filter_grad), Tensor(grad_out_grad)
+  infer_meta :
+    func : Conv2dTransposeDoubleGradInferMeta
+  kernel :
+    func : conv2d_transpose_grad_grad
+    use_gpudnn : true
+
 - backward_api : conv2d_transpose_grad
   forward : conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
   args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
@@ -310,6 +320,7 @@
   kernel :
     func : conv2d_transpose_grad
     use_gpudnn : true
+  backward : conv2d_transpose_double_grad
 
 - backward_api : conv3d_transpose_grad
   forward : conv3d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
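The yaml entry reads the first-order grad op as if it were a forward op: `forward :` restates conv2d_transpose_grad's signature, `args` takes that op's inputs plus the incoming second-order seeds (grad_x_grad, grad_filter_grad), and `output` returns one gradient per input of that op, including grad_out_grad. The `backward :` line added to conv2d_transpose_grad is what chains the two. As a rough, hypothetical Python rendering of that contract (the real generator emits C++ into the phi API, not this function):

def conv2d_transpose_double_grad(
        x, filter, grad_out,            # inputs of conv2d_transpose_grad
        grad_x_grad, grad_filter_grad,  # second-order seeds for its outputs
        strides, paddings, output_padding, output_size,
        padding_algorithm, groups, dilations, data_format):
    """Returns (x_grad, filter_grad, grad_out_grad), one gradient per input
    of conv2d_transpose_grad, matching the yaml `output` field. Dispatches
    to the conv2d_transpose_grad_grad kernel, with cuDNN allowed per
    `use_gpudnn : true`."""
    ...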