Unverified commit c16345cb, authored by chentianyu03, committed by GitHub

[Yaml]add Double grad (#42638)

* add conv2d_transpose_double_grad yaml

* add test_conv_transpose double_grad test case
Parent commit 3ac9e754
......@@ -27,6 +27,9 @@ from decorator_helper import prog_scope
class TestConvTransposeDoubleGradCheck(unittest.TestCase):
def conv_transpose_wrapper(self, x):
    """Dygraph entry point for the double-grad checker.

    ``x`` is a two-element sequence: ``x[0]`` is the input tensor and
    ``x[1]`` the filter tensor; runs a grouped (groups=1) transposed conv.
    """
    inp, weight = x[0], x[1]
    return paddle.nn.functional.conv2d_transpose(inp, weight, groups=1)
@prog_scope()
def func(self, place):
shape = [2, 4, 3, 3]
......@@ -55,6 +58,11 @@ class TestConvTransposeDoubleGradCheck(unittest.TestCase):
else:
gradient_checker.double_grad_check(
[x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps)
gradient_checker.double_grad_check_for_dygraph(
self.conv_transpose_wrapper, [x] + w,
y,
x_init=[x_arr] + w_arr,
place=place)
def test_grad(self):
places = []
......@@ -67,6 +75,10 @@ class TestConvTransposeDoubleGradCheck(unittest.TestCase):
class TestConvTranspose2DoubleGradCheck_AsyPadding(
TestConvTransposeDoubleGradCheck):
def conv_transpose_wrapper(self, x):
    """Dygraph wrapper used by the double-grad checker.

    Same as the base class wrapper, but with an asymmetric explicit
    padding ``[1, 0, 0, 1]`` to exercise that code path.
    """
    inp, weight = x[0], x[1]
    return paddle.nn.functional.conv2d_transpose(
        inp, weight, groups=1, padding=[1, 0, 0, 1])
@prog_scope()
def func(self, place):
shape = [2, 2, 3, 3]
......@@ -100,10 +112,19 @@ class TestConvTranspose2DoubleGradCheck_AsyPadding(
else:
gradient_checker.double_grad_check(
[x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps)
gradient_checker.double_grad_check_for_dygraph(
self.conv_transpose_wrapper, [x] + w,
y,
x_init=[x_arr] + w_arr,
place=place)
class TestConvTranspose2DoubleGradCheck_PaddingSAME(
TestConvTransposeDoubleGradCheck):
def conv_transpose_wrapper(self, x):
    """Dygraph wrapper used by the double-grad checker.

    Same as the base class wrapper, but with the "SAME" padding
    algorithm to exercise that code path.
    """
    inp, weight = x[0], x[1]
    return paddle.nn.functional.conv2d_transpose(
        inp, weight, groups=1, padding="SAME")
@prog_scope()
def func(self, place):
shape = [2, 2, 3, 3]
......@@ -137,10 +158,19 @@ class TestConvTranspose2DoubleGradCheck_PaddingSAME(
else:
gradient_checker.double_grad_check(
[x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps)
gradient_checker.double_grad_check_for_dygraph(
self.conv_transpose_wrapper, [x] + w,
y,
x_init=[x_arr] + w_arr,
place=place)
class TestConvTranspose2DoubleGradCheck_PaddingVALID(
TestConvTransposeDoubleGradCheck):
def conv_transpose_wrapper(self, x):
    """Dygraph wrapper used by the double-grad checker.

    Same as the base class wrapper, but with the "VALID" padding
    algorithm to exercise that code path.
    """
    inp, weight = x[0], x[1]
    return paddle.nn.functional.conv2d_transpose(
        inp, weight, groups=1, padding="VALID")
@prog_scope()
def func(self, place):
shape = [2, 2, 3, 3]
......@@ -174,10 +204,19 @@ class TestConvTranspose2DoubleGradCheck_PaddingVALID(
else:
gradient_checker.double_grad_check(
[x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps)
gradient_checker.double_grad_check_for_dygraph(
self.conv_transpose_wrapper, [x] + w,
y,
x_init=[x_arr] + w_arr,
place=place)
class TestConvTranspose2DoubleGradCheck_ChannelLast(
TestConvTransposeDoubleGradCheck):
def conv_transpose_wrapper(self, x):
    """Dygraph wrapper used by the double-grad checker.

    Same as the base class wrapper, but channel-last ("NHWC") layout
    with symmetric padding ``[1, 1]`` to exercise that code path.
    """
    inp, weight = x[0], x[1]
    return paddle.nn.functional.conv2d_transpose(
        inp, weight, groups=1, padding=[1, 1], data_format="NHWC")
@prog_scope()
def func(self, place):
shape = [2, 3, 3, 2]
......@@ -213,6 +252,11 @@ class TestConvTranspose2DoubleGradCheck_ChannelLast(
else:
gradient_checker.double_grad_check(
[x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps)
gradient_checker.double_grad_check_for_dygraph(
self.conv_transpose_wrapper, [x] + w,
y,
x_init=[x_arr] + w_arr,
place=place)
if __name__ == "__main__":
......
......@@ -301,6 +301,16 @@
use_gpudnn : true
optional : grad_input_grad, grad_filter_grad
# Registers the double-grad (second-order gradient) op for conv2d_transpose
# in Paddle's declarative backward-op YAML.
- backward_api : conv2d_transpose_double_grad
# The op being differentiated: the first-order grad op and its full signature
# (its outputs grad_x / grad_filter are what this double-grad differentiates).
forward : conv2d_transpose_grad(Tensor x, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(grad_x), Tensor(grad_filter)
# Inputs: the forward tensors/attrs plus the incoming grads-of-grads
# (grad_x_grad, grad_filter_grad) from the outer differentiation.
args : (Tensor x, Tensor filter, Tensor grad_out, Tensor grad_x_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
# Outputs: second-order grads w.r.t. x, filter, and the first-order grad_out.
output : Tensor(x_grad), Tensor(filter_grad), Tensor(grad_out_grad)
infer_meta :
# C++ InferMeta function that derives output shapes/dtypes for this op.
func : Conv2dTransposeDoubleGradInferMeta
kernel :
# PHI kernel implementing the computation.
func : conv2d_transpose_grad_grad
# Prefer the cuDNN implementation when running on GPU.
use_gpudnn : true
- backward_api : conv2d_transpose_grad
forward : conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
......@@ -310,6 +320,7 @@
kernel :
func : conv2d_transpose_grad
use_gpudnn : true
backward : conv2d_transpose_double_grad
- backward_api : conv3d_transpose_grad
forward : conv3d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.