未验证 提交 d4cdfa55 编写于 作者: C chentianyu03 提交者: GitHub

[Yaml]add pad/pad3d/squeeze/unsqueeze yaml and test case (#42774)

* add pad3d_double_grad yaml and test case

* add squeeze and unsqueeze double grad

* add double grad config

* add pad_grad and pad_double_grad yaml

* add pad_double_grad in config
上级 de735a9a
......@@ -29,7 +29,8 @@ ops_to_fill_zero_for_empty_grads = set([
"tanh_double_grad", "tanh_triple_grad", "subtract_double_grad",
"divide_double_grad", "log_double_grad", "elu_double_grad",
"leaky_relu_double_grad", "sqrt_double_grad", "rsqrt_double_grad",
"square_double_grad", "celu_double_grad"
"square_double_grad", "celu_double_grad", "pad_double_grad",
"pad3d_double_grad", "squeeze_double_grad", "unsqueeze_double_grad"
])
# For API dispatch used at python-level
......
......@@ -215,6 +215,10 @@ class TestExpandV2DoubleGradCheck(unittest.TestCase):
class TestSqueezeDoubleGradCheck(unittest.TestCase):
def squeeze_warpper(self, x):
    """Dygraph-mode callable handed to double_grad_check_for_dygraph.

    Squeezes axes 0 and 2 out of the sole input tensor.
    NOTE(review): "warpper" is a typo for "wrapper", kept because the
    gradient checker call elsewhere references this exact name.
    """
    return paddle.squeeze(x[0], [0, 2])
@prog_scope()
def func(self, place):
x_shape = [1, 3, 1, 40]
......@@ -229,6 +233,8 @@ class TestSqueezeDoubleGradCheck(unittest.TestCase):
gradient_checker.double_grad_check(
[x], out, x_init=x_arr, place=place, eps=eps)
gradient_checker.double_grad_check_for_dygraph(
self.squeeze_warpper, [x], out, x_init=x_arr, place=place)
def test_grad(self):
places = [fluid.CPUPlace()]
......@@ -239,6 +245,10 @@ class TestSqueezeDoubleGradCheck(unittest.TestCase):
class TestUnsqueezeDoubleGradCheck(unittest.TestCase):
def unsqueeze_wrapper(self, x):
    """Dygraph-mode callable handed to double_grad_check_for_dygraph.

    Inserts new size-1 dimensions at positions 1 and 2 of the sole
    input tensor.
    """
    insert_axes = [1, 2]
    return paddle.unsqueeze(x[0], insert_axes)
@prog_scope()
def func(self, place):
x_shape = [3, 40]
......@@ -253,6 +263,8 @@ class TestUnsqueezeDoubleGradCheck(unittest.TestCase):
gradient_checker.double_grad_check(
[x], out, x_init=x_arr, place=place, eps=eps)
gradient_checker.double_grad_check_for_dygraph(
self.unsqueeze_wrapper, [x], out, x_init=x_arr, place=place)
def test_grad(self):
places = [fluid.CPUPlace()]
......@@ -333,6 +345,10 @@ class TestTransposeDoubleGradCheckCase1(unittest.TestCase):
class TestConstantPadDoubleGradCheck(unittest.TestCase):
def pad_wrapper(self, x):
    """Dygraph-mode callable handed to double_grad_check_for_dygraph.

    Pads the sole input tensor with paddings [1, 1, 1, 1] using
    paddle.nn.functional.pad's default (constant) mode.
    """
    return paddle.nn.functional.pad(x[0], [1, 1, 1, 1])
@prog_scope()
def func(self, place):
x_shape = [2, 3, 4, 5]
......@@ -347,6 +363,8 @@ class TestConstantPadDoubleGradCheck(unittest.TestCase):
gradient_checker.double_grad_check(
[x], out, x_init=x_arr, place=place, eps=eps)
gradient_checker.double_grad_check_for_dygraph(
self.pad_wrapper, [x], out, x_init=x_arr, place=place)
def test_grad(self):
places = [fluid.CPUPlace()]
......
......@@ -1556,7 +1556,7 @@
func : PadInferMeta
kernel :
func : pad
# backward : pad_grad
backward : pad_grad
- api : pad3d
args : (Tensor x, IntArray paddings, str mode, float pad_value, str data_format)
......
......@@ -1338,6 +1338,15 @@
kernel :
func : p_norm_grad
# Second-order backward for pad3d.  Differentiating pad3d_grad w.r.t. its
# grad_out input is the pad3d forward map itself, so the forward pad3d
# kernel and Pad3dInferMeta are reused on grad_x_grad to produce
# grad_out_grad.
- backward_api : pad3d_double_grad
forward : pad3d_grad(Tensor x, Tensor grad_out, IntArray paddings, str mode, float pad_value, str data_format) -> Tensor(grad_x)
args : (Tensor grad_x_grad, IntArray paddings, str mode, float pad_value, str data_format)
output : Tensor(grad_out_grad)
infer_meta :
func : Pad3dInferMeta
kernel :
func : pad3d
- backward_api : pad3d_grad
forward : pad3d(Tensor x, IntArray paddings, str mode, float pad_value, str data_format) -> Tensor(out)
args : (Tensor x, Tensor out_grad, IntArray paddings, str mode, float pad_value, str data_format)
......@@ -1348,6 +1357,29 @@
kernel :
func : pad3d_grad
no_need_buffer : x
backward : pad3d_double_grad
# Second-order backward for pad.  The derivative of pad_grad w.r.t.
# grad_out is pad itself, so the forward pad kernel and PadInferMeta run
# directly on grad_x_grad to produce grad_out_grad.
- backward_api : pad_double_grad
forward : pad_grad(Tensor x, Tensor grad_out, int[] paddings, float pad_value) -> Tensor(grad_x)
args : (Tensor grad_x_grad, int[] paddings, float pad_value)
output : Tensor(grad_out_grad)
infer_meta :
func : PadInferMeta
kernel :
func : pad
# First-order backward for pad.  The kernel consumes only
# [out_grad, paddings, pad_value] (see the kernel's param list); x is used
# solely by UnchangedInferMeta for shape/dtype, so its data buffer can be
# released (no_need_buffer).  Chains to pad_double_grad for second order.
- backward_api : pad_grad
forward : pad(Tensor x, int[] paddings, float pad_value) -> Tensor(out)
args : (Tensor x, Tensor out_grad, int[] paddings, float pad_value)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : pad_grad
param: [out_grad, paddings, pad_value]
no_need_buffer : x
backward : pad_double_grad
- backward_api : pixel_shuffle_grad
forward : pixel_shuffle (Tensor x, int upscale_factor, str data_format) -> Tensor(out)
......@@ -1813,6 +1845,12 @@
func : square_grad
backward : square_double_grad
# Second-order backward for squeeze.  No dedicated kernel: it is composed
# (invoke) by re-applying squeeze with the same axes to grad_x_grad,
# yielding grad_out_grad.
- backward_api : squeeze_double_grad
forward : squeeze_grad(Tensor xshape, Tensor grad_out, int[] axes) -> Tensor(grad_x)
args : (Tensor grad_x_grad, int[] axes)
output : Tensor(grad_out_grad)
invoke: squeeze(grad_x_grad, axes)
- backward_api : squeeze_grad
forward : squeeze(Tensor x, int[] axes) -> Tensor(out), Tensor(xshape)
args : (Tensor xshape, Tensor out_grad, int[] axes)
......@@ -1823,6 +1861,7 @@
kernel :
func : squeeze_grad
inplace : (out_grad -> x_grad)
backward: squeeze_double_grad
- backward_api : stack_grad
forward : stack (Tensor[] x, int axis) -> Tensor(out)
......@@ -2085,16 +2124,24 @@
func : unfold_grad
no_need_buffer : x
# Second-order backward for unsqueeze.  No dedicated kernel: it is composed
# (invoke) by re-applying unsqueeze with the same axes to grad_x_grad,
# yielding grad_out_grad.
- backward_api : unsqueeze_double_grad
forward : unsqueeze_grad(Tensor xshape, Tensor grad_out, IntArray axes) -> Tensor(grad_x)
args : (Tensor grad_x_grad, IntArray axes)
output : Tensor(grad_out_grad)
invoke : unsqueeze(grad_x_grad, axes)
- backward_api : unsqueeze_grad
forward : unsqueeze(Tensor x, IntArray axes) -> Tensor(out), Tensor(xshape)
args : (Tensor xshape, Tensor out_grad)
args : (Tensor xshape, Tensor out_grad, IntArray axes)
output : Tensor(x_grad)
infer_meta :
func : KernelWithXShapeInferMeta
param: [xshape]
kernel :
func : unsqueeze_grad
param: [xshape, out_grad]
inplace : (out_grad -> x_grad)
backward : unsqueeze_double_grad
- backward_api : where_grad
forward : where (Tensor condition, Tensor x, Tensor y) -> Tensor(out)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册