Unverified commit 8b546f1c authored by chentianyu03, committed by GitHub

double grad yaml and test case (#42553)

* add abs double grad yaml and test case

* add pool2d double grad yaml

* add pool2d dygraph double grad test case
Parent 778ea4ec
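For readers of this diff: a "double grad" is the gradient of a gradient. The yaml entries added below let Paddle's op generator emit second-order kernels, and the new tests drive them through `gradient_checker`. A minimal dygraph sketch of the concept, using only the public `paddle.grad` API and `tanh` (already covered in this test file, and chosen here because its second derivative is nonzero):

```python
import paddle

# Sketch: second-order (double) gradient in dygraph mode.
x = paddle.to_tensor([0.5, -1.0], stop_gradient=False)
y = paddle.tanh(x)

# First-order gradient; create_graph=True keeps the backward graph so the
# gradient itself is differentiable.
(dx,) = paddle.grad(y, x, create_graph=True)  # dy/dx = 1 - tanh(x)^2

# Second-order gradient -- the code path the *_double_grad entries serve.
(ddx,) = paddle.grad(dx.sum(), x)             # d2y/dx2 = -2*tanh(x)*(1 - tanh(x)^2)
```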
......@@ -135,6 +135,34 @@ class TestTanhDoubleGradCheck(unittest.TestCase):
             self.func(p)


+class TestAbsDoubleGradCheck(unittest.TestCase):
+    def abs_wrapper(self, x):
+        return paddle.abs(x[0])
+
+    @prog_scope()
+    def func(self, place):
+        shape = [2, 3, 7, 9]
+        eps = 0.0005
+        dtype = np.float64
+        x = layers.data('x', shape, False, dtype=dtype)
+        x.persistable = True
+        y = paddle.abs(x)
+        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
+        x_arr[np.abs(x_arr) < 0.005] = 0.002
+        gradient_checker.double_grad_check(
+            [x], y, x_init=x_arr, place=place, eps=eps)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.abs_wrapper, [x], y, x_init=x_arr, place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 class TestReluDoubleGradCheck(unittest.TestCase):
     @prog_scope()
     def func(self, place):
......
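Why the test above nudges values near zero (`x_arr[np.abs(x_arr) < 0.005] = 0.002`): `abs` has a kink at 0, and `double_grad_check` estimates derivatives by central differences with half-width `eps = 0.0005`. A sample closer to 0 than the stencil width straddles the kink and yields a bogus numerical derivative. A quick numpy illustration of the failure mode (illustrative only, not the checker's actual code):

```python
import numpy as np

eps = 0.0005

def central_diff(f, x, eps):
    # Central-difference estimate, conceptually what gradient checkers use.
    return (f(x + eps) - f(x - eps)) / (2 * eps)

print(central_diff(np.abs, 0.0001, eps))  # 0.2 -- stencil straddles the kink
print(central_diff(np.abs, 0.002, eps))   # 1.0 -- matches sign(x) away from it
```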
......@@ -407,6 +407,10 @@ class TestAvgPool2DDoubleGradCheckCase1(unittest.TestCase):
 class TestAvgPool2DDoubleGradCheckCase2(unittest.TestCase):
+    def pool2d_wrapper(self, x):
+        return paddle.nn.functional.avg_pool2d(
+            x[0], kernel_size=2, data_format="NHWC")
+
     @prog_scope()
     def func(self, place):
         input_NHWC = fluid.layers.data(
......@@ -416,13 +420,16 @@ class TestAvgPool2DDoubleGradCheckCase2(unittest.TestCase):
dtype="float32")
input_NHWC.persistable = True
y = layers.pool2d(
input_NHWC, pool_size=2, pool_type="avg", data_format="NHWC")
y = paddle.nn.functional.avg_pool2d(
input_NHWC, kernel_size=2, data_format="NHWC")
x_arr = np.random.uniform(-1, 1, [2, 5, 5, 3]).astype(np.float32)
gradient_checker.double_grad_check(
[input_NHWC], y, x_init=x_arr, place=place, eps=0.05)
gradient_checker.double_grad_check_for_dygraph(
self.pool2d_wrapper, [input_NHWC], y, x_init=x_arr, place=place)
def test_grad(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
......@@ -432,6 +439,10 @@ class TestAvgPool2DDoubleGradCheckCase2(unittest.TestCase):
 class TestAvgPool2DDoubleGradCheckCase3(unittest.TestCase):
+    def pool2d_wrapper(self, x):
+        return paddle.nn.functional.avg_pool2d(
+            x[0], kernel_size=2, padding=[1, 1])
+
     @prog_scope()
     def func(self, place):
         input_NCHW = fluid.layers.data(
......@@ -441,12 +452,14 @@ class TestAvgPool2DDoubleGradCheckCase3(unittest.TestCase):
dtype="float32")
input_NCHW.persistable = True
y = layers.pool2d(
input_NCHW, pool_size=2, pool_type="avg", pool_padding=[1, 1])
y = paddle.nn.functional.avg_pool2d(
input_NCHW, kernel_size=2, padding=[1, 1])
x_arr = np.random.uniform(-1, 1, [2, 3, 5, 5]).astype(np.float32)
gradient_checker.double_grad_check(
[input_NCHW], y, x_init=x_arr, place=place, eps=0.05)
gradient_checker.double_grad_check_for_dygraph(
self.pool2d_wrapper, [input_NCHW], y, x_init=x_arr, place=place)
def test_grad(self):
places = [fluid.CPUPlace()]
......@@ -457,6 +470,9 @@ class TestAvgPool2DDoubleGradCheckCase3(unittest.TestCase):
 class TestAvgPool2DDoubleGradCheckCase4(unittest.TestCase):
+    def pool2d_wrapper(self, x):
+        return paddle.nn.functional.avg_pool2d(x[0], kernel_size=[4, 4])
+
     @prog_scope()
     def func(self, place):
         input_NCHW = fluid.layers.data(
......@@ -467,10 +483,13 @@ class TestAvgPool2DDoubleGradCheckCase4(unittest.TestCase):
         input_NCHW.persistable = True
-        y = layers.pool2d(input_NCHW, pool_size=[4, 4], pool_type="avg")
+        y = paddle.nn.functional.avg_pool2d(input_NCHW, kernel_size=[4, 4])
         x_arr = np.random.uniform(-1, 1, [2, 3, 5, 5]).astype(np.float32)
         gradient_checker.double_grad_check(
             [input_NCHW], y, x_init=x_arr, place=place, eps=0.05)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.pool2d_wrapper, [input_NCHW], y, x_init=x_arr, place=place)

     def test_grad(self):
         places = [fluid.CPUPlace()]
......
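All four `pool2d_wrapper` methods above follow the contract expected by the test utility `double_grad_check_for_dygraph`: the checker calls the wrapper with the checked inputs packed in a list, and the wrapper must rebuild the identical computation through the dygraph API, with the same attributes as the static-graph version it is compared against. A hedged sketch of that pattern, standalone outside the test class:

```python
import paddle

def pool2d_wrapper(x):
    # x is the list of checked input tensors; x[0] is the pooled input.
    # kernel_size/padding must mirror the static-graph call being checked,
    # otherwise the two executions are not comparable.
    return paddle.nn.functional.avg_pool2d(x[0], kernel_size=2, padding=[1, 1])

inp = paddle.randn([2, 3, 5, 5])
out = pool2d_wrapper([inp])  # the op the checker replays in dygraph mode
```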
+- backward_api : abs_double_grad
+  forward : abs_grad (Tensor x, Tensor grad_out) -> Tensor(grad_x)
+  args : (Tensor x, Tensor grad_x_grad)
+  output : Tensor(grad_out_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : abs_double_grad
+  data_transform:
+    skip_transform : grad_x_grad
+
 - backward_api : abs_grad
   forward : abs (Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
......@@ -9,6 +21,7 @@
     func : abs_grad
   data_transform:
     skip_transform : out_grad
+  backward : abs_double_grad

 - backward_api : acos_grad
   forward : acos (Tensor x) -> Tensor(out)
......@@ -1283,6 +1296,16 @@
   kernel :
     func : poisson_grad

+- backward_api : pool2d_double_grad
+  forward : pool2d_grad(Tensor x, Tensor out, Tensor grad_out, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(grad_x)
+  args : (Tensor grad_x_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
+  output : Tensor(grad_out_grad)
+  infer_meta :
+    func : PoolInferMeta
+  kernel :
+    func : pool2d_double_grad
+    use_gpudnn : true
+
 - backward_api : pool2d_grad
   forward : pool2d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
   args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
......@@ -1292,6 +1315,7 @@
   kernel :
     func : pool2d_grad
     use_gpudnn : true
+  backward : pool2d_double_grad

 - backward_api : pool2d_grad_gpudnn_unused
   forward : pool2d_gpudnn_unused(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
......
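One detail worth spelling out, as a reading of the schema above rather than text from the commit: average pooling (the case the new tests exercise) is linear in its input, y = P x, so the first-order backward is grad_x = P^T grad_out, and the double grad collapses to the forward op applied to the perturbation, grad_out_grad = P grad_x_grad. That is consistent with `pool2d_double_grad` taking only `grad_x_grad` plus the pooling attributes and reusing the forward's `PoolInferMeta`, whereas `abs_double_grad` (grad_out_grad = sign(x) * grad_x_grad) still needs `x` in its `args`. A small numpy sketch of the linear-op identity:

```python
import numpy as np

# A 2x2 average pool over one flattened 2x2 patch, written as a linear map P.
P = np.full((1, 4), 0.25)

x = np.random.randn(4)
grad_out = np.random.randn(1)
grad_x = P.T @ grad_out            # first-order backward (pool2d_grad)

grad_x_grad = np.random.randn(4)   # perturbation of grad_x
grad_out_grad = P @ grad_x_grad    # double grad = forward on the perturbation
```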