Unverified · Commit 8b546f1c authored by chentianyu03, committed by GitHub

double grad yaml and test case (#42553)

* add abs double grad yaml and test case

* add pool2d double grad yaml

* add pool2d dygraph double grad test case
Parent 778ea4ec
@@ -135,6 +135,34 @@ class TestTanhDoubleGradCheck(unittest.TestCase):
             self.func(p)
 
 
+class TestAbsDoubleGradCheck(unittest.TestCase):
+    def abs_wrapper(self, x):
+        return paddle.abs(x[0])
+
+    @prog_scope()
+    def func(self, place):
+        shape = [2, 3, 7, 9]
+        eps = 0.0005
+        dtype = np.float64
+        x = layers.data('x', shape, False, dtype=dtype)
+        x.persistable = True
+        y = paddle.abs(x)
+        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
+        x_arr[np.abs(x_arr) < 0.005] = 0.002
+
+        gradient_checker.double_grad_check(
+            [x], y, x_init=x_arr, place=place, eps=eps)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.abs_wrapper, [x], y, x_init=x_arr, place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 class TestReluDoubleGradCheck(unittest.TestCase):
     @prog_scope()
     def func(self, place):
...
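For context, `double_grad_check_for_dygraph` compares the static-graph second-order gradient against the eager-mode path that the new `abs_double_grad` kernel enables. A minimal eager-mode sketch of what that check exercises (variable names are illustrative, and it assumes the double-grad support added in this commit):

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.random.uniform(-1.0, 1.0, [4]).astype('float64'))
x.stop_gradient = False
y = paddle.abs(x)

# Inject an explicit grad_out so grad_x stays differentiable w.r.t. it.
grad_out = paddle.ones_like(y)
grad_out.stop_gradient = False
(grad_x,) = paddle.grad(y, x, grad_outputs=grad_out, create_graph=True)

# With abs_double_grad registered, this second call succeeds and returns
# d(sum(grad_x))/d(grad_out) = sign(x) elementwise.
(grad_out_grad,) = paddle.grad(grad_x, grad_out)
print(np.allclose(grad_out_grad.numpy(), np.sign(x.numpy())))  # True
```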
@@ -407,6 +407,10 @@ class TestAvgPool2DDoubleGradCheckCase1(unittest.TestCase):
 
 class TestAvgPool2DDoubleGradCheckCase2(unittest.TestCase):
+    def pool2d_wrapper(self, x):
+        return paddle.nn.functional.avg_pool2d(
+            x[0], kernel_size=2, data_format="NHWC")
+
     @prog_scope()
     def func(self, place):
         input_NHWC = fluid.layers.data(
@@ -416,13 +420,16 @@ class TestAvgPool2DDoubleGradCheckCase2(unittest.TestCase):
             dtype="float32")
         input_NHWC.persistable = True
-        y = layers.pool2d(
-            input_NHWC, pool_size=2, pool_type="avg", data_format="NHWC")
+        y = paddle.nn.functional.avg_pool2d(
+            input_NHWC, kernel_size=2, data_format="NHWC")
         x_arr = np.random.uniform(-1, 1, [2, 5, 5, 3]).astype(np.float32)
 
         gradient_checker.double_grad_check(
             [input_NHWC], y, x_init=x_arr, place=place, eps=0.05)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.pool2d_wrapper, [input_NHWC], y, x_init=x_arr, place=place)
 
     def test_grad(self):
         places = [fluid.CPUPlace()]
         if core.is_compiled_with_cuda():
@@ -432,6 +439,10 @@ class TestAvgPool2DDoubleGradCheckCase2(unittest.TestCase):
 
 class TestAvgPool2DDoubleGradCheckCase3(unittest.TestCase):
+    def pool2d_wrapper(self, x):
+        return paddle.nn.functional.avg_pool2d(
+            x[0], kernel_size=2, padding=[1, 1])
+
     @prog_scope()
     def func(self, place):
         input_NCHW = fluid.layers.data(
@@ -441,12 +452,14 @@ class TestAvgPool2DDoubleGradCheckCase3(unittest.TestCase):
             dtype="float32")
         input_NCHW.persistable = True
-        y = layers.pool2d(
-            input_NCHW, pool_size=2, pool_type="avg", pool_padding=[1, 1])
+        y = paddle.nn.functional.avg_pool2d(
+            input_NCHW, kernel_size=2, padding=[1, 1])
         x_arr = np.random.uniform(-1, 1, [2, 3, 5, 5]).astype(np.float32)
 
         gradient_checker.double_grad_check(
             [input_NCHW], y, x_init=x_arr, place=place, eps=0.05)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.pool2d_wrapper, [input_NCHW], y, x_init=x_arr, place=place)
 
     def test_grad(self):
         places = [fluid.CPUPlace()]
@@ -457,6 +470,9 @@ class TestAvgPool2DDoubleGradCheckCase3(unittest.TestCase):
 
 class TestAvgPool2DDoubleGradCheckCase4(unittest.TestCase):
+    def pool2d_wrapper(self, x):
+        return paddle.nn.functional.avg_pool2d(x[0], kernel_size=[4, 4])
+
     @prog_scope()
     def func(self, place):
         input_NCHW = fluid.layers.data(
@@ -467,10 +483,13 @@ class TestAvgPool2DDoubleGradCheckCase4(unittest.TestCase):
         input_NCHW.persistable = True
-        y = layers.pool2d(input_NCHW, pool_size=[4, 4], pool_type="avg")
+        y = paddle.nn.functional.avg_pool2d(input_NCHW, kernel_size=[4, 4])
         x_arr = np.random.uniform(-1, 1, [2, 3, 5, 5]).astype(np.float32)
 
         gradient_checker.double_grad_check(
             [input_NCHW], y, x_init=x_arr, place=place, eps=0.05)
+        gradient_checker.double_grad_check_for_dygraph(
+            self.pool2d_wrapper, [input_NCHW], y, x_init=x_arr, place=place)
 
     def test_grad(self):
         places = [fluid.CPUPlace()]
...
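Average pooling is a linear operator, so its double grad is just the forward pool applied to the incoming `grad_x_grad`; that identity is what these new dygraph checks rely on. A minimal eager-mode sketch (names are illustrative, and it assumes the `pool2d_double_grad` path added in this commit):

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 3, 4, 4])
x.stop_gradient = False
y = F.avg_pool2d(x, kernel_size=2)

# Keep grad_out as a differentiable leaf so a second grad is possible.
grad_out = paddle.randn(y.shape)
grad_out.stop_gradient = False
(grad_x,) = paddle.grad(y, x, grad_outputs=grad_out, create_graph=True)

# Probe the double grad with a random cotangent grad_x_grad; by linearity
# the result should equal the forward pool of that cotangent.
ddx = paddle.randn(x.shape)
(grad_out_grad,) = paddle.grad(grad_x, grad_out, grad_outputs=ddx)
print(paddle.allclose(grad_out_grad, F.avg_pool2d(ddx, kernel_size=2)))  # True
```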
+- backward_api : abs_double_grad
+  forward : abs_grad (Tensor x, Tensor grad_out) -> Tensor(grad_x)
+  args : (Tensor x, Tensor grad_x_grad)
+  output : Tensor(grad_out_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : abs_double_grad
+  data_transform:
+    skip_transform : grad_x_grad
+
 - backward_api : abs_grad
   forward : abs (Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
@@ -9,6 +21,7 @@
     func : abs_grad
   data_transform:
     skip_transform : out_grad
+  backward : abs_double_grad
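The math this entry encodes is small: for y = |x|, grad_x = sign(x) * grad_out, so differentiating grad_x against a cotangent grad_x_grad yields grad_out_grad = sign(x) * grad_x_grad, matching the args and output declared above. A minimal numpy sketch (the helper name is illustrative; the real kernel is implemented in C++):

```python
import numpy as np

def abs_double_grad_ref(x, grad_x_grad):
    # x is only read for its sign, which is why infer_meta can use
    # UnchangedInferMeta with param [x].
    return np.sign(x) * grad_x_grad
```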
 - backward_api : acos_grad
   forward : acos (Tensor x) -> Tensor(out)
@@ -1283,6 +1296,16 @@
   kernel :
     func : poisson_grad
 
+- backward_api : pool2d_double_grad
+  forward : pool2d_grad(Tensor x, Tensor out, Tensor grad_out, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(grad_x)
+  args : (Tensor grad_x_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
+  output : Tensor(grad_out_grad)
+  infer_meta :
+    func : PoolInferMeta
+  kernel :
+    func : pool2d_double_grad
+    use_gpudnn : true
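For average pooling the double grad reduces to the forward pool applied to grad_x_grad, which is why this entry can reuse PoolInferMeta: grad_out_grad has the pooled output shape. A minimal numpy sketch for the NCHW, stride-equals-kernel, no-padding case (helper name illustrative; the real kernel also handles the other attribute combinations):

```python
import numpy as np

def pool2d_double_grad_ref(grad_x_grad, k=2):
    # Assumes NCHW layout, stride == kernel size, no padding, and
    # H, W divisible by k.
    n, c, h, w = grad_x_grad.shape
    g = grad_x_grad.reshape(n, c, h // k, k, w // k, k)
    # avg-pool the cotangent: by linearity of avg pooling this is
    # exactly grad_out_grad.
    return g.mean(axis=(3, 5))
```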
 - backward_api : pool2d_grad
   forward : pool2d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
   args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
@@ -1292,6 +1315,7 @@
   kernel :
     func : pool2d_grad
     use_gpudnn : true
+  backward : pool2d_double_grad
 
 - backward_api : pool2d_grad_gpudnn_unused
   forward : pool2d_gpudnn_unused(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
...