Unverified commit dbe189b1, authored by YuanRisheng, committed by GitHub

add unit test for batch_norm and leaky_relu (#42369)

Parent 5faf76b7
@@ -27,7 +27,7 @@ ops_to_fill_zero_for_empty_grads = set([
"add_triple_grad", "multiply_double_grad", "multiply_triple_grad",
"conv2d_grad_grad", "batch_norm_double_grad", "tanh_double_grad",
"tanh_triple_grad", "subtract_double_grad", "divide_double_grad",
"log_double_grad", "elu_double_grad"
"log_double_grad", "elu_double_grad", "leaky_relu_double_grad"
])
# For API dispatch used at python-level
......
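For context (not part of this diff): ops listed in ops_to_fill_zero_for_empty_grads receive optional higher-order grad inputs that the generated eager code must treat as zeros when they are undefined. A minimal sketch of that contract, with a hypothetical helper name:

import paddle

# Hypothetical illustration only: an empty/undefined grad input for an op in
# the set above is replaced by zeros shaped like a reference tensor.
def fill_zero_if_empty(grad, like):
    return paddle.zeros_like(like) if grad is None else grad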
@@ -161,6 +161,9 @@ class TestReluDoubleGradCheck(unittest.TestCase):
class TestLeakyReluDoubleGradCheck(unittest.TestCase):
def leaky_relu_wrapper(self, x):
return paddle.nn.functional.leaky_relu(x[0], negative_slope=0.2)
@prog_scope()
def func(self, place):
shape = [2, 3, 7, 9]
@@ -177,6 +180,8 @@ class TestLeakyReluDoubleGradCheck(unittest.TestCase):
gradient_checker.double_grad_check(
[x], y, x_init=x_arr, place=place, eps=eps)
gradient_checker.double_grad_check_for_dygraph(
self.leaky_relu_wrapper, [x], y, x_init=x_arr, place=place)
def test_grad(self):
paddle.enable_static()
......
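As a standalone illustration of what the new dygraph check exercises (a sketch with assumed shapes, not code from this patch), a second-order gradient of leaky_relu can be taken directly with paddle.grad:

import paddle

paddle.disable_static()  # dygraph mode
x = paddle.uniform([2, 3, 7, 9], min=-1.0, max=1.0)
x.stop_gradient = False
y = paddle.nn.functional.leaky_relu(x, negative_slope=0.2)
# First-order grad; keep the graph so it can be differentiated again.
(dx,) = paddle.grad(y, x, create_graph=True)
# The second-order grad dispatches to leaky_relu_double_grad; for this op it
# is zero almost everywhere, so the check mainly guards shapes and dispatch.
(ddx,) = paddle.grad(dx, x, allow_unused=True)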
@@ -43,6 +43,7 @@ class TestInstanceNormDoubleGradCheck(unittest.TestCase):
[x], z, x_init=x_arr, atol=atol, place=place, eps=eps)
def test_grad(self):
paddle.enable_static()
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
@@ -77,6 +78,14 @@ class TestBatchNormDoubleGradCheck(unittest.TestCase):
self.data_layout = 'NCHW'
self.use_global_stats = False
self.shape = [2, 3, 4, 5]
self.channel_index = 1
def batch_norm_wrapper(self, x):
batch_norm = paddle.nn.BatchNorm2D(
self.shape[self.channel_index],
data_format=self.data_layout,
use_global_stats=self.use_global_stats)
return batch_norm(x[0])
@prog_scope()
def func(self, place):
@@ -94,8 +103,15 @@ class TestBatchNormDoubleGradCheck(unittest.TestCase):
x_arr = np.random.uniform(-1, 1, self.shape).astype(dtype)
gradient_checker.double_grad_check(
[x], z, x_init=x_arr, atol=atol, place=place, eps=eps)
gradient_checker.double_grad_check_for_dygraph(
self.batch_norm_wrapper, [x],
z,
x_init=x_arr,
atol=atol,
place=place)
def test_grad(self):
paddle.enable_static()
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
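For reference (assumed values, not part of the diff): with the default NCHW layout and shape [2, 3, 4, 5], the wrapper above amounts to the following standalone call:

import paddle

# channel_index is 1 in NCHW, so num_features = shape[1] = 3.
x = paddle.uniform([2, 3, 4, 5], min=-1.0, max=1.0)
bn = paddle.nn.BatchNorm2D(3, data_format='NCHW', use_global_stats=False)
y = bn(x)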
@@ -108,6 +124,7 @@ class TestBatchNormDoubleGradCheckCase1(TestBatchNormDoubleGradCheck):
self.data_layout = 'NHWC'
self.use_global_stats = False
self.shape = [2, 3, 4, 5]
self.channel_index = 3
class TestBatchNormDoubleGradCheckCase2(TestBatchNormDoubleGradCheck):
@@ -115,6 +132,7 @@ class TestBatchNormDoubleGradCheckCase2(TestBatchNormDoubleGradCheck):
self.data_layout = 'NCHW'
self.use_global_stats = True
self.shape = [2, 3, 4, 5]
self.channel_index = 1
class TestBatchNormDoubleGradCheckCase3(TestBatchNormDoubleGradCheck):
@@ -122,6 +140,7 @@ class TestBatchNormDoubleGradCheckCase3(TestBatchNormDoubleGradCheck):
self.data_layout = 'NHWC'
self.use_global_stats = True
self.shape = [2, 3, 4, 5]
self.channel_index = 3
class TestBatchNormDoubleGradCheckCase4(TestBatchNormDoubleGradCheck):
@@ -129,6 +148,14 @@ class TestBatchNormDoubleGradCheckCase4(TestBatchNormDoubleGradCheck):
self.data_layout = 'NCHW'
self.use_global_stats = False
self.shape = [2, 2, 3, 4, 5]
self.channel_index = 1
def batch_norm_wrapper(self, x):
batch_norm = paddle.nn.BatchNorm3D(
self.shape[self.channel_index],
data_format=self.data_layout,
use_global_stats=self.use_global_stats)
return batch_norm(x[0])
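The 5-D case only swaps in the 3-D layer; a sketch of the equivalent standalone call (assuming an NCDHW layout for the 5-D input):

import paddle

# Shape [2, 2, 3, 4, 5] with channel_index 1 gives num_features = 2.
x = paddle.uniform([2, 2, 3, 4, 5], min=-1.0, max=1.0)
bn = paddle.nn.BatchNorm3D(2, data_format='NCDHW', use_global_stats=False)
y = bn(x)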
class TestBatchNormDoubleGradCheckCase5(TestBatchNormDoubleGradCheck):
@@ -165,8 +192,8 @@ class TestBatchNormDoubleGradCheckCase6(TestBatchNormDoubleGradCheckCase5):
self.data_layout = 'NCHW'
self.use_global_stats = True
self.shape = [2, 3, 4, 5]
self.channel_index = 1
if __name__ == "__main__":
paddle.enable_static()
unittest.main()