From c6f888e5a51aee5ab937d2715c876b5d12d0fec1 Mon Sep 17 00:00:00 2001
From: zhupengyang <1165938320@qq.com>
Date: Sun, 19 Jan 2020 14:30:13 +0800
Subject: [PATCH] update unittest accuracy to float64 for relu, prelu, maxout
 (#22273)

---
 paddle/fluid/operators/maxout_op.cc                |  6 ++++--
 paddle/fluid/operators/prelu_op.cc                 |  7 ++++---
 .../unittests/mkldnn/test_activation_mkldnn_op.py  |  3 ++-
 .../unittests/ngraph/test_activation_ngraph_op.py  |  3 ++-
 .../paddle/fluid/tests/unittests/test_maxout_op.py |  7 +++----
 .../paddle/fluid/tests/unittests/test_prelu_op.py  | 14 ++++++--------
 .../unittests/white_list/op_accuracy_white_list.py |  3 ---
 7 files changed, 21 insertions(+), 22 deletions(-)

diff --git a/paddle/fluid/operators/maxout_op.cc b/paddle/fluid/operators/maxout_op.cc
index 33b555a0c75..cbe0724dbd6 100644
--- a/paddle/fluid/operators/maxout_op.cc
+++ b/paddle/fluid/operators/maxout_op.cc
@@ -118,7 +118,9 @@ REGISTER_OPERATOR(
     paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>);
 REGISTER_OPERATOR(maxout_grad, ops::MaxOutOpGrad);
 REGISTER_OP_CPU_KERNEL(
-    maxout, ops::MaxOutKernel<paddle::platform::CPUDeviceContext, float>);
+    maxout, ops::MaxOutKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::MaxOutKernel<paddle::platform::CPUDeviceContext, double>);
 REGISTER_OP_CPU_KERNEL(
     maxout_grad,
-    ops::MaxOutGradKernel<paddle::platform::CPUDeviceContext, float>);
+    ops::MaxOutGradKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::MaxOutGradKernel<paddle::platform::CPUDeviceContext, double>);
diff --git a/paddle/fluid/operators/prelu_op.cc b/paddle/fluid/operators/prelu_op.cc
index 7127177e35b..aff661f5d5d 100644
--- a/paddle/fluid/operators/prelu_op.cc
+++ b/paddle/fluid/operators/prelu_op.cc
@@ -161,7 +161,8 @@ REGISTER_OPERATOR(prelu, ops::PReluOp, ops::PReluOpMaker,
                   ops::PReluGradOpMaker<paddle::imperative::OpBase>);
 REGISTER_OPERATOR(prelu_grad, ops::PReluGradOp);
 REGISTER_OP_CPU_KERNEL(
-    prelu, ops::PReluKernel<paddle::platform::CPUDeviceContext, float>);
+    prelu, ops::PReluKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::PReluKernel<paddle::platform::CPUDeviceContext, double>);
 REGISTER_OP_CPU_KERNEL(
-    prelu_grad,
-    ops::PReluGradKernel<paddle::platform::CPUDeviceContext, float>);
+    prelu_grad, ops::PReluGradKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::PReluGradKernel<paddle::platform::CPUDeviceContext, double>);
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
index c988e6275ff..dcdbb4619b5 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
@@ -17,7 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.op_test import OpTest
+from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
 from paddle.fluid.tests.unittests.test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs, TestLeakyRelu
 from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd
 
@@ -111,6 +111,7 @@ class TestMKLDNNAbsDim2(TestAbs):
             ['X'], 'Out', max_relative_error=0.007, check_dygraph=False)
 
 
+@skip_check_grad_ci(reason="Use float32 in mkldnn relu op.")
 class TestMKLDNNReluDim4(TestRelu):
     def setUp(self):
         super(TestMKLDNNReluDim4, self).setUp()
diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py
index 19e1c8cccbe..6d8c3569d7f 100644
--- a/python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py
+++ b/python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py
@@ -17,10 +17,11 @@ from __future__ import print_function
 import unittest, sys
 sys.path.append("../")
 import numpy as np
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
 from test_activation_op import TestAbs, TestGelu, TestSigmoid, TestSquare, TestRelu, TestTanh
 
 
+@skip_check_grad_ci(reason="Use float32 in ngraph relu op.")
 class TestNGRAPHReluDim4(TestRelu):
     def setUp(self):
         super(TestNGRAPHReluDim4, self).setUp()
diff --git a/python/paddle/fluid/tests/unittests/test_maxout_op.py b/python/paddle/fluid/tests/unittests/test_maxout_op.py
index 77fd97ba554..529c86a85a8 100644
--- a/python/paddle/fluid/tests/unittests/test_maxout_op.py
+++ b/python/paddle/fluid/tests/unittests/test_maxout_op.py
@@ -34,14 +34,13 @@ class TestMaxOutOp(OpTest):
     def setUp(self):
         self.op_type = "maxout"
         self.init_test_case()
-        input = np.random.random(self.shape).astype("float32")
-        output = self.MaxOut_forward_naive(input, self.groups,
-                                           self.axis).astype("float32")
+        input = np.random.random(self.shape)
+        output = self.MaxOut_forward_naive(input, self.groups, self.axis)
 
         self.inputs = {'X': input}
         self.attrs = {'groups': self.groups, 'axis': self.axis}
 
-        self.outputs = {'Out': output.astype('float32')}
+        self.outputs = {'Out': output}
 
     def test_check_output(self):
         self.check_output()
diff --git a/python/paddle/fluid/tests/unittests/test_prelu_op.py b/python/paddle/fluid/tests/unittests/test_prelu_op.py
index a30db2c4243..14c5840ef80 100644
--- a/python/paddle/fluid/tests/unittests/test_prelu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_prelu_op.py
@@ -26,21 +26,19 @@ class PReluTest(OpTest):
         self.init_attr()
         self.op_type = "prelu"
 
-        x_np = np.random.uniform(-1, 1, self.x_shape).astype("float32")
+        x_np = np.random.uniform(-1, 1, self.x_shape)
         # Since zero point in prelu is not differentiable, avoid randomize
         # zero.
         x_np[np.abs(x_np) < 0.005] = 0.02
 
         if self.attrs == {'mode': "all"}:
-            alpha_np = np.random.rand(1).astype("float32")
-            self.inputs = {'X': x_np, 'Alpha': alpha_np}
+            alpha_np = np.random.uniform(-1, -0.5, (1))
         elif self.attrs == {'mode': "channel"}:
-            alpha_np = np.random.rand(1, x_np.shape[1], 1, 1).astype("float32")
-            self.inputs = {'X': x_np, 'Alpha': alpha_np}
+            alpha_np = np.random.uniform(-1, -0.5, (1, x_np.shape[1], 1, 1))
         else:
-            alpha_np = np.random.rand(1, x_np.shape[1], x_np.shape[2],
-                                      x_np.shape[3]).astype("float32")
-            self.inputs = {'X': x_np, 'Alpha': alpha_np}
+            alpha_np = np.random.uniform(-1, -0.5, \
+                (1, x_np.shape[1], x_np.shape[2], x_np.shape[3]))
+        self.inputs = {'X': x_np, 'Alpha': alpha_np}
 
         out_np = np.maximum(self.inputs['X'], 0.)
         out_np = out_np + np.minimum(self.inputs['X'],
diff --git a/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py b/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py
index 771d7ac6c49..a4470bf063a 100644
--- a/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py
+++ b/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py
@@ -46,7 +46,6 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
     'matmul', \
     'max_pool2d_with_index', \
     'max_pool3d_with_index', \
-    'maxout', \
     'minus', \
     'modified_huber_loss', \
     'mul', \
@@ -56,12 +55,10 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
     'pad_constant_like', \
     'pool2d', \
     'pool3d', \
-    'prelu', \
     'prroi_pool', \
     'rank_loss', \
     'reduce_max', \
     'reduce_min', \
-    'relu', \
     'reshape2', \
     'roi_perspective_transform', \
     'row_conv', \
--
GitLab
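For context, the prelu test change above samples Alpha from uniform(-1, -0.5) so it stays strictly negative and away from zero, and drops the .astype("float32") casts so inputs keep NumPy's default float64, which is what lets the gradient check run at double precision. A minimal NumPy sketch of the PReLU computation the test reproduces (the helper name prelu_ref is hypothetical, not part of the patch):

    import numpy as np

    def prelu_ref(x, alpha):
        # Positive inputs pass through; negative inputs are scaled by alpha.
        # Mirrors the test's np.maximum(x, 0.) + np.minimum(x, 0.) * alpha.
        return np.maximum(x, 0.) + np.minimum(x, 0.) * alpha

    x = np.random.uniform(-1, 1, (2, 3, 4, 4))         # float64 by default
    x[np.abs(x) < 0.005] = 0.02                        # avoid the non-differentiable zero point
    alpha = np.random.uniform(-1, -0.5, (1, 3, 1, 1))  # 'channel' mode: one alpha per channel
    print(prelu_ref(x, alpha).dtype)                   # float64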