diff --git a/paddle/fluid/operators/maxout_op.cc b/paddle/fluid/operators/maxout_op.cc
index 33b555a0c75c025850fafd19698aa6c70da1cf64..cbe0724dbd6a5f4ea0b07de70edf0f8596dfa8ed 100644
--- a/paddle/fluid/operators/maxout_op.cc
+++ b/paddle/fluid/operators/maxout_op.cc
@@ -118,7 +118,9 @@ REGISTER_OPERATOR(
     paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>);
 REGISTER_OPERATOR(maxout_grad, ops::MaxOutOpGrad);
 REGISTER_OP_CPU_KERNEL(
-    maxout, ops::MaxOutKernel<paddle::platform::CPUDeviceContext, float>);
+    maxout, ops::MaxOutKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::MaxOutKernel<paddle::platform::CPUDeviceContext, double>);
 REGISTER_OP_CPU_KERNEL(
     maxout_grad,
-    ops::MaxOutGradKernel<paddle::platform::CPUDeviceContext, float>);
+    ops::MaxOutGradKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::MaxOutGradKernel<paddle::platform::CPUDeviceContext, double>);
diff --git a/paddle/fluid/operators/prelu_op.cc b/paddle/fluid/operators/prelu_op.cc
index 7127177e35b60c50802a9ea9bb6132c78eed5f8e..aff661f5d5d90d6db2495e661558faa589879320 100644
--- a/paddle/fluid/operators/prelu_op.cc
+++ b/paddle/fluid/operators/prelu_op.cc
@@ -161,7 +161,8 @@ REGISTER_OPERATOR(prelu, ops::PReluOp, ops::PReluOpMaker,
                   ops::PReluGradOpMaker<paddle::imperative::OpBase>);
 REGISTER_OPERATOR(prelu_grad, ops::PReluGradOp);
 REGISTER_OP_CPU_KERNEL(
-    prelu, ops::PReluKernel<paddle::platform::CPUDeviceContext, float>);
+    prelu, ops::PReluKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::PReluKernel<paddle::platform::CPUDeviceContext, double>);
 REGISTER_OP_CPU_KERNEL(
-    prelu_grad,
-    ops::PReluGradKernel<paddle::platform::CPUDeviceContext, float>);
+    prelu_grad, ops::PReluGradKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::PReluGradKernel<paddle::platform::CPUDeviceContext, double>);
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
index c988e6275ffd9ec035d1f6023c330bcf1d4307fc..dcdbb4619b5fcc204fd0d6761a1648eb0c4c4571 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
@@ -17,7 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.op_test import OpTest
+from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
 from paddle.fluid.tests.unittests.test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs, TestLeakyRelu
 from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd
 
@@ -111,6 +111,7 @@ class TestMKLDNNAbsDim2(TestAbs):
             ['X'], 'Out', max_relative_error=0.007, check_dygraph=False)
 
 
+@skip_check_grad_ci(reason="Use float32 in mkldnn relu op.")
 class TestMKLDNNReluDim4(TestRelu):
     def setUp(self):
         super(TestMKLDNNReluDim4, self).setUp()
diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py
index 19e1c8cccbe1cb9a75e68c2c88f8c3d04520063b..6d8c3569d7fab13b666158784d52c5e30486c998 100644
--- a/python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py
+++ b/python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py
@@ -17,10 +17,11 @@ from __future__ import print_function
 import unittest, sys
 sys.path.append("../")
 import numpy as np
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
 from test_activation_op import TestAbs, TestGelu, TestSigmoid, TestSquare, TestRelu, TestTanh
 
 
+@skip_check_grad_ci(reason="Use float32 in ngraph relu op.")
 class TestNGRAPHReluDim4(TestRelu):
     def setUp(self):
         super(TestNGRAPHReluDim4, self).setUp()
diff --git a/python/paddle/fluid/tests/unittests/test_maxout_op.py b/python/paddle/fluid/tests/unittests/test_maxout_op.py
index 77fd97ba55402ad8ee1489e4ed466cc99dad136c..529c86a85a8d4d04f4fe783c93d581937e2bdead 100644
--- a/python/paddle/fluid/tests/unittests/test_maxout_op.py
+++ b/python/paddle/fluid/tests/unittests/test_maxout_op.py
@@ -34,14 +34,13 @@ class TestMaxOutOp(OpTest):
     def setUp(self):
         self.op_type = "maxout"
         self.init_test_case()
-        input = np.random.random(self.shape).astype("float32")
-        output = self.MaxOut_forward_naive(input, self.groups,
-                                           self.axis).astype("float32")
+        input = np.random.random(self.shape)
+        output = self.MaxOut_forward_naive(input, self.groups, self.axis)
 
         self.inputs = {'X': input}
         self.attrs = {'groups': self.groups, 'axis': self.axis}
 
-        self.outputs = {'Out': output.astype('float32')}
+        self.outputs = {'Out': output}
 
     def test_check_output(self):
         self.check_output()
diff --git a/python/paddle/fluid/tests/unittests/test_prelu_op.py b/python/paddle/fluid/tests/unittests/test_prelu_op.py
index a30db2c42430c4b9ee36baabec6ce3cbf3dd7ba3..14c5840ef80abc30d2b7c8cfe348ecb5873783ec 100644
--- a/python/paddle/fluid/tests/unittests/test_prelu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_prelu_op.py
@@ -26,21 +26,19 @@ class PReluTest(OpTest):
         self.init_attr()
         self.op_type = "prelu"
 
-        x_np = np.random.uniform(-1, 1, self.x_shape).astype("float32")
+        x_np = np.random.uniform(-1, 1, self.x_shape)
         # Since zero point in prelu is not differentiable, avoid randomize
         # zero.
         x_np[np.abs(x_np) < 0.005] = 0.02
 
         if self.attrs == {'mode': "all"}:
-            alpha_np = np.random.rand(1).astype("float32")
-            self.inputs = {'X': x_np, 'Alpha': alpha_np}
+            alpha_np = np.random.uniform(-1, -0.5, (1))
         elif self.attrs == {'mode': "channel"}:
-            alpha_np = np.random.rand(1, x_np.shape[1], 1, 1).astype("float32")
-            self.inputs = {'X': x_np, 'Alpha': alpha_np}
+            alpha_np = np.random.uniform(-1, -0.5, (1, x_np.shape[1], 1, 1))
         else:
-            alpha_np = np.random.rand(1, x_np.shape[1], x_np.shape[2],
-                                      x_np.shape[3]).astype("float32")
-            self.inputs = {'X': x_np, 'Alpha': alpha_np}
+            alpha_np = np.random.uniform(-1, -0.5, \
+                (1, x_np.shape[1], x_np.shape[2], x_np.shape[3]))
+        self.inputs = {'X': x_np, 'Alpha': alpha_np}
 
         out_np = np.maximum(self.inputs['X'], 0.)
         out_np = out_np + np.minimum(self.inputs['X'],
diff --git a/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py b/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py
index 771d7ac6c49903ff0684c3b6de9bfe036fc84d6c..a4470bf063a861a71e17448e74513f639d2a0388 100644
--- a/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py
+++ b/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py
@@ -46,7 +46,6 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
     'matmul', \
     'max_pool2d_with_index', \
     'max_pool3d_with_index', \
-    'maxout', \
     'minus', \
     'modified_huber_loss', \
     'mul', \
@@ -56,12 +55,10 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
     'pad_constant_like', \
     'pool2d', \
     'pool3d', \
-    'prelu', \
     'prroi_pool', \
     'rank_loss', \
     'reduce_max', \
     'reduce_min', \
-    'relu', \
     'reshape2', \
     'roi_perspective_transform', \
     'row_conv', \
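
Note: the two halves of this change work together. Registering a double kernel next to the existing float one in maxout_op.cc and prelu_op.cc is what allows the fp64 gradient check to run, which in turn lets 'maxout', 'prelu', and 'relu' be dropped from NO_FP64_CHECK_GRAD_OP_LIST. The MKLDNN and nGraph relu tests still exercise float32-only kernels, so they opt out via skip_check_grad_ci instead. Below is a minimal sketch of how a decorator like skip_check_grad_ci can work; it is an illustration under assumptions, not op_test's actual code (the no_need_check_grad flag name and the stand-in TestRelu base are hypothetical).

```python
# Sketch of a skip_check_grad_ci-style decorator. Assumed implementation for
# illustration only; the flag name `no_need_check_grad` and the stand-in
# classes below are hypothetical, not necessarily what paddle's op_test defines.
def skip_check_grad_ci(reason=None):
    # Require a human-readable reason so reviewers can audit why the
    # fp64 gradient check does not apply (e.g. float32-only kernels).
    if not isinstance(reason, str):
        raise AssertionError("The reason for skipping check_grad is required.")

    def wrapper(cls):
        # The test harness would look for this flag and bypass check_grad.
        cls.no_need_check_grad = True
        return cls

    return wrapper


class TestRelu(object):  # stand-in for the real OpTest subclass
    pass


# Usage mirrors the test hunks above:
@skip_check_grad_ci(reason="Use float32 in mkldnn relu op.")
class TestMKLDNNReluDim4(TestRelu):
    pass
```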