Commit c6f888e5 authored by zhupengyang, committed by Tao Luo

update unittest accuracy to float64 for relu, prelu, maxout (#22273)

Parent 0d8b222b
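The change below has two parts: the maxout and prelu CPU kernels gain double instantiations so these operators can run on float64 tensors, and the related unit tests drop their float32 casts so the OpTest gradient checks run in double precision (which is why the ops can also be removed from the fp64 whitelist in the last hunk). As a minimal standalone sketch, not taken from the commit, of why the precision bump matters: the central-difference gradient estimate that this kind of check relies on degrades quickly in float32.

import numpy as np

def numeric_grad(f, x, eps):
    # Central-difference estimate of df/dx_i, one element at a time.
    g = np.zeros_like(x)
    for i in range(x.size):
        d = np.zeros_like(x)
        d.flat[i] = eps
        g.flat[i] = (f(x + d).sum() - f(x - d).sum()) / (2 * eps)
    return g

square = lambda x: x * x                    # analytic gradient is 2 * x

x64 = np.linspace(0.1, 1.0, 8)              # float64
x32 = x64.astype("float32")
err64 = np.abs(numeric_grad(square, x64, 1e-6) - 2 * x64).max()
err32 = np.abs(numeric_grad(square, x32, 1e-3) - 2 * x32).max()
print(err64, err32)                         # err32 is orders of magnitude larger

With float64 inputs the finite-difference error stays far below typical gradient-check tolerances, so these tests no longer need a whitelist exemption.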
@@ -118,7 +118,9 @@ REGISTER_OPERATOR(
     paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>);
 REGISTER_OPERATOR(maxout_grad, ops::MaxOutOpGrad);
 REGISTER_OP_CPU_KERNEL(
-    maxout, ops::MaxOutKernel<paddle::platform::CPUDeviceContext, float>);
+    maxout, ops::MaxOutKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::MaxOutKernel<paddle::platform::CPUDeviceContext, double>);
 REGISTER_OP_CPU_KERNEL(
     maxout_grad,
-    ops::MaxOutGradKernel<paddle::platform::CPUDeviceContext, float>);
+    ops::MaxOutGradKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::MaxOutGradKernel<paddle::platform::CPUDeviceContext, double>);
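Registering the double kernels above is what allows the maxout unit test (further down) to feed float64 data and run a full gradient check. Below is a hedged, illustrative sketch of such a test, assuming it runs inside Paddle's unittests directory where the OpTest harness is importable; the class name, shapes, and grouping are illustrative, chosen to mirror the naive reference used by the real test rather than taken from the commit.

import unittest
import numpy as np
from op_test import OpTest

class TestMaxOutFP64(OpTest):                       # illustrative name
    def setUp(self):
        self.op_type = "maxout"
        x = np.random.random((2, 6, 4, 4))          # float64 by default
        # Consecutive channels are grouped: 6 channels, groups=3 -> 2 outputs.
        out = x.reshape((2, 2, 3, 4, 4)).max(axis=2)
        self.inputs = {'X': x}
        self.attrs = {'groups': 3, 'axis': 1}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')

if __name__ == "__main__":
    unittest.main()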
@@ -161,7 +161,8 @@ REGISTER_OPERATOR(prelu, ops::PReluOp, ops::PReluOpMaker,
                   ops::PReluGradOpMaker<paddle::imperative::OpBase>);
 REGISTER_OPERATOR(prelu_grad, ops::PReluGradOp);
 REGISTER_OP_CPU_KERNEL(
-    prelu, ops::PReluKernel<paddle::platform::CPUDeviceContext, float>);
+    prelu, ops::PReluKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::PReluKernel<paddle::platform::CPUDeviceContext, double>);
 REGISTER_OP_CPU_KERNEL(
-    prelu_grad,
-    ops::PReluGradKernel<paddle::platform::CPUDeviceContext, float>);
+    prelu_grad, ops::PReluGradKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::PReluGradKernel<paddle::platform::CPUDeviceContext, double>);
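The prelu registrations follow the same pattern: a double instantiation of both the forward and the gradient kernel so the float64 gradient check has something to dispatch to. For reference, a hedged numpy sketch (not the kernel code) of the gradients one expects for the standard PReLU forward out = max(x, 0) + alpha * min(x, 0); the shapes and channel-mode alpha are illustrative.

import numpy as np

x = np.random.uniform(-1, 1, (2, 3, 4, 4))           # float64 by default
alpha = np.random.uniform(-1, -0.5, (1, 3, 1, 1))    # channel mode, as in the test

dout = np.ones_like(x)                                # upstream gradient
dx = dout * np.where(x > 0, 1.0, alpha)               # d out / d x
dalpha = (dout * np.minimum(x, 0.)).sum(axis=(0, 2, 3), keepdims=True)
print(dx.shape, dalpha.shape)                         # (2, 3, 4, 4) (1, 3, 1, 1)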
@@ -17,7 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.op_test import OpTest
+from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
 from paddle.fluid.tests.unittests.test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs, TestLeakyRelu
 from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd
@@ -111,6 +111,7 @@ class TestMKLDNNAbsDim2(TestAbs):
             ['X'], 'Out', max_relative_error=0.007, check_dygraph=False)


+@skip_check_grad_ci(reason="Use float32 in mkldnn relu op.")
 class TestMKLDNNReluDim4(TestRelu):
     def setUp(self):
         super(TestMKLDNNReluDim4, self).setUp()
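skip_check_grad_ci is imported from the OpTest module and used to exempt tests that legitimately cannot run the float64 gradient check, here because the MKLDNN relu kernel only works in float32. Below is a hedged sketch of the idea behind such a decorator, not Paddle's actual implementation; the attribute name and the placeholder class are illustrative.

def skip_check_grad_ci(reason=None):
    # Class decorator: require an explicit reason and tag the test class so
    # CI-side gradient-check enforcement knows to skip it.
    if not isinstance(reason, str):
        raise AssertionError("A string reason for skipping check_grad is required.")

    def wrapper(cls):
        cls.no_need_check_grad = True      # illustrative attribute name
        return cls

    return wrapper


@skip_check_grad_ci(reason="Use float32 in mkldnn relu op.")
class FakeMKLDNNReluTest(object):          # illustrative placeholder class
    pass

print(FakeMKLDNNReluTest.no_need_check_grad)   # True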
@@ -17,10 +17,11 @@ from __future__ import print_function
 import unittest, sys
 sys.path.append("../")
 import numpy as np
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
 from test_activation_op import TestAbs, TestGelu, TestSigmoid, TestSquare, TestRelu, TestTanh


+@skip_check_grad_ci(reason="Use float32 in ngraph relu op.")
 class TestNGRAPHReluDim4(TestRelu):
     def setUp(self):
         super(TestNGRAPHReluDim4, self).setUp()
@@ -34,14 +34,13 @@ class TestMaxOutOp(OpTest):
     def setUp(self):
         self.op_type = "maxout"
         self.init_test_case()
-        input = np.random.random(self.shape).astype("float32")
-        output = self.MaxOut_forward_naive(input, self.groups,
-                                           self.axis).astype("float32")
+        input = np.random.random(self.shape)
+        output = self.MaxOut_forward_naive(input, self.groups, self.axis)

         self.inputs = {'X': input}
         self.attrs = {'groups': self.groups, 'axis': self.axis}
-        self.outputs = {'Out': output.astype('float32')}
+        self.outputs = {'Out': output}

     def test_check_output(self):
         self.check_output()
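Dropping the astype("float32") casts is all it takes to make this test double precision, because numpy's default floating dtype is float64 and the test harness picks up the dtype of the input arrays (that inference detail is stated as an assumption for context, not taken from the commit). A one-liner showing the dtype:

import numpy as np

x = np.random.random((2, 6, 4, 4))
print(x.dtype)    # float64 -- numpy's default, so removing the cast makes the test double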
@@ -26,21 +26,19 @@ class PReluTest(OpTest):
         self.init_attr()
         self.op_type = "prelu"

-        x_np = np.random.uniform(-1, 1, self.x_shape).astype("float32")
+        x_np = np.random.uniform(-1, 1, self.x_shape)
         # Since zero point in prelu is not differentiable, avoid randomize
         # zero.
         x_np[np.abs(x_np) < 0.005] = 0.02

         if self.attrs == {'mode': "all"}:
-            alpha_np = np.random.rand(1).astype("float32")
-            self.inputs = {'X': x_np, 'Alpha': alpha_np}
+            alpha_np = np.random.uniform(-1, -0.5, (1))
         elif self.attrs == {'mode': "channel"}:
-            alpha_np = np.random.rand(1, x_np.shape[1], 1, 1).astype("float32")
-            self.inputs = {'X': x_np, 'Alpha': alpha_np}
+            alpha_np = np.random.uniform(-1, -0.5, (1, x_np.shape[1], 1, 1))
         else:
-            alpha_np = np.random.rand(1, x_np.shape[1], x_np.shape[2],
-                                      x_np.shape[3]).astype("float32")
-            self.inputs = {'X': x_np, 'Alpha': alpha_np}
+            alpha_np = np.random.uniform(-1, -0.5, \
+                (1, x_np.shape[1], x_np.shape[2], x_np.shape[3]))
+        self.inputs = {'X': x_np, 'Alpha': alpha_np}

         out_np = np.maximum(self.inputs['X'], 0.)
         out_np = out_np + np.minimum(self.inputs['X'],
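The out_np lines above (truncated by the hunk boundary) build the float64 PReLU reference; the standard definition is out = max(x, 0) + alpha * min(x, 0), with alpha broadcast against x. A hedged standalone sketch, with shapes and the alpha range mirroring the channel-mode branch and the exact values illustrative:

import numpy as np

x = np.random.uniform(-1, 1, (2, 3, 4, 4))
x[np.abs(x) < 0.005] = 0.02                          # keep away from the kink at 0
alpha = np.random.uniform(-1, -0.5, (1, 3, 1, 1))    # channel mode
out = np.maximum(x, 0.) + alpha * np.minimum(x, 0.)
print(out.dtype, out.shape)                          # float64 (2, 3, 4, 4)

Sampling alpha from (-1, -0.5) rather than [0, 1) keeps the negative-side slope well away from zero, which presumably makes the gradient comparison on X less fragile.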
@@ -46,7 +46,6 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
     'matmul', \
     'max_pool2d_with_index', \
     'max_pool3d_with_index', \
-    'maxout', \
     'minus', \
     'modified_huber_loss', \
     'mul', \
@@ -56,12 +55,10 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
     'pad_constant_like', \
     'pool2d', \
     'pool3d', \
-    'prelu', \
     'prroi_pool', \
     'rank_loss', \
     'reduce_max', \
     'reduce_min', \
-    'relu', \
     'reshape2', \
     'roi_perspective_transform', \
     'row_conv', \
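NO_FP64_CHECK_GRAD_OP_LIST is the whitelist of operators whose tests are excused from the float64 gradient check; removing 'maxout', 'prelu', and 'relu' means their tests must now pass it. A hedged sketch of how such a list is typically consulted, illustrative rather than Paddle's actual CI code:

NO_FP64_CHECK_GRAD_OP_LIST = ['minus', 'mul']        # trimmed example

def needs_fp64_check_grad(op_type):
    # Ops not on the whitelist are expected to provide a float64 check_grad.
    return op_type not in NO_FP64_CHECK_GRAD_OP_LIST

for op in ('relu', 'prelu', 'maxout', 'mul'):
    print(op, needs_fp64_check_grad(op))             # first three -> True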