Commit c6f888e5 authored by zhupengyang, committed by Tao Luo

update unittest accuracy to float64 for relu, prelu, maxout (#22273)

Parent 0d8b222b
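The test changes below rely on NumPy's default dtype: once the explicit `.astype("float32")` casts are dropped, `np.random.random` and `np.random.uniform` return float64 arrays, so the op tests exercise the double kernels registered in this commit. A minimal sketch (not part of this commit) illustrating that default:

```python
import numpy as np

# NumPy's random generators return float64 by default, so removing the
# .astype("float32") casts switches the op-test inputs to double precision.
x = np.random.random((2, 3))
y = np.random.uniform(-1, 1, (2, 3))
print(x.dtype, y.dtype)  # float64 float64
```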
@@ -118,7 +118,9 @@ REGISTER_OPERATOR(
     paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>);
 REGISTER_OPERATOR(maxout_grad, ops::MaxOutOpGrad);
 REGISTER_OP_CPU_KERNEL(
-    maxout, ops::MaxOutKernel<paddle::platform::CPUDeviceContext, float>);
+    maxout, ops::MaxOutKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::MaxOutKernel<paddle::platform::CPUDeviceContext, double>);
 REGISTER_OP_CPU_KERNEL(
     maxout_grad,
-    ops::MaxOutGradKernel<paddle::platform::CPUDeviceContext, float>);
+    ops::MaxOutGradKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::MaxOutGradKernel<paddle::platform::CPUDeviceContext, double>);
@@ -161,7 +161,8 @@ REGISTER_OPERATOR(prelu, ops::PReluOp, ops::PReluOpMaker,
                   ops::PReluGradOpMaker<paddle::imperative::OpBase>);
 REGISTER_OPERATOR(prelu_grad, ops::PReluGradOp);
 REGISTER_OP_CPU_KERNEL(
-    prelu, ops::PReluKernel<paddle::platform::CPUDeviceContext, float>);
+    prelu, ops::PReluKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::PReluKernel<paddle::platform::CPUDeviceContext, double>);
 REGISTER_OP_CPU_KERNEL(
-    prelu_grad,
-    ops::PReluGradKernel<paddle::platform::CPUDeviceContext, float>);
+    prelu_grad, ops::PReluGradKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::PReluGradKernel<paddle::platform::CPUDeviceContext, double>);
@@ -17,7 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.op_test import OpTest
+from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
 from paddle.fluid.tests.unittests.test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs, TestLeakyRelu
 from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd
@@ -111,6 +111,7 @@ class TestMKLDNNAbsDim2(TestAbs):
             ['X'], 'Out', max_relative_error=0.007, check_dygraph=False)

+@skip_check_grad_ci(reason="Use float32 in mkldnn relu op.")
 class TestMKLDNNReluDim4(TestRelu):
     def setUp(self):
         super(TestMKLDNNReluDim4, self).setUp()
...
@@ -17,10 +17,11 @@ from __future__ import print_function
 import unittest, sys
 sys.path.append("../")
 import numpy as np
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
 from test_activation_op import TestAbs, TestGelu, TestSigmoid, TestSquare, TestRelu, TestTanh

+@skip_check_grad_ci(reason="Use float32 in ngraph relu op.")
 class TestNGRAPHReluDim4(TestRelu):
     def setUp(self):
         super(TestNGRAPHReluDim4, self).setUp()
...
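Both the MKL-DNN and nGraph relu tests keep computing in float32, so they opt out of the float64 gradient check through the `skip_check_grad_ci` decorator now imported from `op_test`. The following is only a rough sketch of how such a class decorator could mark a test; the flag name and how the CI consumes it are assumptions, not Paddle's actual implementation:

```python
def skip_check_grad_ci(reason=None):
    """Mark an OpTest subclass so the CI gradient check is skipped.

    Sketch only: `no_need_check_grad` is an assumed attribute name that a
    grad-check CI could inspect; the real logic lives in Paddle's op_test.
    """
    if not isinstance(reason, str):
        raise AssertionError("The reason for skipping check_grad is required.")

    def wrapper(cls):
        cls.no_need_check_grad = True  # assumed flag read by the grad-check CI
        return cls

    return wrapper


@skip_check_grad_ci(reason="Use float32 in mkldnn relu op.")
class ExampleFloat32OnlyTest(object):  # stands in for the real TestRelu subclass
    pass
```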
@@ -34,14 +34,13 @@ class TestMaxOutOp(OpTest):
     def setUp(self):
         self.op_type = "maxout"
         self.init_test_case()
-        input = np.random.random(self.shape).astype("float32")
-        output = self.MaxOut_forward_naive(input, self.groups,
-                                           self.axis).astype("float32")
+        input = np.random.random(self.shape)
+        output = self.MaxOut_forward_naive(input, self.groups, self.axis)

         self.inputs = {'X': input}
         self.attrs = {'groups': self.groups, 'axis': self.axis}
-        self.outputs = {'Out': output.astype('float32')}
+        self.outputs = {'Out': output}

     def test_check_output(self):
         self.check_output()
...
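With the casts removed, `MaxOut_forward_naive` receives and returns float64 data, and `check_output`/`check_grad` compare against the double CPU kernel registered above. A self-contained NumPy sketch of a maxout reference that works directly in float64 (`maxout_ref` is illustrative, not the helper used in the test):

```python
import numpy as np

def maxout_ref(x, groups, axis=1):
    # Split the channel dimension into (channels // groups, groups) and take
    # the maximum over each group of consecutive channels.
    shape = list(x.shape)
    assert shape[axis] % groups == 0
    new_shape = shape[:axis] + [shape[axis] // groups, groups] + shape[axis + 1:]
    return x.reshape(new_shape).max(axis=axis + 1)

x = np.random.random((2, 6, 4, 4))   # float64 by default
out = maxout_ref(x, groups=2)        # shape (2, 3, 4, 4), dtype float64
print(out.dtype, out.shape)
```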
@@ -26,21 +26,19 @@ class PReluTest(OpTest):
         self.init_attr()
         self.op_type = "prelu"

-        x_np = np.random.uniform(-1, 1, self.x_shape).astype("float32")
+        x_np = np.random.uniform(-1, 1, self.x_shape)
         # Since zero point in prelu is not differentiable, avoid randomize
         # zero.
         x_np[np.abs(x_np) < 0.005] = 0.02

         if self.attrs == {'mode': "all"}:
-            alpha_np = np.random.rand(1).astype("float32")
-            self.inputs = {'X': x_np, 'Alpha': alpha_np}
+            alpha_np = np.random.uniform(-1, -0.5, (1))
         elif self.attrs == {'mode': "channel"}:
-            alpha_np = np.random.rand(1, x_np.shape[1], 1, 1).astype("float32")
-            self.inputs = {'X': x_np, 'Alpha': alpha_np}
+            alpha_np = np.random.uniform(-1, -0.5, (1, x_np.shape[1], 1, 1))
         else:
-            alpha_np = np.random.rand(1, x_np.shape[1], x_np.shape[2],
-                                      x_np.shape[3]).astype("float32")
-            self.inputs = {'X': x_np, 'Alpha': alpha_np}
+            alpha_np = np.random.uniform(-1, -0.5, \
+                (1, x_np.shape[1], x_np.shape[2], x_np.shape[3]))
+        self.inputs = {'X': x_np, 'Alpha': alpha_np}

         out_np = np.maximum(self.inputs['X'], 0.)
         out_np = out_np + np.minimum(self.inputs['X'],
...
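Sampling `Alpha` from (-1, -0.5) instead of `np.random.rand` keeps its magnitude bounded away from zero, so the negative branch of prelu contributes a non-negligible slope during the float64 gradient check, and it also leaves the array in NumPy's default float64. The expected output is computed exactly as in the test; a self-contained NumPy sketch of that reference (the helper name is illustrative):

```python
import numpy as np

def prelu_ref(x, alpha):
    # PReLU: pass positives through, scale negatives by alpha (broadcast).
    return np.maximum(x, 0.) + np.minimum(x, 0.) * alpha

x = np.random.uniform(-1, 1, (2, 3, 4, 4))         # float64 by default
x[np.abs(x) < 0.005] = 0.02                        # avoid the non-differentiable zero point
alpha = np.random.uniform(-1, -0.5, (1, 3, 1, 1))  # "channel"-mode alpha
print(prelu_ref(x, alpha).dtype)                   # float64
```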
@@ -46,7 +46,6 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
     'matmul', \
     'max_pool2d_with_index', \
     'max_pool3d_with_index', \
-    'maxout', \
     'minus', \
     'modified_huber_loss', \
     'mul', \
@@ -56,12 +55,10 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
     'pad_constant_like', \
     'pool2d', \
     'pool3d', \
-    'prelu', \
     'prroi_pool', \
     'rank_loss', \
     'reduce_max', \
     'reduce_min', \
-    'relu', \
     'reshape2', \
     'roi_perspective_transform', \
     'row_conv', \
...
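Removing 'maxout', 'prelu', and 'relu' from NO_FP64_CHECK_GRAD_OP_LIST means these operators are no longer exempt from the float64 gradient check. A hypothetical sketch of how such a whitelist might gate the check (the real gating logic in Paddle's OpTest framework may differ):

```python
NO_FP64_CHECK_GRAD_OP_LIST = ['matmul', 'pool2d', 'pool3d']  # trimmed example list

def needs_fp64_grad_check(op_type, whitelist=NO_FP64_CHECK_GRAD_OP_LIST):
    # Whitelisted ops may keep float32-only gradient checks; everything
    # else is expected to pass the float64 check.
    return op_type not in whitelist

print(needs_fp64_grad_check("relu"))    # True: relu was removed from the list
print(needs_fp64_grad_check("matmul"))  # False: still whitelisted
```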