Commit 318d5bba authored by GaoWei8, committed by Tao Luo

add input type and dtype check for elu_op (#20106)

* elu input check
test=develop

* test=develop
Parent 98acfe97
@@ -10883,6 +10883,17 @@ def elu(x, alpha=1.0, name=None):
        y = fluid.layers.elu(x, alpha=0.2)
    """
    helper = LayerHelper('elu', **locals())
    if not isinstance(x, Variable):
        raise TypeError(
            "The type of 'x' in elu must be Variable, but received %s" %
            (type(x)))
    if convert_dtype(x.dtype) in ['float16']:
        warnings.warn(
            "The data type of 'x' in elu only supports float16 on GPU now.")
    if convert_dtype(x.dtype) not in ['float16', 'float32', 'float64']:
        raise TypeError(
            "The data type of 'x' in elu must be float16 (only supported on GPU), float32 or float64, but received %s."
            % (convert_dtype(x.dtype)))
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='elu',
......
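For reference, a minimal sketch of how the new checks surface to callers. This assumes a fluid 1.x build that includes this change; the variable names are illustrative only:

import numpy as np
import paddle.fluid as fluid

# A raw LoDTensor (not a Variable) now fails the isinstance check.
x1 = fluid.create_lod_tensor(np.array([[-1.0]]), [[1]], fluid.CPUPlace())
try:
    fluid.layers.elu(x1)
except TypeError as e:
    print(e)  # "The type of 'x' in elu must be Variable, ..."

# An int32 Variable now fails the dtype check.
x2 = fluid.layers.data(name='x2', shape=[4], dtype='int32')
try:
    fluid.layers.elu(x2)
except TypeError as e:
    print(e)  # "The data type of 'x' in elu must be float16 ..., float32 or float64, ..."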
@@ -19,6 +19,8 @@ import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
class TestActivation(OpTest):
@@ -519,6 +521,18 @@ class TestELU(TestActivation):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELUOpError(OpTest):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of elu_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elu, x1)
            # The input dtype of elu_op must be float16, float32 or float64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.elu, x2)
class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
......
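The new case can be exercised on its own with unittest; a sketch, assuming the test file's directory (python/paddle/fluid/tests/unittests) is on sys.path:

import unittest
from test_activation_op import TestELUOpError

# Load and run only the new error-checking case.
suite = unittest.TestLoader().loadTestsFromTestCase(TestELUOpError)
unittest.TextTestRunner(verbosity=2).run(suite)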