Unverified commit 65a02fc1, authored by Tao Luo, committed by GitHub

add input type and dtype check for softmax_op (#19975)

* add input type and dtype check for softmax_op

test=develop

* refine error message

test=develop
Parent e89b1288
@@ -34,6 +34,7 @@ from .. import unique_name
from functools import reduce
from .. import core
from ..dygraph import layers
from ..data_feeder import convert_dtype

__all__ = [
    'fc',
@@ -2249,6 +2250,15 @@ def softmax(input, use_cudnn=False, name=None, axis=-1):
    """
    helper = LayerHelper('softmax', **locals())
    if not isinstance(input, Variable):
        raise TypeError(
            "The type of 'input' in softmax must be Variable, but received %s" %
            (type(input)))
    if convert_dtype(input.dtype) not in ['float32', 'float64']:
        raise TypeError(
            "The data type of 'input' in softmax must be float32 or float64, but received %s."
            % (convert_dtype(input.dtype)))
    dtype = helper.input_dtype()
    softmax_out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
......
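Outside the patch itself, a minimal sketch of how the new checks surface to a caller (assuming the fluid 1.x API already used in this diff; error text abbreviated in the comments):

```python
import numpy as np
import paddle.fluid as fluid

# A raw LoDTensor is not a Variable, so the isinstance check fails fast.
x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], fluid.CPUPlace())
try:
    fluid.layers.softmax(x1)
except TypeError as e:
    print(e)  # The type of 'input' in softmax must be Variable, ...

# An int32 Variable passes the isinstance check but fails the dtype check.
x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
try:
    fluid.layers.softmax(x2)
except TypeError as e:
    print(e)  # The data type of 'input' in softmax must be float32 or float64, ...
```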
@@ -18,6 +18,8 @@ import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard


def stable_softmax(x):
@@ -74,6 +76,18 @@ class TestSoftmaxOp(OpTest):
        self.check_grad(["X"], "Out", max_relative_error=0.01)


class TestSoftmaxOpError(OpTest):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of softmax_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.softmax, x1)
            # The input dtype of softmax_op must be float32 or float64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.softmax, x2)


class TestSoftmaxOp2(TestSoftmaxOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]
......
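The new case can also be run on its own; a small sketch, assuming the test lives in Paddle's conventional test_softmax_op.py (the file name is not shown in this diff) and is run from the unittests directory so that op_test is importable:

```python
import unittest

# Hypothetical import path; adjust if the test file is named differently.
from test_softmax_op import TestSoftmaxOpError

if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(TestSoftmaxOpError)
    unittest.TextTestRunner(verbosity=2).run(suite)
```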