Unverified · Commit cdbe5707 · Authored by mamingjie-China · Committed by GitHub

API(argsort, argmax, argmin, cast, diag) error message enhancement

* API(argsort, argmax, argmin, cast, diag) error message enhancement, test=develop
Parent: b8d07501
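
As a quick orientation (a sketch only, not part of this commit): the user-visible effect of the Python-side changes below is that invalid inputs and dtypes now fail fast with a TypeError raised by check_variable_and_dtype / check_dtype / check_type, instead of surfacing later as an opaque C++ enforce failure. A minimal illustration, assuming the Paddle 1.x fluid API used throughout the diff:

    # Illustrative sketch only (not part of this commit); it exercises the new
    # Python-side checks added below, assuming the Paddle 1.x fluid API.
    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program(), fluid.Program()):
        try:
            # A plain Python list is not a Variable, so the new
            # check_variable_and_dtype call in argmax rejects it immediately.
            fluid.layers.argmax(x=[1, 2, 3])
        except TypeError as e:
            print("argmax rejected a non-Variable input:", e)

        x = fluid.layers.data(name='x', shape=[4], dtype='int32')
        try:
            # 'int16' is not in the dtype whitelist passed to the new
            # check_dtype call in cast, so this also raises TypeError.
            fluid.layers.cast(x=x, dtype='int16')
        except TypeError as e:
            print("cast rejected an unsupported target dtype:", e)
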
...@@ -136,14 +136,21 @@ class ArgMinMaxOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "arg_min_max");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "arg_min_max");
     const auto& x_dims = ctx->GetInputDim("X");
     int64_t axis = ctx->Attrs().Get<int64_t>("axis");
     bool keepdims = ctx->Attrs().Get<bool>("keepdims");
-    PADDLE_ENFORCE(axis >= -x_dims.size() && axis < x_dims.size(),
-                   "'axis' must be inside [-Rank(X), Rank(X))");
+    PADDLE_ENFORCE_GE(axis, -x_dims.size(),
+                      platform::errors::InvalidArgument(
+                          "'axis'(%d) must be greater than or equal to"
+                          " -Rank(X)(%d).",
+                          axis, -x_dims.size()));
+    PADDLE_ENFORCE_LT(
+        axis, x_dims.size(),
+        platform::errors::InvalidArgument(
+            "'axis'(%d) must be less than Rank(X)(%d).", axis, x_dims.size()));
     auto x_rank = x_dims.size();
     if (axis < 0) axis += x_rank;
...
...@@ -23,25 +23,23 @@ class ArgsortOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of ArgsortOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of ArgsortOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Indices"),
-                   "Output(Indices) of ArgsortOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "argsort");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "argsort");
+    OP_INOUT_CHECK(ctx->HasOutput("Indices"), "Output", "Indices", "argsort");
     auto in_dims = ctx->GetInputDim("X");
     int axis = ctx->Attrs().Get<int>("axis");
     auto num_dims = in_dims.size();
-    PADDLE_ENFORCE(axis < num_dims,
-                   "Attr(axis) %d of ArgsortOp is out of bounds for Input(X)'s "
-                   "rank %d.",
-                   axis, num_dims);
-    PADDLE_ENFORCE(axis >= -num_dims,
-                   "Attr(axis) %d of ArgsortOp must be not less than "
-                   "-rank(Input(X)) (%d).",
-                   axis, num_dims);
+    PADDLE_ENFORCE_GE(axis, -num_dims,
+                      platform::errors::InvalidArgument(
+                          "'axis'(%d) must be greater than or equal to"
+                          " -num_dims(%d).",
+                          axis, -num_dims));
+    PADDLE_ENFORCE_LT(
+        axis, num_dims,
+        platform::errors::InvalidArgument(
+            "'axis'(%d) must be less than num_dims(%d).", axis, num_dims));
     ctx->ShareDim("X", "Out");
     ctx->ShareDim("X", "Indices");
...
...@@ -59,12 +59,8 @@ class CastOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE_EQ(
-        context->HasInput("X"), true,
-        platform::errors::NotFound("The input(X) of cast op must be set"));
-    PADDLE_ENFORCE_EQ(
-        context->HasOutput("Out"), true,
-        platform::errors::NotFound("The output of cast op must be set"));
+    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "cast");
+    OP_INOUT_CHECK(context->HasOutput("Out"), "Output", "Out", "cast");
     context->SetOutputDim("Out", context->GetInputDim("X"));
     context->ShareLoD("X", "Out");
   }
...
...@@ -22,15 +22,16 @@ class DiagOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Diagonal"),
-                   "Input(Diagonal) of DiagOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of DiagOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Diagonal"), "Input", "Diagonal", "diag");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "diag");
     auto s_dims = ctx->GetInputDim("Diagonal");
-    PADDLE_ENFORCE(s_dims.size() == 1,
-                   "The rank of Input(Diagonal) should only be 1.");
+    PADDLE_ENFORCE_EQ(
+        s_dims.size(), 1UL,
+        platform::errors::InvalidArgument(
+            "The dimension of 'diagonal' must be 1, but now it is %d.",
+            s_dims.size()));
     ctx->SetOutputDim("Out", {s_dims[0], s_dims[0]});
   }
...
...@@ -196,11 +196,16 @@ def cast(x, dtype):
             # [[ 1 -2]
             #  [ 0  4]] int32
     """
-    helper = LayerHelper('cast', **locals())
     check_variable_and_dtype(
         x, 'x',
         ['bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
         'cast')
+    check_dtype(dtype, 'dtype', [
+        'bool', 'float16', 'float32', 'float64', 'int8', 'int32', 'int64',
+        'uint8'
+    ], 'cast')
+    helper = LayerHelper('cast', **locals())
     out = helper.create_variable_for_type_inference(dtype=dtype)
     helper.append_op(
         type='cast',
...
...@@ -768,6 +773,9 @@ def argmin(x, axis=0):
             # [[0 0 2]
             #  [1 0 2]]
     """
+    check_variable_and_dtype(
+        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
+        'argmin')
     helper = LayerHelper("arg_min", **locals())
     out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
     helper.append_op(
...
...@@ -828,6 +836,9 @@ def argmax(x, axis=0):
             # [[2 3 1]
             #  [0 3 1]]
     """
+    check_variable_and_dtype(
+        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
+        'argmax')
     helper = LayerHelper("arg_max", **locals())
     out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
     helper.append_op(
...
...@@ -909,6 +920,9 @@ def argsort(input, axis=-1, descending=False, name=None):
             #  [4. 7. 4. 6.]
             #  [5. 7. 7. 9.]]]
     """
+    check_variable_and_dtype(
+        input, 'input',
+        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], 'argsort')
     helper = LayerHelper("argsort", **locals())
     out = helper.create_variable_for_type_inference(
         dtype=input.dtype, stop_gradient=True)
...
...@@ -1106,6 +1120,7 @@ def has_inf(x):
         res = fluid.layers.has_inf(data)
     """
+    # check_type(x, 'x', (Variable), 'has_inf')
     helper = LayerHelper("isinf", **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
...
...@@ -1130,6 +1145,7 @@ def has_nan(x):
         res = fluid.layers.has_nan(data)
     """
+    # check_type(x, 'x', (Variable), 'has_nan')
     helper = LayerHelper("isnan", **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
...
...@@ -1333,7 +1349,9 @@ def diag(diagonal):
         # diagonal.shape=(3,) data.shape=(3, 3)
     """
+    check_type(diagonal, 'diagonal', (Variable, numpy.ndarray), 'diag')
+    check_dtype(diagonal.dtype, 'diagonal',
+                ['float32', 'float64', 'int32', 'int64'], 'diag')
     helper = LayerHelper("diag", **locals())
     if not isinstance(diagonal, Variable):
...
...@@ -173,7 +173,7 @@ class FusionGroupPassCastTest(FusionGroupPassTest):
         self.feed_vars = self._prepare_feed_vars([2, 2], dtype, 2)
         tmp_0 = layers.elementwise_add(self.feed_vars[0], self.feed_vars[1])
-        tmp_1 = layers.cast(tmp_0, dtype="double")
+        tmp_1 = layers.cast(tmp_0, dtype="float64")
         tmp_2 = layers.cast(tmp_1, dtype="float32")
         self.append_gradients(tmp_2)
...
...@@ -20,6 +20,7 @@ from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+from paddle.fluid import Program, program_guard

 class BaseTestCase(OpTest):
...@@ -285,5 +286,22 @@ class APT_ArgMaxTest(unittest.TestCase):
         self.assertRaises(TypeError, test_dtype2)

+class TestArgMinMaxOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+
+            def test_argmax_x_type():
+                x1 = [1, 2, 3]
+                output = fluid.layers.argmax(x=x1)
+
+            self.assertRaises(TypeError, test_argmax_x_type)
+
+            def test_argmin_x_type():
+                x2 = [1, 2, 3]
+                output = fluid.layers.argmin(x=x2)
+
+            self.assertRaises(TypeError, test_argmin_x_type)
+
 if __name__ == '__main__':
     unittest.main()
...@@ -351,5 +351,28 @@ class TestSortOnGPU(TestSortOnCPU):
             self.place = core.CPUPlace()

+class TestArgsortErrorOnCPU(unittest.TestCase):
+    def init_place(self):
+        self.place = core.CPUPlace()
+
+    def test_error(self):
+        self.init_place()
+        with fluid.program_guard(fluid.Program()):
+
+            def test_input_type():
+                x = [1]
+                output = fluid.layers.argsort(input=x)
+
+            self.assertRaises(TypeError, test_input_type)
+
+class TestArgsortErrorOnGPU(TestArgsortErrorOnCPU):
+    def init_place(self):
+        if core.is_compiled_with_cuda():
+            self.place = core.CUDAPlace(0)
+        else:
+            self.place = core.CPUPlace()
+
 if __name__ == "__main__":
     unittest.main()
...@@ -78,10 +78,14 @@ class TestCastOpError(unittest.TestCase):
                 np.array([[-1]]), [[1]], fluid.CPUPlace())
             self.assertRaises(TypeError, fluid.layers.cast, x1, 'int32')
             # The input dtype of cast_op must be bool, float16, float32, float64, int32, int64, uint8.
-            x2 = fluid.layers.data(name='x2', shape=[4], dtype='int8')
+            x2 = fluid.layers.data(name='x2', shape=[4], dtype='int16')
             self.assertRaises(TypeError, fluid.layers.cast, x2, 'int32')
-            x3 = fluid.layers.data(name='x3', shape=[4], dtype='int16')
-            self.assertRaises(TypeError, fluid.layers.cast, x3, 'int32')
+
+            def test_dtype_type():
+                x4 = fluid.layers.data(name='x4', shape=[4], dtype='int32')
+                output = fluid.layers.cast(x=x4, dtype='int16')
+
+            self.assertRaises(TypeError, test_dtype_type)

 if __name__ == '__main__':
...
...@@ -17,6 +17,9 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
+from paddle.fluid import core
+from paddle.fluid import Program, program_guard

 class TestDiagOp(OpTest):
...@@ -39,5 +42,16 @@ class TestDiagOpCase1(TestDiagOp):
         self.case = np.array([3], dtype='int32')

+class TestDiagError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+
+            def test_diag_type():
+                x = [1, 2, 3]
+                output = fluid.layers.diag(diag=x)
+
+            self.assertRaises(TypeError, test_diag_type)
+
 if __name__ == "__main__":
     unittest.main()