Unverified commit 17588bbe, authored by mamingjie-China, committed by GitHub

API(has_inf, has_nan, cumsum, fsp_matrix, increment) error message enhancement, test=develop (#23769)

* API(argsort, argmax, argmin, cast, diag) add error messages and, at the same time, add test cases for those APIs.
Parent 3acb047a
```diff
@@ -42,7 +42,9 @@ class CumKernel : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
     }
     PADDLE_ENFORCE_LT(
         axis, x_dims.size(),
-        "axis should be less than the dimensiotn of the input tensor");
+        platform::errors::InvalidArgument("axis(%d) should be less than the "
+                                          "dimension(%d) of the input tensor.",
+                                          axis, x_dims.size()));
     Out.template mutable_data<T>(context.GetPlace());

     int pre = 1;
```
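The enriched message only fires when the kernel actually runs with an out-of-range axis. A minimal sketch of triggering it from Python (illustrative names, assuming the fluid API of this branch):

```python
import numpy as np
import paddle.fluid as fluid

# 2-D input, so axis=5 should trip the PADDLE_ENFORCE_LT above at run time.
x = fluid.data(name='x', shape=[3, 4], dtype='float32')
y = fluid.layers.cumsum(x, axis=5)

exe = fluid.Executor(fluid.CPUPlace())
try:
    exe.run(feed={'x': np.ones((3, 4), dtype='float32')}, fetch_list=[y])
except Exception as e:
    # Expected to read roughly:
    # "axis(5) should be less than the dimension(2) of the input tensor."
    print(e)
```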
```diff
@@ -23,23 +23,35 @@ class FSPOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of FSPOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of FSPOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of FSPOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "fsp_op");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "fsp_op");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "fsp_op");

     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");

-    PADDLE_ENFORCE(
-        x_dims.size() == 4,
-        "The Input(X) must have shape [batch_size, channel, height, width].");
-    PADDLE_ENFORCE(
-        y_dims.size() == 4,
-        "The Input(Y) must have shape [batch_size, channel, height, width].");
-    PADDLE_ENFORCE(
-        (x_dims[2] == y_dims[2]) && (x_dims[3] == y_dims[3]),
-        "The Input(X) and Input(Y) should have the same height and width.");
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 4UL,
+        platform::errors::InvalidArgument(
+            "The Input(X) must have shape [batch_size, channel, height, width]."
+            "Now the dimension of 'X' is %d.",
+            x_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        y_dims.size(), 4UL,
+        platform::errors::InvalidArgument(
+            "The Input(Y) must have shape [batch_size, channel, height, width]."
+            "Now the dimension of 'Y' is %d.",
+            y_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        x_dims[2], y_dims[2],
+        platform::errors::InvalidArgument(
+            "The Input(X)(%d) and Input(Y)(%d) should have the same height.",
+            x_dims[2], y_dims[2]));
+    PADDLE_ENFORCE_EQ(
+        x_dims[3], y_dims[3],
+        platform::errors::InvalidArgument(
+            "The Input(X)(%d) and Input(Y)(%d) should have the same width.",
+            x_dims[3], y_dims[3]));

     ctx->SetOutputDim("Out", {x_dims[0], x_dims[1], y_dims[1]});
     ctx->ShareLoD("X", "Out");
```
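The combined height/width assertion is split in two, so the message now names the offending spatial dimension and both values. For reference, a shape-valid call looks like the following sketch (hypothetical names; channel counts may differ, only height and width must match):

```python
import paddle.fluid as fluid

# Both inputs are 4-D [batch_size, channel, height, width] feature maps.
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
feature_map_0 = fluid.layers.conv2d(data, num_filters=2, filter_size=3)
feature_map_1 = fluid.layers.conv2d(
    feature_map_0, num_filters=4, filter_size=1)

# Spatial dims agree (30x30 after the unpadded 3x3 conv), so InferShape
# passes and Out gets shape [batch_size, x_channel, y_channel] = [N, 2, 4].
loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)
```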
```diff
@@ -27,11 +27,13 @@ class IncrementOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of IncrementOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of IncrementOp should not be null.");
-    PADDLE_ENFORCE_EQ(1, framework::product(ctx->GetInputDim("X")));
+    PADDLE_ENFORCE_EQ(framework::product(ctx->GetInputDim("X")), 1UL,
+                      platform::errors::InvalidArgument(
+                          "The number of elements in Input(X) should be 1."
+                          "Now the number is %d.",
+                          framework::product(ctx->GetInputDim("X"))));
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "increment");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "increment");
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     ctx->ShareLoD("X", "Out");
   }
```
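The bare `PADDLE_ENFORCE_EQ` gains a message that reports the actual element count. A hedged sketch of what passes and what now fails at graph-construction time:

```python
import paddle.fluid as fluid

# Valid: increment expects a tensor with exactly one element.
counter = fluid.layers.zeros(shape=[1], dtype='float32')  # [0.]
fluid.layers.increment(counter)                           # [1.]

# Invalid: three elements; InferShape should now report the actual count (3)
# in the InvalidArgument message instead of a bare comparison failure.
bad = fluid.layers.zeros(shape=[3], dtype='float32')
# fluid.layers.increment(bad)  # uncommenting raises the error above
```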
```diff
@@ -27,9 +27,8 @@ class OverflowOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of OverflowOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "isfinite");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "isfinite");
     ctx->SetOutputDim("Out", {1});
   }
```
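OverflowOp backs the isinf/isnan/isfinite kernels, and its output is always a single flag (`SetOutputDim("Out", {1})`). A sketch of the happy path (illustrative names; the exact rendering of the fetched value may vary by version):

```python
import numpy as np
import paddle.fluid as fluid

data = fluid.data(name='data', shape=[3], dtype='float32')
res = fluid.layers.has_inf(data)  # single-element result, per SetOutputDim

exe = fluid.Executor(fluid.CPUPlace())
out, = exe.run(feed={'data': np.array([1.0, np.inf, 3.0], dtype='float32')},
               fetch_list=[res])
print(out)  # a one-element truthy array, since the input contains an inf
```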
```diff
@@ -1235,6 +1235,8 @@ def increment(x, value=1.0, in_place=True):
             counter = fluid.layers.zeros(shape=[1], dtype='float32') # [0.]
             fluid.layers.increment(counter) # [1.]
     """
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'increment')
     helper = LayerHelper("increment", **locals())
     if not in_place:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
```
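With the `check_variable_and_dtype` guard, a non-Variable input (or an unsupported dtype) fails fast in Python instead of surfacing later from C++. A minimal sketch mirroring the test added at the end of this change:

```python
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program()):
    try:
        fluid.layers.increment([1, 2, 3])  # plain list, not a Variable
    except TypeError as e:
        print(e)  # raised by check_variable_and_dtype for 'increment'
```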
```diff
@@ -13166,6 +13166,8 @@ def fsp_matrix(x, y):
             loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)
     """
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fsp_matrix')
+    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'fsp_matrix')
     helper = LayerHelper('fsp_matrix', **locals())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype(
         input_param_name='x'))
```
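Both operands are validated, and only float32/float64 are accepted. A hedged sketch of the dtype rejection (hypothetical names):

```python
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program()):
    x = fluid.data(name='x', shape=[None, 2, 8, 8], dtype='int32')
    y = fluid.data(name='y', shape=[None, 4, 8, 8], dtype='int32')
    try:
        fluid.layers.fsp_matrix(x, y)  # int32 is not in the allowed list
    except TypeError as e:
        print(e)  # dtype error reported by check_variable_and_dtype
```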
```diff
@@ -16,8 +16,8 @@ from __future__ import print_function
 import os
 from .layer_function_generator import generate_layer_fn, generate_activation_fn
 from .. import core
-from ..framework import convert_np_dtype_to_dtype_
-from ..data_feeder import check_variable_and_dtype
+from ..framework import convert_np_dtype_to_dtype_, Variable
+from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype

 __activations_noattr__ = [
     'sigmoid',
@@ -136,6 +136,7 @@ _cum_sum_ = generate_layer_fn('cumsum')

 def cumsum(x, axis=None, exclusive=None, reverse=None):
+    check_type(x, 'x', (Variable), 'cumsum')
     locals_var = locals().copy()
     kwargs = dict()
     for name, val in locals_var.items():
```
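Note that `(Variable)` is a bare type rather than a one-element tuple; `check_type` accepts either form since it delegates to `isinstance`. A minimal sketch of the new guard, mirroring the test added further down:

```python
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program()):
    try:
        fluid.layers.cumsum([1, 2, 3], axis=0)  # list, not a Variable
    except TypeError as e:
        print(e)  # raised by check_type for 'cumsum'
```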
...@@ -1137,7 +1137,7 @@ def has_inf(x): ...@@ -1137,7 +1137,7 @@ def has_inf(x):
res = fluid.layers.has_inf(data) res = fluid.layers.has_inf(data)
""" """
# check_type(x, 'x', (Variable), 'has_inf') check_type(x, 'x', (Variable), 'has_inf')
helper = LayerHelper("isinf", **locals()) helper = LayerHelper("isinf", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out}) helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
...@@ -1162,7 +1162,7 @@ def has_nan(x): ...@@ -1162,7 +1162,7 @@ def has_nan(x):
res = fluid.layers.has_nan(data) res = fluid.layers.has_nan(data)
""" """
# check_type(x, 'x', (Variable), 'has_nan') check_type(x, 'x', (Variable), 'has_nan')
helper = LayerHelper("isnan", **locals()) helper = LayerHelper("isnan", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out}) helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
......
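Uncommenting these guards makes both APIs reject non-Variable inputs up front. A quick sketch exercising both, in line with the tests added below:

```python
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program()):
    for api in (fluid.layers.has_inf, fluid.layers.has_nan):
        try:
            api([1, 2, 3])  # list input is rejected before the op is built
        except TypeError as e:
            print(e)
```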
```diff
@@ -17,6 +17,9 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid.core as core
+import paddle.fluid as fluid
+from paddle.fluid import compiler, Program, program_guard


 class TestSumOp1(OpTest):
@@ -125,5 +128,16 @@ class TestSumOp8(OpTest):
         self.check_grad(['X'], 'Out')


+class BadInputTest(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+
+            def test_bad_x():
+                data = [1, 2, 3]
+                result = fluid.layers.cumsum(data, axis=0)
+
+            self.assertRaises(TypeError, test_bad_x)
+
+
 if __name__ == '__main__':
     unittest.main()
```
```diff
@@ -15,6 +15,9 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid.core as core
+import paddle.fluid as fluid
+from paddle.fluid import compiler, Program, program_guard


 def fsp_matrix(a, b):
@@ -56,5 +59,28 @@ class TestFSPOp(OpTest):
         self.check_grad(['X', 'Y'], 'Out')


+class BadInputTest(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+
+            def test_bad_x():
+                data = fluid.layers.data(name='data', shape=[3, 32, 32])
+                feature_map_0 = [1, 2, 3]
+                feature_map_1 = fluid.layers.conv2d(
+                    data, num_filters=2, filter_size=3)
+                loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)
+
+            self.assertRaises(TypeError, test_bad_x)
+
+            def test_bad_y():
+                data = fluid.layers.data(name='data', shape=[3, 32, 32])
+                feature_map_0 = fluid.layers.conv2d(
+                    data, num_filters=2, filter_size=3)
+                feature_map_1 = [1, 2, 3]
+                loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)
+
+            self.assertRaises(TypeError, test_bad_y)
+
+
 if __name__ == '__main__':
     unittest.main()
```
```diff
@@ -17,7 +17,7 @@ import numpy as np
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from op_test import OpTest
-import unittest
+from paddle.fluid import compiler, Program, program_guard


 class TestInf(OpTest):
@@ -116,5 +116,22 @@ class TestFP16Isfinite(TestIsfinite):
         self.dtype = np.float16


+class BadInputTest(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+
+            def test_has_inf_bad_x():
+                data = [1, 2, 3]
+                result = fluid.layers.has_inf(data)
+
+            self.assertRaises(TypeError, test_has_inf_bad_x)
+
+            def test_has_nan_bad_x():
+                data = [1, 2, 3]
+                result = fluid.layers.has_nan(data)
+
+            self.assertRaises(TypeError, test_has_nan_bad_x)
+
+
 if __name__ == '__main__':
     unittest.main()
```
```diff
@@ -21,6 +21,7 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid.backward import append_backward
 import numpy
+from paddle.fluid import compiler, Program, program_guard


 class TestWhileOp(unittest.TestCase):
@@ -122,5 +123,16 @@ class TestWhileOp(unittest.TestCase):
             layers.While(cond=cond)


+class BadInputTest(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+
+            def test_bad_x():
+                x = [1, 2, 3]
+                fluid.layers.increment(x)
+
+            self.assertRaises(TypeError, test_bad_x)
+
+
 if __name__ == '__main__':
     unittest.main()
```