Unverified commit 17588bbe authored by M mamingjie-China, committed by GitHub

API(has_inf, has_nan, cumsum, fsp_matrix, increment) error message enhancement, test=develop (#23769)

* API(argsort, argmax, argmin, cast, diag) add error messages, and at the same time add test cases for those APIs.
Parent 3acb047a
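For orientation only (not part of the diff, and the variable names below are illustrative): after this change, passing a plain Python list instead of a Variable to any of the touched APIs is expected to fail fast with a TypeError that names the offending argument and the API, roughly as sketched here.

    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program()):
        try:
            fluid.layers.cumsum([1, 2, 3], axis=0)  # not a Variable
        except TypeError as e:
            print(e)  # message is expected to name the input 'x' and the API 'cumsum'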
......@@ -42,7 +42,9 @@ class CumKernel : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
}
PADDLE_ENFORCE_LT(
axis, x_dims.size(),
"axis should be less than the dimensiotn of the input tensor");
platform::errors::InvalidArgument("axis(%d) should be less than the "
"dimension(%d) of the input tensor.",
axis, x_dims.size()));
Out.template mutable_data<T>(context.GetPlace());
int pre = 1;
......
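A hedged sketch of when the new kernel-side message fires (names are made up, and the failing run is left commented out): calling cumsum with an axis that is not smaller than the input rank should now report both the axis and the rank.

    import numpy as np
    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[3, 4], append_batch_size=False, dtype='float32')
    out = fluid.layers.cumsum(x, axis=2)  # rank is 2, so axis=2 is out of range
    exe = fluid.Executor(fluid.CPUPlace())
    # Running the program is expected to raise InvalidArgument:
    # "axis(2) should be less than the dimension(2) of the input tensor."
    # exe.run(fluid.default_main_program(),
    #         feed={'x': np.ones((3, 4), dtype='float32')}, fetch_list=[out])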
......@@ -23,23 +23,35 @@ class FSPOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of FSPOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of FSPOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of FSPOp should not be null.");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "fsp_op");
OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "fsp_op");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "fsp_op");
auto x_dims = ctx->GetInputDim("X");
auto y_dims = ctx->GetInputDim("Y");
-    PADDLE_ENFORCE(
-        x_dims.size() == 4,
-        "The Input(X) must have shape [batch_size, channel, height, width].");
-    PADDLE_ENFORCE(
-        y_dims.size() == 4,
-        "The Input(Y) must have shape [batch_size, channel, height, width].");
-    PADDLE_ENFORCE(
-        (x_dims[2] == y_dims[2]) && (x_dims[3] == y_dims[3]),
-        "The Input(X) and Input(Y) should have the same height and width.");
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 4UL,
+        platform::errors::InvalidArgument(
+            "The Input(X) must have shape [batch_size, channel, height, width]."
+            "Now the dimension of 'X' is %d.",
+            x_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        y_dims.size(), 4UL,
+        platform::errors::InvalidArgument(
+            "The Input(Y) must have shape [batch_size, channel, height, width]."
+            "Now the dimension of 'Y' is %d.",
+            y_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        x_dims[2], y_dims[2],
+        platform::errors::InvalidArgument(
+            "The Input(X)(%d) and Input(Y)(%d) should have the same height.",
+            x_dims[2], y_dims[2]));
+    PADDLE_ENFORCE_EQ(
+        x_dims[3], y_dims[3],
+        platform::errors::InvalidArgument(
+            "The Input(X)(%d) and Input(Y)(%d) should have the same width.",
+            x_dims[3], y_dims[3]));
ctx->SetOutputDim("Out", {x_dims[0], x_dims[1], y_dims[1]});
ctx->ShareLoD("X", "Out");
......
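For context, a minimal sketch of what the shape checks above enforce (layer names are illustrative): both inputs to fsp_matrix must be 4-D feature maps with matching height and width, and the result has shape [batch, channels_x, channels_y].

    import paddle.fluid as fluid

    data = fluid.layers.data(name='img', shape=[3, 32, 32])
    fm0 = fluid.layers.conv2d(data, num_filters=2, filter_size=3)  # [N, 2, 30, 30]
    fm1 = fluid.layers.conv2d(fm0, num_filters=4, filter_size=1)   # [N, 4, 30, 30]
    loss = fluid.layers.fsp_matrix(fm0, fm1)                       # [N, 2, 4]
    # Feature maps with mismatched heights or widths are expected to trigger the
    # "should have the same height/width" InvalidArgument messages shown above.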
......@@ -27,11 +27,13 @@ class IncrementOp : public framework::OperatorWithKernel {
: OperatorWithKernel(type, inputs, outputs, attrs) {}
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of IncrementOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of IncrementOp should not be null.");
PADDLE_ENFORCE_EQ(1, framework::product(ctx->GetInputDim("X")));
PADDLE_ENFORCE_EQ(framework::product(ctx->GetInputDim("X")), 1UL,
platform::errors::InvalidArgument(
"The number of elements in Input(X) should be 1."
"Now the number is %d.",
framework::product(ctx->GetInputDim("X"))));
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "increment");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "increment");
ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
ctx->ShareLoD("X", "Out");
}
......
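A small usage sketch (mirroring the docstring example further down) of the single-element requirement that the rewritten check now reports with the actual element count:

    import paddle.fluid as fluid

    counter = fluid.layers.zeros(shape=[1], dtype='float32')
    fluid.layers.increment(counter)  # OK: exactly one element
    vec = fluid.layers.zeros(shape=[4], dtype='float32')
    # fluid.layers.increment(vec)    # expected InvalidArgument: "Now the number is 4."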
......@@ -27,9 +27,8 @@ class OverflowOp : public framework::OperatorWithKernel {
: OperatorWithKernel(type, inputs, outputs, attrs) {}
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) should not be null");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of OverflowOp should not be null.");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "isfinite");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "isfinite");
ctx->SetOutputDim("Out", {1});
}
......
......@@ -1235,6 +1235,8 @@ def increment(x, value=1.0, in_place=True):
counter = fluid.layers.zeros(shape=[1], dtype='float32') # [0.]
fluid.layers.increment(counter) # [1.]
"""
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'increment')
helper = LayerHelper("increment", **locals())
if not in_place:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
......
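Illustrative only: with check_variable_and_dtype in place, an input whose dtype is outside float32/float64/int32/int64 is expected to be rejected up front (the float16 placeholder below is just an example, not from the diff).

    import paddle.fluid as fluid

    t = fluid.layers.data(name='t', shape=[1], append_batch_size=False, dtype='float16')
    # fluid.layers.increment(t)  # expected: TypeError naming 'x', its dtype, and 'increment'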
......@@ -13166,6 +13166,8 @@ def fsp_matrix(x, y):
loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)
"""
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fsp_matrix')
+    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'fsp_matrix')
helper = LayerHelper('fsp_matrix', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype(
input_param_name='x'))
......
......@@ -16,8 +16,8 @@ from __future__ import print_function
import os
from .layer_function_generator import generate_layer_fn, generate_activation_fn
from .. import core
-from ..framework import convert_np_dtype_to_dtype_
-from ..data_feeder import check_variable_and_dtype
+from ..framework import convert_np_dtype_to_dtype_, Variable
+from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__activations_noattr__ = [
'sigmoid',
......@@ -136,6 +136,7 @@ _cum_sum_ = generate_layer_fn('cumsum')
def cumsum(x, axis=None, exclusive=None, reverse=None):
+    check_type(x, 'x', (Variable), 'cumsum')
locals_var = locals().copy()
kwargs = dict()
for name, val in locals_var.items():
......
......@@ -1137,7 +1137,7 @@ def has_inf(x):
res = fluid.layers.has_inf(data)
"""
-    # check_type(x, 'x', (Variable), 'has_inf')
+    check_type(x, 'x', (Variable), 'has_inf')
helper = LayerHelper("isinf", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
......@@ -1162,7 +1162,7 @@ def has_nan(x):
res = fluid.layers.has_nan(data)
"""
-    # check_type(x, 'x', (Variable), 'has_nan')
+    check_type(x, 'x', (Variable), 'has_nan')
helper = LayerHelper("isnan", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
......
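A short sketch of the behaviour the re-enabled check_type enforces; `d` is a placeholder name, not part of the diff:

    import paddle.fluid as fluid

    d = fluid.layers.data(name='d', shape=[4], append_batch_size=False, dtype='float32')
    res_inf = fluid.layers.has_inf(d)   # Variable input: accepted
    res_nan = fluid.layers.has_nan(d)
    # fluid.layers.has_inf([1.0, 2.0])  # expected: TypeError, x must be a Variable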
......@@ -17,6 +17,9 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
+import paddle.fluid.core as core
+import paddle.fluid as fluid
+from paddle.fluid import compiler, Program, program_guard
class TestSumOp1(OpTest):
......@@ -125,5 +128,16 @@ class TestSumOp8(OpTest):
self.check_grad(['X'], 'Out')
+class BadInputTest(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+            def test_bad_x():
+                data = [1, 2, 3]
+                result = fluid.layers.cumsum(data, axis=0)
+            self.assertRaises(TypeError, test_bad_x)
if __name__ == '__main__':
unittest.main()
......@@ -15,6 +15,9 @@
import unittest
import numpy as np
from op_test import OpTest
+import paddle.fluid.core as core
+import paddle.fluid as fluid
+from paddle.fluid import compiler, Program, program_guard
def fsp_matrix(a, b):
......@@ -56,5 +59,28 @@ class TestFSPOp(OpTest):
self.check_grad(['X', 'Y'], 'Out')
+class BadInputTest(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+            def test_bad_x():
+                data = fluid.layers.data(name='data', shape=[3, 32, 32])
+                feature_map_0 = [1, 2, 3]
+                feature_map_1 = fluid.layers.conv2d(
+                    data, num_filters=2, filter_size=3)
+                loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)
+            self.assertRaises(TypeError, test_bad_x)
+            def test_bad_y():
+                data = fluid.layers.data(name='data', shape=[3, 32, 32])
+                feature_map_0 = fluid.layers.conv2d(
+                    data, num_filters=2, filter_size=3)
+                feature_map_1 = [1, 2, 3]
+                loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)
+            self.assertRaises(TypeError, test_bad_y)
if __name__ == '__main__':
unittest.main()
......@@ -17,7 +17,7 @@ import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest
-import unittest
+from paddle.fluid import compiler, Program, program_guard
class TestInf(OpTest):
......@@ -116,5 +116,22 @@ class TestFP16Isfinite(TestIsfinite):
self.dtype = np.float16
+class BadInputTest(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+            def test_has_inf_bad_x():
+                data = [1, 2, 3]
+                result = fluid.layers.has_inf(data)
+            self.assertRaises(TypeError, test_has_inf_bad_x)
+            def test_has_nan_bad_x():
+                data = [1, 2, 3]
+                result = fluid.layers.has_nan(data)
+            self.assertRaises(TypeError, test_has_nan_bad_x)
if __name__ == '__main__':
unittest.main()
......@@ -21,6 +21,7 @@ import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.backward import append_backward
import numpy
+from paddle.fluid import compiler, Program, program_guard
class TestWhileOp(unittest.TestCase):
......@@ -122,5 +123,16 @@ class TestWhileOp(unittest.TestCase):
layers.While(cond=cond)
+class BadInputTest(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+            def test_bad_x():
+                x = [1, 2, 3]
+                fluid.layers.increment(x)
+            self.assertRaises(TypeError, test_bad_x)
if __name__ == '__main__':
unittest.main()