Unverified commit 729f5846, authored by Aurelius84, committed by GitHub

enhance shape error message of fc API (#20172)

* add api check in fc test=develop

* enforce shape error info of sum op test=develop

* fix spelling test=develop

* print x_dims info test=develop

* enhance shape error info test=develop
Parent 6fbf4410
paddle/fluid/operators/sum_op.cc:

@@ -33,10 +33,11 @@ class SumOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) should not be null");
+    PADDLE_ENFORCE_EQ(ctx->HasInputs("X"), true,
+                      "Inputs(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SumOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      "Output(Out) of SumOp should not be null.");
     if (ctx->IsRuntime() &&
         ctx->GetOutputsVarType("Out")[0] ==
             framework::proto::VarType::LOD_TENSOR_ARRAY) {
@@ -46,10 +47,15 @@ class SumOp : public framework::OperatorWithKernel {
     auto x_var_types = ctx->GetInputsVarType("X");
     auto x_dims = ctx->GetInputsDim("X");
-    size_t N = x_dims.size();
-    PADDLE_ENFORCE_GT(N, 0, "Input tensors count should > 0.");
+    auto N = x_dims.size();
+    PADDLE_ENFORCE_GT(
+        N, 0,
+        "ShapeError: The input tensor X's dimensions of SumOp "
+        "should be larger than 0. But received X's dimensions %d, "
+        "X's shape = [%s].",
+        N, &x_dims);
     if (N == 1) {
-      VLOG(3) << "Warning: sum has only one input, may waste memory";
+      VLOG(3) << "Warning: SumOp has only one input, may waste memory";
     }

     framework::DDim in_dim({0});
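As an aside, the `N == 1` warning above only surfaces when verbose logging is enabled. A minimal sketch of how the single-input path is reached from Python (assuming the 1.6-era `fluid` API; `VLOG(3)` output is shown when the `GLOG_v=3` environment variable is set):

```python
import paddle.fluid as fluid

# A single-input sum takes the N == 1 branch in SumOp::InferShape and,
# with GLOG_v=3 set, logs "Warning: SumOp has only one input ...".
x = fluid.layers.data(name='x', shape=[3, 4], dtype='float32')
out = fluid.layers.sum([x])
```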
@@ -67,18 +73,29 @@ class SumOp : public framework::OperatorWithKernel {
         in_dim = x_dim;
       } else {
         if (ctx->IsRuntime()) {
-          PADDLE_ENFORCE_EQ(in_dim, x_dim,
-                            "Input tensors must have same shape");
+          PADDLE_ENFORCE_EQ(
+              in_dim, x_dim,
+              "ShapeError: The input tensor X of SumOp must have same shape. "
+              "But received X[0]'s shape = [%s], X[%d]'s shape = [%s].",
+              in_dim, i, x_dim);
         } else {
-          PADDLE_ENFORCE_EQ(in_dim.size(), x_dim.size(),
-                            "Input tensors must have same shape size");
+          PADDLE_ENFORCE_EQ(
+              in_dim.size(), x_dim.size(),
+              "ShapeError: The input tensor X of SumOp must have same "
+              "dimensions. But received X[0]'s dimensions = %d, X[0]'s shape = "
+              "[%s], X[%d]'s dimensions = %d, X[%d]'s shape = [%s].",
+              in_dim.size(), in_dim, i, x_dim.size(), i, x_dim);
           // if in_dim or x_dim has -1, do not check them for equality
-          for (int i = 0; i < x_dim.size(); ++i) {
-            if (x_dim[i] == -1 || in_dim[i] == -1) {
+          for (int j = 0; j < x_dim.size(); ++j) {
+            if (x_dim[j] == -1 || in_dim[j] == -1) {
               continue;
             }
-            PADDLE_ENFORCE_EQ(in_dim[i], x_dim[i],
-                              "Input tensors must have same shape if not -1");
+            PADDLE_ENFORCE_EQ(
+                in_dim[j], x_dim[j],
+                "ShapeError: The input tensor X of SumOp must have same shape "
+                "if not -1. "
+                "But received X[0]'s shape = [%s], X[%d]'s shape = [%s].",
+                in_dim, i, x_dim);
           }
         }
       }
@@ -107,8 +124,9 @@ class SumOp : public framework::OperatorWithKernel {
     if (x_vars[0]->IsType<framework::LoDTensor>()) {
       int dtype = -1;
       for (size_t idx = 0; idx < x_vars.size(); ++idx) {
-        PADDLE_ENFORCE(x_vars[idx] != nullptr,
-                       "Input var[%s] should not be nullptr", x_vars_name[idx]);
+        PADDLE_ENFORCE_NOT_NULL(x_vars[idx],
+                                "Input var[%s] should not be nullptr",
+                                x_vars_name[idx]);
         auto tensor =
             framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_vars[idx]);
         if (tensor->numel() <= 0 || (!tensor->IsInitialized())) {
@@ -202,8 +220,8 @@ class SumOpVarTypeInference : public framework::VarTypeInference {
       for (auto& each : inputs) {
         os << " " << each << " type is " << ctx->GetType(each) << "\n";
       }
-      PADDLE_ENFORCE(all_inputs_are_tensor_array,
-                     "Not all inputs are tensor array:\n%s", os.str());
+      PADDLE_ENFORCE_EQ(all_inputs_are_tensor_array, true,
+                        "Not all inputs are tensor array:\n%s", os.str());
     }
     var_type = framework::proto::VarType::LOD_TENSOR_ARRAY;
   } else if (any_input_is_lod_tensor) {
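To see the effect of the new shape checks end to end, here is a minimal repro sketch (assuming the 1.6-era `fluid` API; the shapes are made up): building a `sum` over two tensors whose static shapes disagree now fails at graph-construction time with one of the `ShapeError` messages introduced above.

```python
import paddle.fluid as fluid

x0 = fluid.layers.data(
    name='x0', shape=[3, 4], append_batch_size=False, dtype='float32')
x1 = fluid.layers.data(
    name='x1', shape=[3, 5], append_batch_size=False, dtype='float32')
try:
    out = fluid.layers.sum([x0, x1])  # dim 4 vs 5 trips PADDLE_ENFORCE_EQ
except Exception as e:
    # expect "ShapeError: The input tensor X of SumOp must have same shape
    # if not -1. But received X[0]'s shape = ..., X[1]'s shape = ..."
    print(e)
```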
python/paddle/fluid/layers/nn.py:

@@ -343,8 +343,22 @@ def fc(input,
             fc = fluid.layers.fc(input=[data_1, data_2], size=1000, act="tanh")
     """
     helper = LayerHelper("fc", **locals())
+    if isinstance(input, (list, tuple)):
+        for i, input_x in enumerate(input):
+            if not isinstance(input_x, Variable):
+                raise TypeError(
+                    "The type of input[%d] in fc must be Variable, but received %s"
+                    % (i, type(input_x)))
+    else:
+        if not isinstance(input, Variable):
+            raise TypeError(
+                "The type of 'input' in fc must be Variable, but received %s" %
+                (type(input)))
     dtype = helper.input_dtype()
+    if convert_dtype(dtype) not in ['float32', 'float64']:
+        raise TypeError(
+            "The data type of 'input' in fc must be float32 or float64, but received %s."
+            % (convert_dtype(dtype)))
     mul_results = []
     for input_var, param_attr in helper.iter_inputs_and_params():
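With these checks, `fc` fails fast with a descriptive `TypeError` instead of a cryptic failure deeper in the stack. A quick sketch of the two rejected cases (assuming the 1.6-era `fluid` API):

```python
import numpy as np
import paddle.fluid as fluid

# Case 1: a raw numpy array is not a Variable.
try:
    fluid.layers.fc(input=np.random.random((2, 4)).astype("float32"), size=1)
except TypeError as e:
    print(e)  # "The type of 'input' in fc must be Variable, but received ..."

# Case 2: an integer-typed Variable is rejected by the dtype check.
x = fluid.layers.data(name='x', shape=[4], dtype='int32')
try:
    fluid.layers.fc(input=x, size=1)
except TypeError as e:
    print(e)  # "The data type of 'input' in fc must be float32 or float64 ..."
```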
python/paddle/fluid/tests/unittests/test_fc_op.py:

@@ -15,6 +15,8 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
+from paddle.fluid import Program, program_guard


 def fc_refer(matrix, with_bias, with_relu=False):
@@ -122,5 +124,30 @@ class TestFCOpWithBias3(TestFCOp):
         self.matrix = MatrixGenerate(1, 64, 32, 3, 3, 1)


+class TestFCOpError(OpTest):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            input_data = np.random.random((2, 4)).astype("float32")
+
+            def test_Variable():
+                # the input type must be Variable
+                fluid.layers.fc(input=input_data, size=1)
+
+            self.assertRaises(TypeError, test_Variable)
+
+            def test_input_list():
+                # each element of the input list must be a Variable
+                fluid.layers.fc(input=[input_data], size=1)
+
+            self.assertRaises(TypeError, test_input_list)
+
+            def test_type():
+                # dtype must be float32 or float64
+                x2 = fluid.layers.data(name='x2', shape=[4], dtype='int32')
+                fluid.layers.fc(input=x2, size=1)
+
+            self.assertRaises(TypeError, test_type)
+
+
 if __name__ == "__main__":
     unittest.main()
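For contrast, the positive path is unchanged; a float32 `Variable` still builds normally (a sketch under the same API assumptions as above):

```python
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.fc(input=x, size=1)  # passes both the type and dtype checks
```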