Unverified commit 479c47f3, authored by Xing Wu, committed by GitHub

fix error info for transpose sequence_conv_pool max_sequence_len sequ… (#24437)

* fix error info for transpose sequence_conv_pool max_sequence_len sequence_erase, test=develop

* fix error info for transpose sequence_conv_pool max_sequence_len sequence_erase, test=develop

* update modify, test=develop

* update modify, test=develop

* fixed some modifications, test=develop
Parent 1c00732d
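Every hunk below follows the same recipe: bare PADDLE_ENFORCE presence checks become OP_INOUT_CHECK calls, and value checks gain a typed platform::errors::InvalidArgument payload whose printf-style message embeds the offending runtime values. A minimal self-contained sketch of that idea, with std::invalid_argument and snprintf standing in for Paddle's enforce machinery (the names here are illustrative, not Paddle APIs):

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>

// Toy stand-in for platform::errors::InvalidArgument: printf-style
// formatting so the thrown message carries actual runtime values.
template <typename... Args>
std::string FormatError(const char* fmt, Args... args) {
  char buf[512];
  std::snprintf(buf, sizeof(buf), fmt, args...);
  return std::string(buf);
}

int main() {
  int x_rank = 3, axis_size = 2;
  try {
    if (x_rank != axis_size) {
      // Before the fix the message was a bare string; now it names the
      // constraint and reports both values that violated it.
      throw std::invalid_argument(FormatError(
          "The input tensor's dimension should be equal to the axis's "
          "size. But received input tensor's dimension is %d, axis's "
          "size is %d",
          x_rank, axis_size));
    }
  } catch (const std::invalid_argument& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}
```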
@@ -57,7 +57,8 @@ class MaxSeqenceLenOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 class MaxSeqenceLenInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInput("RankTable"));
+    OP_INOUT_CHECK(context->HasInput("RankTable"), "Input", "RankTable",
+                   "MaxSeqenceLen");
     context->SetOutputDim("Out", {1});
   }
 };
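OP_INOUT_CHECK replaces hand-rolled null checks with a uniform message assembled from the role ("Input" or "Output"), the variable name, and the operator name. A hedged sketch of what such a macro might expand to; the real macro lives in Paddle's enforce headers and its exact wording differs:

```cpp
#include <cstdio>
#include <sstream>
#include <stdexcept>

// Illustrative macro in the spirit of OP_INOUT_CHECK: the call site only
// supplies the predicate plus context; the message format is centralized.
#define MY_INOUT_CHECK(pred, role, name, op)             \
  do {                                                   \
    if (!(pred)) {                                       \
      std::ostringstream os;                             \
      os << "The " << (role) << "(" << (name) << ") of " \
         << (op) << " is not found.";                    \
      throw std::runtime_error(os.str());                \
    }                                                    \
  } while (0)

bool HasInput(const char*) { return false; }  // pretend RankTable is absent

int main() {
  try {
    MY_INOUT_CHECK(HasInput("RankTable"), "Input", "RankTable",
                   "MaxSeqenceLen");
  } catch (const std::runtime_error& e) {
    // Prints: The Input(RankTable) of MaxSeqenceLen is not found.
    std::printf("%s\n", e.what());
  }
  return 0;
}
```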
@@ -23,14 +23,15 @@ class SequenceEraseOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceErase operator should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceErase operator should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceErase");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceErase");
     auto x_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE(x_dims.size() == 2 && x_dims[1] == 1,
-                   "Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
-                   "with the 2nd dimension equal to 1.");
+                   platform::errors::InvalidArgument(
+                       "Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
+                       "with the 2nd dimension equal to 1,"
+                       "but received size %d with the 2nd dimension %d.",
+                       x_dims.size(), x_dims[1]));
     ctx->SetOutputDim("Out", x_dims);
     // The output LoDTensor's lod_level should be input X's lod_level.
     // For compile-time, we call SetLoDLevel to set output's lod_level.
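The reworked shape check now reports the rank and the second dimension it actually received. The same validation, extracted into a hypothetical standalone helper over a plain dims vector:

```cpp
#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <vector>

// Mirrors the SequenceErase constraint: X must be 2-D with dims[1] == 1.
void CheckSequenceEraseShape(const std::vector<int64_t>& dims) {
  if (!(dims.size() == 2 && dims[1] == 1)) {
    char buf[256];
    std::snprintf(buf, sizeof(buf),
                  "Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
                  "with the 2nd dimension equal to 1, but received size %d "
                  "with the 2nd dimension %d.",
                  static_cast<int>(dims.size()),
                  static_cast<int>(dims.size() > 1 ? dims[1] : -1));
    throw std::invalid_argument(buf);
  }
}

int main() {
  try {
    CheckSequenceEraseShape({8, 3});  // wrong: 2nd dimension is 3, not 1
  } catch (const std::invalid_argument& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}
```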
@@ -64,8 +64,10 @@ class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> {
     auto* out = ctx.Output<LoDTensor>("Out");
     auto lod = in->lod();
-    PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(),
-                      "The actual size mismatches with the LoD information.");
+    PADDLE_ENFORCE_EQ(
+        lod[lod.size() - 1].back(), (size_t)in->numel(),
+        platform::errors::InvalidArgument(
+            "The actual size mismatches with the LoD information."));
     auto tokens = ctx.Attr<std::vector<int>>("tokens");
     auto in_len = in->numel();
     auto in_dat = in->data<T>();
@@ -30,9 +30,13 @@ class SequenceEraseKernel : public framework::OpKernel<T> {
     auto lod = in->lod();
     PADDLE_ENFORCE_EQ(
         lod.empty(), false,
-        "Input(X) Tensor of SequenceEraseOp does not contain LoD information.");
+        platform::errors::InvalidArgument("Input(X) Tensor of SequenceEraseOp "
+                                          "does not contain LoD information."));
     PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(),
-                      "The actual size mismatches with the LoD information.");
+                      platform::errors::InvalidArgument(
+                          "The actual input size %d mismatches with the LoD "
+                          "information size %d.",
+                          lod[lod.size() - 1].back(), (size_t)in->numel()));
     auto tokens = ctx.Attr<std::vector<int>>("tokens");
     auto in_len = in->numel();
     auto in_dat = in->data<T>();
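The CUDA and CPU kernels guard the same invariant: the final offset in the last LoD level must equal the tensor's element count, otherwise the sequence metadata disagrees with the stored data. A toy illustration, assuming a LoD is simply a list of cumulative offset vectors:

```cpp
#include <cstdio>
#include <vector>

int main() {
  // Three sequences of lengths 2, 3, 1 -> cumulative offsets {0, 2, 5, 6}.
  std::vector<std::vector<size_t>> lod = {{0, 2, 5, 6}};
  size_t numel = 6;  // elements actually stored in the tensor

  // The kernels' invariant: the last level's final offset == numel.
  size_t lod_total = lod[lod.size() - 1].back();
  if (lod_total != numel) {
    std::printf("The actual input size %zu mismatches with the LoD "
                "information size %zu.\n",
                numel, lod_total);
  } else {
    std::printf("LoD is consistent: %zu elements.\n", numel);
  }
  return 0;
}
```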
@@ -31,31 +31,33 @@ class TransposeOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Transpose");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Transpose");
     auto x_dims = ctx->GetInputDim("X");
     std::vector<int> axis = ctx->Attrs().Get<std::vector<int>>("axis");
     size_t x_rank = x_dims.size();
     size_t axis_size = axis.size();
 
     PADDLE_ENFORCE_EQ(x_rank, axis_size,
-                      "ShapeError: The input tensor's dimension "
-                      "should be equal to the axis's size. "
-                      "But received input tensor's dimension is %d, "
-                      "axis's size is %d",
-                      x_rank, axis_size);
+                      platform::errors::InvalidArgument(
+                          "The input tensor's dimension "
+                          "should be equal to the axis's size. "
+                          "But received input tensor's dimension is %d, "
+                          "axis's size is %d",
+                          x_rank, axis_size));
 
     std::vector<int> count(axis_size, 0);
     for (size_t i = 0; i < axis_size; i++) {
-      PADDLE_ENFORCE(
-          axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1,
-          "ValueError: Each element of Attribute axis should "
-          "be a unique value range from 0 to (dims - 1), "
-          "where the dims is the axis's size, "
-          "unique value means this axis value can appear only once. "
-          "But received axis[%d] is %d, axis_size is %d, "
-          "count[axis[%d]] is %d",
-          i, axis[i], axis_size, i, count[axis[i]]);
+      PADDLE_ENFORCE_EQ(
+          axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1, true,
+          platform::errors::InvalidArgument(
+              "Each element of Attribute axis should "
+              "be a unique value range from 0 to (dims - 1), "
+              "where the dims is the axis's size, "
+              "unique value means this axis value can appear only once. "
+              "But received axis[%d] is %d, axis_size is %d, "
+              "count[axis[%d]] is %d",
+              i, axis[i], axis_size, i, count[axis[i]]));
     }
 
     framework::DDim out_dims(x_dims);
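The loop above is a one-pass permutation validator: count tallies how often each axis value occurs, the range clause rejects out-of-range entries, and the ++count clause rejects duplicates. The same algorithm as a standalone function, plus an explicit non-negativity guard that the original condition omits:

```cpp
#include <cstdio>
#include <vector>

// True iff `axis` is a permutation of 0..axis.size()-1, using the same
// one-pass counting trick as TransposeOp::InferShape.
bool IsValidPermutation(const std::vector<int>& axis) {
  std::vector<int> count(axis.size(), 0);
  for (size_t i = 0; i < axis.size(); i++) {
    // Short-circuiting keeps count[axis[i]] from being touched when the
    // value is out of range; a duplicate pushes its count past 1.
    if (!(axis[i] >= 0 && axis[i] < static_cast<int>(axis.size()) &&
          ++count[axis[i]] == 1)) {
      return false;
    }
  }
  return true;
}

int main() {
  std::printf("%d\n", IsValidPermutation({2, 0, 1}));  // 1: a permutation
  std::printf("%d\n", IsValidPermutation({1, 1, 0}));  // 0: duplicate value
  std::printf("%d\n", IsValidPermutation({0, 3, 1}));  // 0: 3 out of range
  return 0;
}
```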
@@ -149,9 +151,9 @@ class TransposeOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "TransposeOpGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "TransposeOpGrad");
     auto x_dims = ctx->GetInputDim("X");
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
     if (ctx->HasOutput(framework::GradVarName("X"))) {
@@ -193,8 +195,7 @@ class Transpose2Op : public TransposeOp {
 
   void InferShape(framework::InferShapeContext *ctx) const override {
     TransposeOp::InferShape(ctx);
-    PADDLE_ENFORCE(ctx->HasOutput("XShape"),
-                   "Output(XShape) should not be null");
+    OP_INOUT_CHECK(ctx->HasOutput("XShape"), "Output", "XShape", "Transpose2");
     const auto &in_dims = ctx->GetInputDim("X");
     std::vector<int64_t> x_shape_dim(in_dims.size() + 1);
     x_shape_dim[0] = 0;
@@ -259,9 +260,10 @@ class Transpose2OpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("XShape"), "Input(XShape) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("XShape"), "Input", "XShape",
+                   "Transpose2OpGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "Transpose2OpGrad");
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       auto xshape_dim = ctx->GetInputDim("XShape");
       auto x_shape_dim =
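Transpose2 stashes the input's shape in XShape behind a dummy leading 0 (the x_shape_dim[0] = 0 line two hunks up), and Transpose2OpGrad recovers X's dims from it; the assignment truncated above presumably slices off that first element. A sketch of the round trip under that assumption:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int64_t> in_dims = {4, 8, 16};  // shape of X

  // Forward (Transpose2): XShape = {0, in_dims...}; the leading 0 is a
  // placeholder, only positions 1..n carry the real shape.
  std::vector<int64_t> x_shape_dim(in_dims.size() + 1);
  x_shape_dim[0] = 0;
  for (size_t i = 0; i < in_dims.size(); i++) {
    x_shape_dim[i + 1] = in_dims[i];
  }

  // Backward (Transpose2OpGrad): drop the first element to get X's dims
  // back, so the grad op never needs X itself.
  std::vector<int64_t> recovered(x_shape_dim.begin() + 1, x_shape_dim.end());
  for (int64_t d : recovered) {
    std::printf("%lld ", static_cast<long long>(d));  // prints: 4 8 16
  }
  std::printf("\n");
  return 0;
}
```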
@@ -53,7 +53,10 @@ inline void TransCompute(const int dim, const DeviceContext& dev_ctx,
       trans6(dev_ctx, in, out, axis);
       break;
     default:
-      PADDLE_THROW("Tensors with rank at most 6 are supported");
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Tensors with rank at most 6 are supported"
+          ", but received input tensor's rank is %d,",
+          dim));
   }
 }
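TransCompute dispatches on rank to one of six specialized functors (trans1 through trans6), so anything above rank 6 lands in default, which now reports the received rank. The dispatch shape in miniature, with a hypothetical template stub in place of the functors:

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for the rank-specialized functors trans1..trans6.
template <int Rank>
void TransRank() {
  std::printf("transpose specialized for rank %d\n", Rank);
}

void TransCompute(int dim) {
  switch (dim) {
    case 1: TransRank<1>(); break;
    case 2: TransRank<2>(); break;
    case 3: TransRank<3>(); break;
    case 4: TransRank<4>(); break;
    case 5: TransRank<5>(); break;
    case 6: TransRank<6>(); break;
    default:
      // Mirrors the fixed error: state the limit and the received rank.
      throw std::invalid_argument(
          "Tensors with rank at most 6 are supported, but received input "
          "tensor's rank is " +
          std::to_string(dim));
  }
}

int main() {
  TransCompute(3);
  try {
    TransCompute(7);
  } catch (const std::invalid_argument& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}
```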
@@ -305,6 +305,8 @@ def sequence_conv_pool(input,
             act="tanh",
             pool_type="sqrt")
     """
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'input')
+
     conv_out = layers.sequence_conv(
         input=input,
         num_filters=num_filters,