From fd9b7bdb3d9e5b9dcd1c604cb1c6b7b6d0d4df17 Mon Sep 17 00:00:00 2001
From: zhangchunle
Date: Sat, 4 Apr 2020 22:49:53 +0800
Subject: [PATCH] Op (FusedEmbeddingSeqPool) error message enhancement.
 (#23454)

---
 .../fused/fused_embedding_seq_pool_op.cc | 45 +++++++++++++------
 .../fused/fused_embedding_seq_pool_op.h  | 23 ++++++++--
 2 files changed, 50 insertions(+), 18 deletions(-)

diff --git a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc
index 25fc044c71c..8ebeb9cd26d 100644
--- a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc
+++ b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc
@@ -24,30 +24,47 @@ class FusedEmbeddingSeqPoolOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("W"),
-                   "Input W of FusedEmbeddingSeqPoolOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Ids"),
-                   "Input Ids of FusedEmbeddingSeqPoolOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output of FusedEmbeddingSeqPoolOp should not be null.");
-
+    OP_INOUT_CHECK(ctx->HasInput("W"), "Input", "W", "FusedEmbeddingSeqPool");
+    OP_INOUT_CHECK(ctx->HasInput("Ids"), "Input", "Ids",
+                   "FusedEmbeddingSeqPool");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out",
+                   "FusedEmbeddingSeqPool");
     auto table_dims = ctx->GetInputDim("W");
     auto ids_dims = ctx->GetInputDim("Ids");
     const std::string& combiner = ctx->Attrs().Get<std::string>("combiner");
 
-    PADDLE_ENFORCE_EQ(table_dims.size(), 2);
-    PADDLE_ENFORCE_GE(ids_dims.size(), 1,
-                      "The dim size of the 'Ids' tensor must greater than 1.");
-    PADDLE_ENFORCE_EQ(ids_dims[ids_dims.size() - 1], 1,
-                      "The last dimension of the 'Ids' tensor must be 1.");
+    PADDLE_ENFORCE_EQ(table_dims.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "The dim size of the input tensor 'W' should be 2. "
+                          "But received W's size = %d.",
+                          table_dims.size()));
+    PADDLE_ENFORCE_GE(
+        ids_dims.size(), 1,
+        platform::errors::InvalidArgument(
+            "The dim size of the input tensor 'Ids' should be greater "
+            "than or equal to 1. But received Ids's size = %d.",
+            ids_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        ids_dims[ids_dims.size() - 1], 1,
+        platform::errors::InvalidArgument(
+            "The last dimension of the input tensor 'Ids' should be 1. "
+            "But received Ids's size in the last dimension = %d.",
+            ids_dims[ids_dims.size() - 1]));
     // we only support sum now
-    PADDLE_ENFORCE_EQ(combiner, "sum");
+    PADDLE_ENFORCE_EQ(combiner, "sum",
+                      platform::errors::Unimplemented(
+                          "The pooling type of sequence_pool only support sum "
+                          "now. So the 'combiner' must be 'sum'."));
 
     int64_t last_dim = FusedEmbeddingSeqPoolLastDim(table_dims, ids_dims);
     // in compile time, the lod level of ids must be 1
     framework::VarDesc* ids_desc =
         boost::get<framework::VarDesc*>(ctx->GetInputVarPtrs("Ids")[0]);
-    PADDLE_ENFORCE_EQ(ids_desc->GetLoDLevel(), 1);
+    PADDLE_ENFORCE_EQ(ids_desc->GetLoDLevel(), 1,
+                      platform::errors::InvalidArgument(
+                          "In compile time, the LoD Level of Ids should be 1. "
+                          "But received the LoD Level of Ids = %d.",
+                          ids_desc->GetLoDLevel()));
 
     // in compile time, the shape from Ids -> output
     // should be [-1, 1] -> [-1, embedding_size]
diff --git a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h
index 3fffdf7e020..aeaec84ba5c 100644
--- a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h
+++ b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h
@@ -90,8 +90,17 @@ struct EmbeddingVSumFunctor {
     int64_t idx_width = ids_t->numel() / ids_lod.back();
     auto *output = output_t->mutable_data<T>(context.GetPlace());
 
-    PADDLE_ENFORCE_LE(table_width * idx_width, out_width);
-    PADDLE_ENFORCE_GT(ids_lod.size(), 1UL, "The LoD[0] could NOT be empty");
+    PADDLE_ENFORCE_LE(table_width * idx_width, out_width,
+                      platform::errors::InvalidArgument(
+                          "table_width * idx_width should be less than or "
+                          "equal to out_width. But received "
+                          "table_width * idx_width = %s, out_width = %d.",
+                          table_width * idx_width, out_width));
+    PADDLE_ENFORCE_GT(ids_lod.size(), 1UL,
+                      platform::errors::InvalidArgument(
+                          "The tensor ids's LoD[0] should be greater than 1. "
+                          "But received the ids's LoD[0] = %d.",
+                          ids_lod.size()));
 
     jit::emb_seq_pool_attr_t attr(table_height, table_width, 0, idx_width,
                                   out_width, jit::SeqPoolType::kSum);
@@ -130,7 +139,10 @@ class FusedEmbeddingSeqPoolKernel : public framework::OpKernel<T> {
     const auto &ids_lod = ids_t->lod();
     // in run time, the LoD of ids must be 1
     PADDLE_ENFORCE_EQ(ids_lod.size(), 1UL,
-                      "The LoD level of Input(Ids) must be 1");
+                      platform::errors::InvalidArgument(
+                          "The LoD level of Input(Ids) should be 1. But "
+                          "received Ids's LoD level = %d.",
+                          ids_lod.size()));
     int64_t batch_size = ids_lod[0].size() - 1;
     // in run time, the shape from Ids -> output
     // should be [seq_length, 1] -> [batch_size, last_dim]
@@ -244,7 +256,10 @@ class FusedEmbeddingSeqPoolGradKernel : public framework::OpKernel<T> {
 
     const auto &ids_lod = ids->lod();
     PADDLE_ENFORCE_EQ(ids_lod.size(), 1UL,
-                      "The LoD level of Input(Ids) must be 1");
+                      platform::errors::InvalidArgument(
+                          "The LoD level of Input(Ids) should be 1. But "
+                          "received Ids's LoD level = %d.",
+                          ids_lod.size()));
     const std::vector<uint64_t> offset = ids_lod[0];
     auto len = ids->numel();
     int idx_width = len / offset.back();
-- 
GitLab