diff --git a/paddle/fluid/framework/variable.h b/paddle/fluid/framework/variable.h
index 75d2f32d7316adccbfa30e40acc92c17bc063ee5..cf788ab013199e1567472864df7e28ae65d03c5a 100644
--- a/paddle/fluid/framework/variable.h
+++ b/paddle/fluid/framework/variable.h
@@ -30,11 +30,13 @@ class Variable {
     static_assert(
         IsRegisteredVarType<T>(),
         "Not registered type. Please register T inside var_type_traits.h");
-    PADDLE_ENFORCE(holder_ != nullptr, "Variable is not initialized.");
-    PADDLE_ENFORCE(holder_->Type() == VarTypeTrait<T>::kId,
-                   "The Variable type must be %s, but the type it holds is %s.",
-                   ToTypeName(VarTypeTrait<T>::kId),
-                   ToTypeName(holder_->Type()));
+    PADDLE_ENFORCE_NOT_NULL(
+        holder_, platform::errors::NotFound("Variable is not initialized."));
+    PADDLE_ENFORCE_EQ(
+        holder_->Type(), VarTypeTrait<T>::kId,
+        platform::errors::InvalidArgument(
+            "The Variable type must be %s, but the type it holds is %s.",
+            ToTypeName(VarTypeTrait<T>::kId), ToTypeName(holder_->Type())));
     return *static_cast<const T*>(holder_->Ptr());
   }
@@ -45,10 +47,11 @@ class Variable {
     if (!holder_) {
       holder_.reset(new PlaceholderImpl<T>());
     } else {
-      PADDLE_ENFORCE(
-          holder_->Type() == VarTypeTrait<T>::kId,
-          "The Variable type must be %s, but the type it holds is %s.",
-          ToTypeName(VarTypeTrait<T>::kId), ToTypeName(holder_->Type()));
+      PADDLE_ENFORCE_EQ(
+          holder_->Type(), VarTypeTrait<T>::kId,
+          platform::errors::InvalidArgument(
+              "The Variable type must be %s, but the type it holds is %s.",
+              ToTypeName(VarTypeTrait<T>::kId), ToTypeName(holder_->Type())));
     }
     return static_cast<T*>(holder_->Ptr());
   }
@@ -61,7 +64,8 @@ class Variable {
   void Clear() { holder_.reset(); }

   int Type() const {
-    PADDLE_ENFORCE(holder_ != nullptr, "Variable is not initialized.");
+    PADDLE_ENFORCE_NOT_NULL(
+        holder_, platform::errors::NotFound("Variable is not initialized."));
     return holder_->Type();
   }
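Editorial note (not part of the patch): the hunks above replace boolean
PADDLE_ENFORCE(cond, ...) checks with PADDLE_ENFORCE_NOT_NULL and
PADDLE_ENFORCE_EQ checks carrying a categorized platform::errors payload.
The standalone sketch below illustrates why the comparison form is
preferable; SKETCH_ENFORCE_EQ is a hypothetical stand-in for
PADDLE_ENFORCE_EQ, not Paddle's implementation. Because the macro receives
both operands, it can always report their values and source expressions,
even when the caller's message omits them.

    #include <sstream>
    #include <stdexcept>

    // Hypothetical stand-in for PADDLE_ENFORCE_EQ (illustration only): the
    // macro evaluates both operands itself, so the exception can report the
    // two values regardless of what `msg` says.
    #define SKETCH_ENFORCE_EQ(lhs, rhs, msg)                          \
      do {                                                            \
        const auto& l_ = (lhs);                                       \
        const auto& r_ = (rhs);                                       \
        if (!(l_ == r_)) {                                            \
          std::ostringstream os_;                                     \
          os_ << (msg) << " [" << #lhs << " = " << l_ << ", " << #rhs \
              << " = " << r_ << "]";                                  \
          throw std::runtime_error(os_.str());                        \
        }                                                             \
      } while (0)

    int main() {
      int held_type = 3, expected_type = 7;
      // Throws: "Variable type mismatch [held_type = 3, expected_type = 7]"
      SKETCH_ENFORCE_EQ(held_type, expected_type, "Variable type mismatch");
      return 0;
    }

The same reasoning applies to PADDLE_ENFORCE_NOT_NULL, which can name the
null expression itself instead of relying on the caller's message.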
"Currently, FusionSeqConvEltAddReluOp only supports contextStride=1."); - PADDLE_ENFORCE(x_dims.size() == 2 && w_dims.size() == 2, - "Input(X, Filter) should be 2-D tensor."); - PADDLE_ENFORCE(x_dims.size() == 2 && w_dims.size() == 2, - "Input(X, Filter) should be 2-D tensor."); - PADDLE_ENFORCE(w_dims[0] == context_length * x_dims[1], - "Filter's height should be context_length * " - "input_hidden_size ."); - PADDLE_ENFORCE_GT(context_length + ctx->Attrs().Get("contextStart"), 0, - "contextStart size should be smaller than contextLength."); + PADDLE_ENFORCE_EQ(ctx->Attrs().Get("contextStride"), 1, + platform::errors::InvalidArgument( + "Currently, FusionSeqConvEltAddReluOp only supports " + "contextStride=1, but received value is: %d.", + ctx->Attrs().Get("contextStride"))); + + PADDLE_ENFORCE_EQ( + x_dims.size(), 2, + platform::errors::InvalidArgument( + "Input(X) should be 2-D tensor, but reveiced value is: %d.", + x_dims.size())); + + PADDLE_ENFORCE_EQ( + w_dims.size(), 2, + platform::errors::InvalidArgument( + "Filter should be 2-D tensor, but reveiced value is: %d.", + w_dims.size())); + + PADDLE_ENFORCE_EQ(w_dims[0], context_length * x_dims[1], + platform::errors::InvalidArgument( + "Filter's height should be equal to context_length * " + "input_hidden_size, but received Filter height is: %d," + "context_length is: %d, input_hidden_size is: %d.", + w_dims[0], context_length, x_dims[1])); + + PADDLE_ENFORCE_GT( + context_length + ctx->Attrs().Get("contextStart"), 0, + platform::errors::InvalidArgument( + "contextStart size should be smaller than contextLength, " + "but received context_length is: %d, contextStart is: " + "%d.", + context_length, ctx->Attrs().Get("contextStart"))); ctx->SetOutputDim("Out", {x_dims[0], w_dims[1]}); ctx->SetOutputDim("ColMat", {x_dims[0], w_dims[0]}); @@ -130,10 +147,17 @@ class FusionSeqConvEltAddReluKernel : public framework::OpKernel { auto x_lod = x->lod(); auto x_dims = x->dims(); auto w_dims = w->dims(); - PADDLE_ENFORCE_EQ(b->numel(), w_dims[1], - "bias size should be equal to output feature size."); - PADDLE_ENFORCE_EQ(x_lod.size(), 1UL, - "Only support one level sequence now."); + PADDLE_ENFORCE_EQ( + b->numel(), w_dims[1], + platform::errors::InvalidArgument( + "bias size should be equal to weights feature size, but received " + "bias size is: %d, weights feature size is: %d.", + b->numel(), w_dims[1])); + PADDLE_ENFORCE_EQ( + x_lod.size(), 1UL, + platform::errors::InvalidArgument( + "Only support one level sequence now, but received value is: %d.", + x_lod.size())); const T* x_data = x->data(); const T* w_data = w->data(); @@ -183,7 +207,12 @@ class FusionSeqConvEltAddReluKernel : public framework::OpKernel { copy_size -= src_mat_w_sz; } } else { - PADDLE_ENFORCE_GE(context_length, up_pad + down_pad + 1); + PADDLE_ENFORCE_GE(context_length, up_pad + down_pad + 1, + platform::errors::InvalidArgument( + "context length must be bigger or equal than " + "up_pad + down_pad + 1, but received context " + "length is: %d, up_pad is: %d, down_pad is: %d.", + context_length, up_pad, down_pad)); std::memset(dst_data, 0, seq_len * col_mat_w_sz); dst_data = dst_data + up_pad * src_mat_w; int zero_sz = up_pad * src_mat_w_sz; diff --git a/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc b/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc index d79bf7cdcc82f2e3be368023dd87289afeeb8a17..2f52ee226bc5f61bcaec059b5fe2d7876a6feb4a 100644 --- a/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc +++ 
diff --git a/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc b/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc
index d79bf7cdcc82f2e3be368023dd87289afeeb8a17..2f52ee226bc5f61bcaec059b5fe2d7876a6feb4a 100644
--- a/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc
+++ b/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc
@@ -24,38 +24,59 @@ namespace operators {
 void FusionSeqExpandConcatFCOp::InferShape(
     framework::InferShapeContext* ctx) const {
-  PADDLE_ENFORCE_GT(
-      ctx->Inputs("X").size(), 1UL,
-      "Inputs(X) of FusionSeqExpandConcatFCOp should larger than 1.");
-  PADDLE_ENFORCE(
-      ctx->HasInput("FCWeight"),
-      "Input(FCWeight) of FusionSeqExpandConcatFCOp should not be null.");
-  PADDLE_ENFORCE(
-      ctx->HasOutput("Out"),
-      "Output(Out) of FusionSeqExpandConcatFCOp should not be null.");
-  PADDLE_ENFORCE(
-      ctx->HasOutput("FCOut"),
-      "Output(FCOut) of FusionSeqExpandConcatFCOp should not be null.");
+  PADDLE_ENFORCE_GT(ctx->Inputs("X").size(), 1UL,
+                    platform::errors::InvalidArgument(
+                        "Inputs(X) of FusionSeqExpandConcatFCOp should be "
+                        "larger than 1, but received value is: %d.",
+                        ctx->Inputs("X").size()));
+  OP_INOUT_CHECK(ctx->HasInput("FCWeight"), "Input", "FCWeight",
+                 "fusion_seqexpand_concat_fc");
+  OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out",
+                 "fusion_seqexpand_concat_fc");
+  OP_INOUT_CHECK(ctx->HasOutput("FCOut"), "Output", "FCOut",
+                 "fusion_seqexpand_concat_fc");
 
   auto ins_dims = ctx->GetInputsDim("X");
   auto w_dims = ctx->GetInputDim("FCWeight");  // (M0+M1+M2+..) x D
-  PADDLE_ENFORCE_EQ(w_dims.size(), 2, "Input(FCWeight)'s rank must be 2.");
+  PADDLE_ENFORCE_EQ(
+      w_dims.size(), 2,
+      platform::errors::InvalidArgument(
+          "Input(FCWeight)'s rank must be 2, but received value is: %d.",
+          w_dims.size()));
   const int D = w_dims[1];
   int sum = ins_dims[0][1];
   for (size_t i = 1; i < ins_dims.size(); ++i) {
     sum += ins_dims[i][1];
   }
-  PADDLE_ENFORCE_EQ(sum, w_dims[0],
-                    "FC height should be sum of all inputs width.");
+  PADDLE_ENFORCE_EQ(sum, w_dims[0],
+                    platform::errors::InvalidArgument(
+                        "FC height should be the sum of all inputs' width, "
+                        "but received FC height is: %d, sum of all inputs' "
+                        "width is: %d.",
+                        w_dims[0], sum));
   if (ctx->HasInput("FCBias")) {
     auto b_dims = ctx->GetInputDim("FCBias");
-    PADDLE_ENFORCE(b_dims.size() == 1 || b_dims.size() == 2,
-                   "b_dims should be 1 or 2, get %d", b_dims.size());
+    PADDLE_ENFORCE_EQ(
+        b_dims.size() == 1 || b_dims.size() == 2, true,
+        platform::errors::InvalidArgument(
+            "FCBias dim should be 1 or 2, but received value is: %d.",
+            b_dims.size()));
     if (b_dims.size() == 1) {
-      PADDLE_ENFORCE_EQ(b_dims[0], D, "FCBias shapes must be %d.", D);
+      PADDLE_ENFORCE_EQ(b_dims[0], D,
+                        platform::errors::InvalidArgument(
+                            "FCBias shapes must be %d when FCBias dim = 1, "
+                            "but received value is: %d.",
+                            D, b_dims[0]));
     } else {
-      PADDLE_ENFORCE_EQ(b_dims[0], 1, "FCBias shapes must be 1x%d.", D);
-      PADDLE_ENFORCE_EQ(b_dims[1], D, "FCBias shapes must be 1x%d.", D);
+      PADDLE_ENFORCE_EQ(b_dims[0], 1,
+                        platform::errors::InvalidArgument(
+                            "FCBias shapes must be 1x%d when FCBias dim = 2, "
+                            "but received dim[0] is: %d.",
+                            D, b_dims[0]));
+      PADDLE_ENFORCE_EQ(b_dims[1], D,
+                        platform::errors::InvalidArgument(
+                            "FCBias shapes must be 1x%d when FCBias dim = 2, "
+                            "but received dim[1] is: %d.",
+                            D, b_dims[1]));
     }
   }
@@ -133,18 +154,42 @@ class FusionSeqExpandConcatFCOpKernel : public framework::OpKernel<T> {
     // some check and fcout should be reshape here
     // since infershape can not get lod info
-    PADDLE_ENFORCE_EQ(ref_lod.size(), 1UL, "Only support input lod size is 1.");
-    PADDLE_ENFORCE_EQ(in1_lod.size(), 1UL, "Only support input lod size is 1.");
+    PADDLE_ENFORCE_EQ(
+        ref_lod.size(), 1UL,
+        platform::errors::InvalidArgument(
+            "Only support input lod size is 1, but received value is: %d.",
+            ref_lod.size()));
+    PADDLE_ENFORCE_EQ(
+        in1_lod.size(), 1UL,
+        platform::errors::InvalidArgument(
+            "Only support input lod size is 1, but received value is: %d.",
+            in1_lod.size()));
     PADDLE_ENFORCE_EQ(static_cast<int>(in1_lod[0].size() - 1), N,
-                      "Batch size of all inputs should be equal.");
-    PADDLE_ENFORCE_EQ(static_cast<int>(in1_lod[0][N]), N,
-                      "Seq_length of other inputs should be 1.");
-    PADDLE_ENFORCE_EQ(in1_dims[0], N, "input height should be batch size.");
+                      platform::errors::InvalidArgument(
+                          "Batch size of all inputs should be equal to %d, "
+                          "but received value is: %d.",
+                          N, static_cast<int>(in1_lod[0].size() - 1)));
+    PADDLE_ENFORCE_EQ(
+        static_cast<int>(in1_lod[0][N]), N,
+        platform::errors::InvalidArgument("Seq_length of other inputs should "
+                                          "be %d, but received value is: %d.",
+                                          N, static_cast<int>(in1_lod[0][N])));
+    PADDLE_ENFORCE_EQ(
+        in1_dims[0], N,
+        platform::errors::InvalidArgument(
+            "input height should be batch size: %d, but received value is: "
+            "%d.",
+            N, in1_dims[0]));
     for (size_t i = 2; i < ins.size(); ++i) {
       PADDLE_ENFORCE_EQ(ins[i]->dims()[0], N,
-                        "All other inputs height should be equal");
+                        platform::errors::InvalidArgument(
+                            "All other inputs' height should be equal to %d, "
+                            "but received value is: %d.",
+                            N, ins[i]->dims()[0]));
       PADDLE_ENFORCE_EQ(ins[i]->lod(), in1_lod,
-                        "All other inputs should have same lod");
+                        platform::errors::InvalidArgument(
+                            "All other inputs should have the same lod: %s, "
+                            "but received value is: %s.",
+                            in1_lod, ins[i]->lod()));
     }
     fc_out->Resize({N, D});
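Editorial note (not part of the patch): the kernel checks above enforce LoD
invariants that InferShape cannot verify, since lod info is only available
at run time. A self-contained sketch of those invariants, assuming the
usual level-0 LoD convention of an offset vector {0, o1, ..., oN} for N
sequences; the helper below is illustrative, not Paddle code:

    #include <cassert>
    #include <vector>

    using LoD = std::vector<std::vector<size_t>>;

    // True iff `lod` has one level, describes exactly `n` sequences, and
    // every sequence has length 1 (last offset == n), matching the three
    // PADDLE_ENFORCE_EQ checks on in1_lod above.
    bool OtherInputLodOk(const LoD& lod, int n) {
      if (lod.size() != 1) return false;                           // one lod level
      if (static_cast<int>(lod[0].size()) - 1 != n) return false;  // batch size
      return static_cast<int>(lod[0].back()) == n;                 // all lengths 1
    }

    int main() {
      LoD ok = {{0, 1, 2, 3}};   // 3 sequences, each of length 1
      LoD bad = {{0, 2, 3, 4}};  // first sequence has length 2
      assert(OtherInputLodOk(ok, 3));
      assert(!OtherInputLodOk(bad, 3));
      return 0;
    }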