diff --git a/paddle/fluid/operators/fused/fused_bn_activation_op.cc b/paddle/fluid/operators/fused/fused_bn_activation_op.cc
index 97cd4d90be689ac7e891af9fe098b56bea000166..e9ad2895e03db8e77470c490453427a41d8e3bba 100644
--- a/paddle/fluid/operators/fused/fused_bn_activation_op.cc
+++ b/paddle/fluid/operators/fused/fused_bn_activation_op.cc
@@ -173,7 +173,9 @@ void FusedBatchNormActOpMaker::Make() {
       .AddCustomChecker([](const float &epsilon) {
         PADDLE_ENFORCE_EQ(epsilon >= 0.0f && epsilon <= 0.001f, true,
                           platform::errors::InvalidArgument(
-                              "'epsilon' should be between 0.0 and 0.001."));
+                              "Attr(epsilon) should be between 0.0 and 0.001, "
+                              "but received value is %f.",
+                              epsilon));
       });
   AddAttr<std::string>("act_type", "The activation type to be fused.")
       .SetDefault("relu");
diff --git a/paddle/fluid/operators/fused/fused_embedding_eltwise_layernorm_op.cc b/paddle/fluid/operators/fused/fused_embedding_eltwise_layernorm_op.cc
index b53b407d4995da5d548a13fec20ff3b09a5583c4..4d270280d389c6d8c34e3a5691a41a684b537577 100644
--- a/paddle/fluid/operators/fused/fused_embedding_eltwise_layernorm_op.cc
+++ b/paddle/fluid/operators/fused/fused_embedding_eltwise_layernorm_op.cc
@@ -25,11 +25,13 @@ class EmbeddingEltWiseLayerNormOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext* context) const override {
-    PADDLE_ENFORCE_EQ(context->Inputs("Ids").size(),
-                      context->Inputs("Embs").size(),
-                      platform::errors::InvalidArgument(
-                          "Two inputs of EmbeddingEltWiseLayerNormOp shoube be "
-                          "the same size"));
+    PADDLE_ENFORCE_EQ(
+        context->Inputs("Ids").size(), context->Inputs("Embs").size(),
+        platform::errors::InvalidArgument(
+            "Two inputs of EmbeddingEltWiseLayerNormOp should be "
+            "the same size, but received the size of input Ids = %d,"
+            " the size of input Embs = %d.",
+            context->Inputs("Ids").size(), context->Inputs("Embs").size()));
     PADDLE_ENFORCE_GE(context->Inputs("Embs").size(), 2UL,
                       platform::errors::InvalidArgument(
                           "Input Embs of EmbeddingEltWiseLayerNormOp should "
@@ -77,7 +79,8 @@ class EmbeddingEltWiseLayerNormOp : public framework::OperatorWithKernel {
       PADDLE_ENFORCE_EQ(
          embs_dims[i][1], hidden,
          platform::errors::InvalidArgument(
-              "The Emb first dim size(%d) shoule equal to hidden (%d).",
+              "The second dimension size(%d) of the Embedding should be "
+              "equal to the hidden size(%d).",
              embs_dims[i][1], hidden));
     }
 
diff --git a/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cc b/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cc
index bd376b1e7aaefbf890e174cc86899b990a9fed26..382d01f6a535c76bdd38102a0cb40e5afc345f07 100644
--- a/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cc
+++ b/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cc
@@ -40,7 +40,9 @@ class TransposeFlattenConcatFusionOp : public framework::OperatorWithKernel {
     const size_t n = ins.size();
     PADDLE_ENFORCE_GT(n, 0,
                       platform::errors::InvalidArgument(
-                          "Input tensors dim size should greater than 0."));
+                          "The size of Inputs(X) should be greater "
+                          "than 0, but received %d.",
+                          n));
     std::vector<int> trans_axis =
         ctx->Attrs().Get<std::vector<int>>("trans_axis");
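
Note: every hunk above applies the same pattern: replace a bare assertion message with one that interpolates the offending runtime values into the error text. The sketch below illustrates that pattern in isolation; ENFORCE_EQ and FormatMessage are simplified stand-ins invented here for illustration, not Paddle's actual PADDLE_ENFORCE_* machinery (which uses a type-safe Sprintf and richer error classes).

// Minimal standalone sketch of a value-carrying enforce check.
// ENFORCE_EQ and FormatMessage are hypothetical helpers, not Paddle APIs.
#include <cstdio>
#include <stdexcept>
#include <string>

// Hypothetical helper: printf-style formatting into a std::string.
template <typename... Args>
std::string FormatMessage(const char* fmt, Args... args) {
  char buf[512];
  std::snprintf(buf, sizeof(buf), fmt, args...);
  return std::string(buf);
}

// Hypothetical stand-in for PADDLE_ENFORCE_EQ: compares two values and
// throws with a formatted message that reports the actual values.
#define ENFORCE_EQ(a, b, ...)                                  \
  do {                                                         \
    if ((a) != (b)) {                                          \
      throw std::invalid_argument(FormatMessage(__VA_ARGS__)); \
    }                                                          \
  } while (0)

int main() {
  size_t ids_size = 3, embs_size = 2;
  try {
    ENFORCE_EQ(ids_size, embs_size,
               "Two inputs should be the same size, but received the size "
               "of input Ids = %zu, the size of input Embs = %zu.",
               ids_size, embs_size);
  } catch (const std::invalid_argument& e) {
    std::printf("%s\n", e.what());  // message carries both actual sizes
  }
  return 0;
}

The payoff of the pattern is visible in the catch block: the exception text tells the user which sizes actually disagreed, rather than only restating the rule that was violated.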