From e65c5b8e83ec4469ae5ffc2b76920f9212624b50 Mon Sep 17 00:00:00 2001
From: wanghuancoder
Date: Wed, 15 Jul 2020 21:22:05 +0800
Subject: [PATCH] fix some errmsg report in framework/ir/ (#25471)

* fix paddle/fluid/framework/ir/ error msg report, test=develop

* modify error msg report in ir/, about error type, grammar, supplementary info, test=develop

* modified some unclear descriptions, test=develop

* fix the problem that report msg is less than 20 characters, test=develop
---
 .../framework/ir/attention_lstm_fuse_pass.cc  | 14 ++++-
 .../framework/ir/coalesce_grad_tensor_pass.cc | 63 +++++++++++++------
 .../ir/conv_affine_channel_fuse_pass.cc       | 19 ++++--
 .../fluid/framework/ir/conv_bn_fuse_pass.cc   | 32 +++++++---
 .../framework/ir/conv_bn_fuse_pass_tester.cc  | 12 +++-
 .../ir/conv_elementwise_add2_act_fuse_pass.cc |  4 +-
 .../ir/conv_elementwise_add_act_fuse_pass.cc  |  4 +-
 .../ir/conv_elementwise_add_fuse_pass.cc      |  4 +-
 .../ir/embedding_fc_lstm_fuse_pass.cc         | 16 +++--
 ..._elementwise_layernorm_fuse_pass_tester.cc | 13 +++-
 paddle/fluid/framework/ir/fc_fuse_pass.cc     |  3 +-
 .../fluid/framework/ir/fc_fuse_pass_tester.cc | 14 ++++-
 paddle/fluid/framework/ir/fc_gru_fuse_pass.cc | 17 +++--
 .../fluid/framework/ir/fc_lstm_fuse_pass.cc   | 10 ++-
 14 files changed, 169 insertions(+), 56 deletions(-)

diff --git a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
index a56fcd1a523..a4b43086785 100644
--- a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
@@ -135,7 +135,9 @@ void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input,
 
 void PrepareParameters(Graph* graph, const Param& param, ir::Node* lstm_op) {
   // Check parameters
-  PADDLE_ENFORCE(graph->Has(kParamScopeAttr));
+  PADDLE_ENFORCE_EQ(graph->Has(kParamScopeAttr), true,
+                    platform::errors::InvalidArgument(
+                        "Graph has no attribute kParamScopeAttr."));
   auto& scope = graph->Get(kParamScopeAttr);
 
   // Create new parameters.
@@ -193,7 +195,10 @@ void PrepareParameters(Graph* graph, const Param& param, ir::Node* lstm_op) {
   // reshape attention_bias
   auto* attention_bias_t =
       scope.FindVar(param.AttentionBias)->GetMutable();
-  PADDLE_ENFORCE_EQ(attention_bias_t->dims().size(), 1);
+  PADDLE_ENFORCE_EQ(attention_bias_t->dims().size(), 1,
+                    platform::errors::InvalidArgument(
+                        "Tensor attention bias dimension size(%d) must be 1.",
+                        attention_bias_t->dims().size()));
   attention_bias_t->Resize(make_ddim({1, attention_bias_t->dims()[0]}));
 
   auto* attention_scalar_bias_t =
@@ -252,7 +257,10 @@ void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input,
       B_forget.data(), B_input.data(), B_output.data(),
       B_cell.data()};
-  PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1);
+  PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1,
+                    platform::errors::InvalidArgument(
+                        "Tensor B_forget dimension size(%d) must be 1.",
+                        B_forget.dims().size()));
   int D = B_forget.dims()[0];
   out->Resize(make_ddim({1, 4 * D}));
   auto* out_data = out->mutable_data(platform::CPUPlace());
diff --git a/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc b/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc
index d7faf2ee648..f3634f90e6c 100644
--- a/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc
+++ b/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc
@@ -119,9 +119,11 @@ class CoalesceGradTensorPass : public ir::Pass {
       p_g_dense_grad.insert(p_g_dense_grad.end(), group_p_g.begin(),
                             group_p_g.end());
     }
-    PADDLE_ENFORCE_EQ(
-        p_g_dense_grad.size(), num_of_p_g_dense_grad,
-        "The number of p_g_dense_grad is not consistent with before.");
+    PADDLE_ENFORCE_EQ(p_g_dense_grad.size(), num_of_p_g_dense_grad,
+                      platform::errors::InvalidArgument(
+                          "The number of dense grads is not consistent with "
+                          "before. Now(%d), previous(%d).",
+                          p_g_dense_grad.size(), num_of_p_g_dense_grad));
 
     auto &pinned_var_set =
         graph->GetOrInit(details::kPinnedVars);
@@ -131,8 +133,11 @@ class CoalesceGradTensorPass : public ir::Pass {
     } else {
       for (auto &sub_param_grad : group_params_grads) {
         RecordGradients(p_g_dense_grad, vars_info, &pinned_var_set);
-        PADDLE_ENFORCE_EQ(IsUnifiedDtype(sub_param_grad, vars_info), true,
-                          "The data type of the same group is not consistent.");
+        PADDLE_ENFORCE_EQ(
+            IsUnifiedDtype(sub_param_grad, vars_info), true,
+            platform::errors::InvalidArgument("All gradient variables in "
+                                              "kGroupParamsAndDenseGrads must "
+                                              "have the same data type."));
         CoalesceTensors(vars_info, sub_param_grad, &result);
       }
     }
@@ -145,15 +150,25 @@ class CoalesceGradTensorPass : public ir::Pass {
     // The Gradients should not be reused during memory optimization.
     for (auto &p_g : sub_param_grad) {
       auto iter = vars_info.find(p_g.second);
-      PADDLE_ENFORCE_EQ(iter != vars_info.end(), true, "%s is not found.",
-                        p_g.second);
-      PADDLE_ENFORCE_EQ(!iter->second.empty(), true);
+      PADDLE_ENFORCE_EQ(iter != vars_info.end(), true,
+                        platform::errors::NotFound(
+                            "Parameter@Grad %s is not found.", p_g.second));
+      PADDLE_ENFORCE_EQ(
+          !iter->second.empty(), true,
+          platform::errors::InvalidArgument(
+              "Parameter@Grad %s's var node is empty.", p_g.second));
       for (auto it : iter->second) {
-        PADDLE_ENFORCE_NOT_NULL(it->Var());
+        PADDLE_ENFORCE_NOT_NULL(
+            it->Var(),
+            platform::errors::InvalidArgument(
+                "A node of Parameter@Grad %s does not hold a variable.",
+                p_g.second));
         pinned_var_set->insert(it->Var()->Name());
       }
       PADDLE_ENFORCE_EQ(IsLoDTensorType(GetTypeOfVar(vars_info, p_g.second)),
-                        true);
+                        true,
+                        platform::errors::InvalidArgument(
+                            "Parameter@Grad %s is not a LoDTensor.",
+                            p_g.second));
     }
   }
 
@@ -192,8 +207,10 @@ class CoalesceGradTensorPass : public ir::Pass {
     auto fused_grad_var_name = std::string(details::kFusedVarNamePrefix) +
                                "@GRAD@" + params_grads.begin()->second;
     auto &fused_var_set = result->Get(details::kFusedVars);
-    PADDLE_ENFORCE_EQ(fused_var_set.count(fused_grad_var_name), 0,
-                      "%s is duplicate in FusedVars.", fused_grad_var_name);
+    PADDLE_ENFORCE_EQ(
+        fused_var_set.count(fused_grad_var_name), 0,
+        platform::errors::AlreadyExists("Var(%s) is duplicated in FusedVars.",
+                                        fused_grad_var_name));
     fused_var_set.insert(fused_grad_var_name);
     result->Get(details::kFusedGrads)
         .emplace_back(fused_grad_var_name);
@@ -420,11 +437,16 @@ class CoalesceGradTensorPass : public ir::Pass {
       const std::unordered_map> &vars_info,
       const std::string &var_name) const {
     auto grad_iter = vars_info.find(var_name);
-    PADDLE_ENFORCE_EQ(grad_iter != vars_info.end(), true, "%s is not found.",
-                      var_name);
-    PADDLE_ENFORCE_EQ(!grad_iter->second.empty(), true, "%s is not found.",
-                      var_name);
-    PADDLE_ENFORCE_NOT_NULL(grad_iter->second.front()->Var());
+    PADDLE_ENFORCE_EQ(
+        grad_iter != vars_info.end(), true,
+        platform::errors::NotFound("Variable %s is not found.", var_name));
+    PADDLE_ENFORCE_EQ(!grad_iter->second.empty(), true,
+                      platform::errors::InvalidArgument(
+                          "Variable %s's node is empty.", var_name));
+    PADDLE_ENFORCE_NOT_NULL(
+        grad_iter->second.front()->Var(),
+        platform::errors::InvalidArgument(
+            "A node of %s does not hold a variable.", var_name));
     return grad_iter->second.front()->Var();
   }
 
@@ -464,7 +486,12 @@ class CoalesceGradTensorPass : public ir::Pass {
       params_name.emplace_back(p_g.first);
       grads_name.emplace_back(p_g.second);
       auto next_dtype = GetDtypeOfVar(vars_info, p_g.second);
-      PADDLE_ENFORCE_EQ(next_dtype, dtype);
+      PADDLE_ENFORCE_EQ(
+          next_dtype, dtype,
+          platform::errors::InvalidArgument(
+              "All Parameter@Grad should have the same dtype, but "
+              "there are two different types: %s, %s.",
+              DataTypeToString(next_dtype), DataTypeToString(dtype)));
     }
 
     result->Get(details::kProgramDescs).emplace_back();
diff --git a/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc b/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc
index fecc159adef..079fb147986 100644
--- a/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc
@@ -50,7 +50,12 @@ void recompute_bias_and_weights(const Scope* scope, ir::Node* conv_weight,
                     Eigen::Array>;
 
   // Re-compute bias of conv2d from AffineChannel
-  PADDLE_ENFORCE_EQ(eltwise_y_in_tensor->dims(), ac_bias_tensor.dims());
+  PADDLE_ENFORCE_EQ(
+      eltwise_y_in_tensor->dims(), ac_bias_tensor.dims(),
+      platform::errors::InvalidArgument(
+          "Tensor elementwise y(%d) and activation bias(%d) must have the "
+          "same dimension.",
+          eltwise_y_in_tensor->dims().size(), ac_bias_tensor.dims().size()));
 
   auto* scale_tensor = scope->FindVar(ac_scale.Name())->GetMutable();
 
@@ -78,11 +83,13 @@ void recompute_bias_and_weights(const Scope* scope, ir::Node* conv_weight,
 }
 
 void ConvAffineChannelFusePass::ApplyImpl(ir::Graph* graph) const {
-  PADDLE_ENFORCE(graph);
+  PADDLE_ENFORCE_NOT_NULL(
+      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
   FusePassBase::Init(name_scope_, graph);
 
   auto* scope = param_scope();
-  PADDLE_ENFORCE(scope);
+  PADDLE_ENFORCE_NOT_NULL(
+      scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
 
   GraphPatternDetector gpd;
   auto* conv_input =
@@ -152,11 +159,13 @@ void ConvAffineChannelFusePass::ApplyImpl(ir::Graph* graph) const {
 }
 
 void ConvEltwiseAddAffineChannelFusePass::ApplyImpl(ir::Graph* graph) const {
-  PADDLE_ENFORCE(graph);
+  PADDLE_ENFORCE_NOT_NULL(
+      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
   FusePassBase::Init(name_scope_, graph);
 
   auto* scope = param_scope();
-  PADDLE_ENFORCE(scope);
+  PADDLE_ENFORCE_NOT_NULL(
+      scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
 
   GraphPatternDetector gpd;
   auto* conv_input =
diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
index 7313ef2cc35..60e4ac8cbcf 100644
--- a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
@@ -61,7 +61,12 @@ void recompute_bias_and_weights(const Scope* scope,
                     Eigen::Array>;
 
   // Re-compute bias of conv2d from BN
-  PADDLE_ENFORCE_EQ(eltwise_y_in_tensor->dims(), bn_bias_tensor.dims());
+  PADDLE_ENFORCE_EQ(
+      eltwise_y_in_tensor->dims(), bn_bias_tensor.dims(),
+      platform::errors::InvalidArgument("Tensor elementwise y(%d) and batch "
+                                        "norm bias(%d) must have the same "
+                                        "dims.",
+                                        eltwise_y_in_tensor->dims().size(),
+                                        bn_bias_tensor.dims().size()));
 
   auto* scale_tensor = scope->FindVar(bn_scale.Name())->GetMutable();
   auto* variance_tensor =
@@ -116,11 +121,13 @@ void recompute_bias_and_weights(const Scope* scope,
 }
 
 void ConvBNFusePass::ApplyImpl(ir::Graph* graph) const {
-  PADDLE_ENFORCE(graph);
+  PADDLE_ENFORCE_NOT_NULL(
+      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
   FusePassBase::Init(name_scope_, graph);
 
   auto* scope = param_scope();
-  PADDLE_ENFORCE(scope);
+  PADDLE_ENFORCE_NOT_NULL(
+      scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
 
   GraphPatternDetector gpd;
   auto* conv_input =
@@ -186,11 +193,18 @@ void ConvBNFusePass::ApplyImpl(ir::Graph* graph) const {
     if (has_bias && conv->Op()->Input("Bias").size() > 0) {
       // reuse existing conv bias node
       auto conv_bias_names = conv->Op()->Input("Bias");
-      PADDLE_ENFORCE_EQ(conv_bias_names.size(), 1UL);
+      PADDLE_ENFORCE_EQ(conv_bias_names.size(), 1UL,
+                        platform::errors::InvalidArgument(
+                            "The conv op must have exactly one Bias input."));
       auto* conv_bias_var = scope->FindVar(conv_bias_names[0]);
       auto* conv_bias_tensor = conv_bias_var->GetMutable();
-      PADDLE_ENFORCE_EQ(conv_bias_tensor->dims(),
-                        eltwise_y_in_tensor->dims());
+      PADDLE_ENFORCE_EQ(
+          conv_bias_tensor->dims(), eltwise_y_in_tensor->dims(),
+          platform::errors::InvalidArgument(
+              "Tensor convolution bias(%d) and elementwise y(%d) "
+              "must have the same dims.",
+              conv_bias_tensor->dims().size(),
+              eltwise_y_in_tensor->dims().size()));
 
       auto eigen_conv_bias =
           EigenVector::From(*conv_bias_tensor);
       eigen_conv_bias += EigenVector::From(*eltwise_y_in_tensor);
@@ -236,11 +250,13 @@ void ConvBNFusePass::ApplyImpl(ir::Graph* graph) const {
 }
 
 void ConvEltwiseAddBNFusePass::ApplyImpl(ir::Graph* graph) const {
-  PADDLE_ENFORCE(graph);
+  PADDLE_ENFORCE_NOT_NULL(
+      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
   FusePassBase::Init(name_scope_, graph);
 
   auto* scope = param_scope();
-  PADDLE_ENFORCE(scope);
+  PADDLE_ENFORCE_NOT_NULL(
+      scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
 
   GraphPatternDetector gpd;
   auto* conv_input =
diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_bn_fuse_pass_tester.cc
index 168d0afb26d..74dd6a7cdc5 100644
--- a/paddle/fluid/framework/ir/conv_bn_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass_tester.cc
@@ -71,8 +71,16 @@ void TestMain(const std::string& conv_type) {
   int num_bn_nodes_after = GetNumOpNodes(graph, "batch_norm");
   VLOG(3) << DebugString(graph);
 
-  PADDLE_ENFORCE_EQ(num_bn_nodes_before, 1);
-  PADDLE_ENFORCE_EQ(num_bn_nodes_after, 0);
+  PADDLE_ENFORCE_EQ(
+      num_bn_nodes_before, 1,
+      platform::errors::InvalidArgument(
+          "Before conv_bn_fuse_pass, the number of batch norm ops(%d) must "
+          "be 1.",
+          num_bn_nodes_before));
+  PADDLE_ENFORCE_EQ(
+      num_bn_nodes_after, 0,
+      platform::errors::InvalidArgument(
+          "After conv_bn_fuse_pass, the number of batch norm ops(%d) must "
+          "be 0.",
+          num_bn_nodes_after));
 }
 
 TEST(ConvBNFusePass, conv2d) { TestMain("conv"); }
diff --git a/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc
index b00be79a2a7..2627da7dc40 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc
@@ -91,7 +91,9 @@ void ConvElementwiseAdd2ActFusePass::ApplyImpl(ir::Graph* graph) const {
   auto* new_conv_op = graph->CreateOpNode(&new_op_desc);
 
   // Link inputs and outputs.
-  PADDLE_ENFORCE(subgraph.count(x));
+  PADDLE_ENFORCE_NE(
+      subgraph.count(x), 0,
+      platform::errors::NotFound("Detector did not find input x of conv2d."));
   auto* conv_in_node = subgraph.at(x);
 
   IR_NODE_LINK_TO(conv_in_node, new_conv_op);  // Input
diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc
index b15871ef03f..0b454a0407e 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc
@@ -78,7 +78,9 @@ void ConvElementwiseAddActFusePass::ApplyImpl(ir::Graph* graph) const {
   auto* new_conv_op = graph->CreateOpNode(&new_op_desc);
 
   // Link inputs and outputs.
-  PADDLE_ENFORCE(subgraph.count(x));
+  PADDLE_ENFORCE_NE(
+      subgraph.count(x), 0,
+      platform::errors::NotFound("Detector did not find input x of conv2d."));
   auto* conv_in_node = subgraph.at(x);
 
   IR_NODE_LINK_TO(conv_in_node, new_conv_op);  // Input
diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc
index 8c491d4f58b..007770cf57d 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc
@@ -66,7 +66,9 @@ void ConvElementwiseAddFusePass::ApplyImpl(ir::Graph* graph) const {
   auto* new_conv_op = graph->CreateOpNode(&new_op_desc);
 
   // Link inputs and outputs.
-  PADDLE_ENFORCE(subgraph.count(x));
+  PADDLE_ENFORCE_NE(
+      subgraph.count(x), 0,
+      platform::errors::NotFound("Detector did not find input x of conv2d."));
   auto* conv_in_node = subgraph.at(x);
 
   IR_NODE_LINK_TO(conv_in_node, new_conv_op);  // Input
diff --git a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
index 85e2f2bad32..c50b7476c6a 100644
--- a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
@@ -64,17 +64,23 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
 #undef SET_IN
 
   // Multiply embeddings with Weights
-  PADDLE_ENFORCE(scope);
+  PADDLE_ENFORCE_NOT_NULL(
+      scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
   const std::string& embeddings = patterns::UniqueKey("Embeddings");
   auto* embeddings_var = scope->Var(embeddings);
-  PADDLE_ENFORCE(embeddings_var);
+  PADDLE_ENFORCE_NOT_NULL(
+      embeddings_var,
+      platform::errors::InvalidArgument(
+          "Embeddings variable's pointer cannot be nullptr."));
   auto* embeddings_tensor = embeddings_var->GetMutable();
 
   // Get WeightX size: [single_embedding, fc_size]
   // and embedding size: [dict_size, single_embedding]
   // and create new size of embeddings eg. [dict_size , hidden_size]
   auto* embedding_var = scope->FindVar(W->Name());
-  PADDLE_ENFORCE(embedding_var);
+  PADDLE_ENFORCE_NOT_NULL(
+      embedding_var, platform::errors::InvalidArgument(
+                         "Embedding variable's pointer cannot be nullptr."));
   const auto& embedding_tensor = embedding_var->Get();
 
   const auto& weightx_tensor =
@@ -90,7 +96,9 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
 
   // Adding biases to GEMM result to be
   auto* lstm_bias_var = scope->FindVar(bias->Name());
-  PADDLE_ENFORCE(lstm_bias_var);
+  PADDLE_ENFORCE_NOT_NULL(
+      lstm_bias_var,
+      platform::errors::InvalidArgument(
+          "LSTM bias variable's pointer cannot be nullptr."));
   const auto& lstm_bias_tensor = lstm_bias_var->Get();
 
   auto alpha = 1.0f;
diff --git a/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass_tester.cc b/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass_tester.cc
index c1f822d7ca5..51e9545bf92 100644
--- a/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass_tester.cc
@@ -56,8 +56,17 @@ TEST(FCElementwiseLayerNormFusePass, basic) {
       GetNumOpNodes(graph, "fused_fc_elementwise_layernorm");
   VLOG(3) << DebugString(graph);
 
-  PADDLE_ENFORCE_EQ(num_nodes_before, num_nodes_after + 6);
-  PADDLE_ENFORCE_EQ(num_fused_nodes_after, 1);
+  PADDLE_ENFORCE_EQ(
+      num_nodes_before, num_nodes_after + 6,
+      platform::errors::InvalidArgument(
+          "After the pass, the number of nodes should be reduced by 6, but "
+          "the number before the pass is %d and after the pass is %d.",
+          num_nodes_before, num_nodes_after));
+  PADDLE_ENFORCE_EQ(num_fused_nodes_after, 1,
+                    platform::errors::InvalidArgument(
+                        "After the pass, the number of nodes of type "
+                        "'fused_fc_elementwise_layernorm' should be 1, "
+                        "not %d.",
+                        num_fused_nodes_after));
 }
 
 }  // namespace ir
diff --git a/paddle/fluid/framework/ir/fc_fuse_pass.cc b/paddle/fluid/framework/ir/fc_fuse_pass.cc
index 6a9c64e3a7f..066a8fb9757 100644
--- a/paddle/fluid/framework/ir/fc_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/fc_fuse_pass.cc
@@ -25,7 +25,8 @@ namespace framework {
 namespace ir {
 
 void FCFusePass::ApplyImpl(ir::Graph* graph) const {
-  PADDLE_ENFORCE_NOT_NULL(graph);
+  PADDLE_ENFORCE_NOT_NULL(
+      graph,
platform::errors::InvalidArgument("Graph cannot be nullptr.")); FusePassBase::Init("fc_fuse", graph); int found_fc_count = 0; diff --git a/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc b/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc index dfae572d463..cf35c1ac772 100644 --- a/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc @@ -79,9 +79,17 @@ TEST(FCFusePass, basic) { int num_fc_nodes_after = GetNumOpNodes(graph, "fc"); VLOG(3) << DebugString(graph); - PADDLE_ENFORCE_EQ(num_nodes_before, num_nodes_after + 6); - PADDLE_ENFORCE_EQ(num_fc_nodes_after, 2); - PADDLE_ENFORCE_EQ(num_mul_nodes_before, num_fc_nodes_after); + PADDLE_ENFORCE_EQ(num_nodes_before, num_nodes_after + 6, + platform::errors::InvalidArgument( + "num_nodes_before=%d, num_nodes_after=%d.", + num_nodes_before, num_nodes_after)); + PADDLE_ENFORCE_EQ(num_fc_nodes_after, 2, + platform::errors::InvalidArgument("num_fc_nodes_after=%d.", + num_fc_nodes_after)); + PADDLE_ENFORCE_EQ(num_mul_nodes_before, num_fc_nodes_after, + platform::errors::InvalidArgument( + "num_mul_nodes_before=%d, num_fc_nodes_after=%d.", + num_mul_nodes_before, num_fc_nodes_after)); } } // namespace ir diff --git a/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc b/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc index d26998e6fc9..08dd0302b4b 100644 --- a/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc +++ b/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc @@ -68,18 +68,27 @@ static int BuildFusion(Graph* graph, const std::string& name_scope, #undef SET_IMTERMEDIATE_OUT auto* op = graph->CreateOpNode(&op_desc); - PADDLE_ENFORCE(graph->Has(kParamScopeAttr)); + PADDLE_ENFORCE_EQ(graph->Has(kParamScopeAttr), true, + platform::errors::InvalidArgument( + "Graph have no attr kParamScopeAttr.")); auto& scope = graph->Get(kParamScopeAttr); if (with_fc_bias) { // Fusion GRU bias = fcbias + grubias auto* fusion_bias_var = scope.Var(NEW_NAME(bias) + bias->Name()); auto* out_bias_tensor = fusion_bias_var->GetMutable(); - PADDLE_ENFORCE(fusion_bias_var); + PADDLE_ENFORCE_NOT_NULL( + fusion_bias_var, + platform::errors::InvalidArgument( + "Fusion bias variable's pointer cannot be nullptr.")); auto* gru_bias_var = scope.FindVar(bias->Name()); auto* fc_bias_var = scope.FindVar(fc_bias->Name()); - PADDLE_ENFORCE(gru_bias_var); - PADDLE_ENFORCE(fc_bias_var); + PADDLE_ENFORCE_NOT_NULL(gru_bias_var, + platform::errors::InvalidArgument( + "Gru bias var ptr cannot be nullptr.")); + PADDLE_ENFORCE_NOT_NULL(fc_bias_var, + platform::errors::InvalidArgument( + "Fc bias var ptr cannot be nullptr.")); const auto& gru_bias_tenosr = gru_bias_var->Get(); const auto& fc_bias_tensor = fc_bias_var->Get(); // new bias = fc bias + gru bias diff --git a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc index 44306a72954..12c7fc051e2 100644 --- a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc @@ -52,13 +52,17 @@ int BuildFusion(Graph* graph, const std::string& name_scope, Scope* scope, #undef SET_IN if (with_fc_bias) { // Add FC-bias with LSTM-bias and create a new weight - PADDLE_ENFORCE(scope); + PADDLE_ENFORCE_NOT_NULL( + scope, platform::errors::InvalidArgument("Scope cannot be nullptr.")); const std::string& new_bias_var = patterns::UniqueKey("NewBias"); auto* bias_var = scope->Var(new_bias_var); - PADDLE_ENFORCE(bias_var); + PADDLE_ENFORCE_NOT_NULL(bias_var, platform::errors::InvalidArgument( + "Bias var ptr cannot be nullptr.")); auto* 
     auto* lstm_bias_var = scope->FindVar(bias->Name());
-    PADDLE_ENFORCE(lstm_bias_var);
+    PADDLE_ENFORCE_NOT_NULL(
+        lstm_bias_var, platform::errors::InvalidArgument(
+                           "LSTM bias variable's pointer cannot be nullptr."));
     const auto& lstm_bias_tensor = lstm_bias_var->Get();
     bias_tensor->Resize(lstm_bias_tensor.dims());
-- 
GitLab
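
Note: the convention this patch applies, shown once as a minimal standalone sketch. The PADDLE_ENFORCE_EQ / PADDLE_ENFORCE_NOT_NULL macros and the platform::errors categories are the real Paddle APIs exercised in the diff above; the helper function and its argument below are hypothetical, for illustration only.

// check_bias_example.cc -- illustrative sketch, not part of this patch.
#include <string>
#include <vector>

#include "paddle/fluid/platform/enforce.h"

namespace paddle {

// Hypothetical helper showing the before/after of the migration.
void CheckBiasInputs(const std::vector<std::string>& bias_names) {
  // Old style (removed throughout this patch): condition only, so a
  // failure reports little more than the stringified expression.
  //   PADDLE_ENFORCE(bias_names.size() == 1UL);

  // New style (added throughout this patch): a comparison macro plus a
  // typed error with a formatted message, so a failure reports the error
  // category, the expected value, and the actual value.
  PADDLE_ENFORCE_EQ(bias_names.size(), 1UL,
                    platform::errors::InvalidArgument(
                        "Conv op must have exactly one Bias input, "
                        "but got %d.",
                        bias_names.size()));
}

}  // namespace paddle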