Unverified commit e65c5b8e authored by wanghuancoder, committed by GitHub

fix some error message reporting in framework/ir/ (#25471)

* fix paddle/fluid/framework/ir/ error msg report, test=develop

* modify error msg report in ir/: error types, grammar, supplementary info, test=develop

* modified some unclear descriptions, test=develop

* Fix report messages that were shorter than 20 characters, test=develop
Parent 71c71e68
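The pattern applied throughout the diff below: bare `PADDLE_ENFORCE(cond)` and message-less `PADDLE_ENFORCE_EQ(a, b)` checks are replaced with macros that carry a typed `platform::errors` payload and a formatted, self-describing message. A minimal sketch of the before/after shape, assuming Paddle's enforce machinery from `paddle/fluid/platform/enforce.h` (the helper function and its tensor argument are hypothetical; the macros and error types are the ones used in the diff):

```cpp
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/platform/enforce.h"

// Hypothetical helper illustrating the rewrite pattern of this commit.
void CheckRankIsOne(const paddle::framework::LoDTensor& t) {
  // Before: condition only, no error type, no context in the message.
  //   PADDLE_ENFORCE_EQ(t.dims().size(), 1);
  // After: explicit expected value, an error type from platform::errors,
  // and a message that reports the offending value.
  PADDLE_ENFORCE_EQ(t.dims().size(), 1,
                    paddle::platform::errors::InvalidArgument(
                        "Tensor dimension size(%d) must be 1.",
                        t.dims().size()));
}
```

Null checks follow the same shape with `PADDLE_ENFORCE_NOT_NULL(ptr, platform::errors::InvalidArgument(...))`, and lookups that can miss use `platform::errors::NotFound`.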
@@ -135,7 +135,9 @@ void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input,
 void PrepareParameters(Graph* graph, const Param& param, ir::Node* lstm_op) {
   // Check parameters
-  PADDLE_ENFORCE(graph->Has(kParamScopeAttr));
+  PADDLE_ENFORCE_EQ(graph->Has(kParamScopeAttr), true,
+                    platform::errors::InvalidArgument(
+                        "Graph has no attribute: kParamScopeAttr."));
   auto& scope = graph->Get<Scope>(kParamScopeAttr);
   // Create new parameters.
@@ -193,7 +195,10 @@ void PrepareParameters(Graph* graph, const Param& param, ir::Node* lstm_op) {
   // reshape attention_bias
   auto* attention_bias_t =
       scope.FindVar(param.AttentionBias)->GetMutable<LoDTensor>();
-  PADDLE_ENFORCE_EQ(attention_bias_t->dims().size(), 1);
+  PADDLE_ENFORCE_EQ(attention_bias_t->dims().size(), 1,
+                    platform::errors::InvalidArgument(
+                        "Tensor attention bias dimension size(%d) must be 1.",
+                        attention_bias_t->dims().size()));
   attention_bias_t->Resize(make_ddim({1, attention_bias_t->dims()[0]}));

   auto* attention_scalar_bias_t =
@@ -252,7 +257,10 @@ void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input,
       B_forget.data<float>(), B_input.data<float>(), B_output.data<float>(),
       B_cell.data<float>()};
-  PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1);
+  PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1,
+                    platform::errors::InvalidArgument(
+                        "Tensor B forget dimension size(%d) must be 1.",
+                        B_forget.dims().size()));
   int D = B_forget.dims()[0];
   out->Resize(make_ddim({1, 4 * D}));
   auto* out_data = out->mutable_data<float>(platform::CPUPlace());
......
@@ -119,9 +119,11 @@ class CoalesceGradTensorPass : public ir::Pass {
       p_g_dense_grad.insert(p_g_dense_grad.end(), group_p_g.begin(),
                             group_p_g.end());
     }
-    PADDLE_ENFORCE_EQ(
-        p_g_dense_grad.size(), num_of_p_g_dense_grad,
-        "The number of p_g_dense_grad is not consistent with before.");
+    PADDLE_ENFORCE_EQ(p_g_dense_grad.size(), num_of_p_g_dense_grad,
+                      platform::errors::InvalidArgument(
+                          "The number of dense grads is not consistent with "
+                          "before. Now(%d), previous(%d).",
+                          p_g_dense_grad.size(), num_of_p_g_dense_grad));
     auto &pinned_var_set =
         graph->GetOrInit<details::PinnedVars>(details::kPinnedVars);
@@ -131,8 +133,11 @@
     } else {
       for (auto &sub_param_grad : group_params_grads) {
         RecordGradients(p_g_dense_grad, vars_info, &pinned_var_set);
-        PADDLE_ENFORCE_EQ(IsUnifiedDtype(sub_param_grad, vars_info), true,
-                          "The data type of the same group is not consistent.");
+        PADDLE_ENFORCE_EQ(
+            IsUnifiedDtype(sub_param_grad, vars_info), true,
+            platform::errors::InvalidArgument("All gradient variables in "
+                                              "kGroupParamsAndDenseGrads must "
+                                              "have the same type."));
         CoalesceTensors(vars_info, sub_param_grad, &result);
       }
     }
@@ -145,15 +150,25 @@
     // The Gradients should not be reused during memory optimization.
     for (auto &p_g : sub_param_grad) {
       auto iter = vars_info.find(p_g.second);
-      PADDLE_ENFORCE_EQ(iter != vars_info.end(), true, "%s is not found.",
-                        p_g.second);
-      PADDLE_ENFORCE_EQ(!iter->second.empty(), true);
+      PADDLE_ENFORCE_EQ(iter != vars_info.end(), true,
+                        platform::errors::NotFound(
+                            "Parameter@Grad %s is not found.", p_g.second));
+      PADDLE_ENFORCE_EQ(
+          !iter->second.empty(), true,
+          platform::errors::InvalidArgument(
+              "Parameter@Grad %s's var node is empty.", p_g.second));
       for (auto it : iter->second) {
-        PADDLE_ENFORCE_NOT_NULL(it->Var());
+        PADDLE_ENFORCE_NOT_NULL(
+            it->Var(),
+            platform::errors::InvalidArgument(
+                "A node of Parameter@Grad %s does not hold a variable.",
+                p_g.second));
         pinned_var_set->insert(it->Var()->Name());
       }
       PADDLE_ENFORCE_EQ(IsLoDTensorType(GetTypeOfVar(vars_info, p_g.second)),
-                        true);
+                        true,
+                        platform::errors::InvalidArgument(
+                            "Parameter@Grad %s is not a LoDTensor.",
+                            p_g.second));
     }
   }
@@ -192,8 +207,10 @@ class CoalesceGradTensorPass : public ir::Pass {
     auto fused_grad_var_name = std::string(details::kFusedVarNamePrefix) +
                                "@GRAD@" + params_grads.begin()->second;
     auto &fused_var_set = result->Get<details::FusedVars>(details::kFusedVars);
-    PADDLE_ENFORCE_EQ(fused_var_set.count(fused_grad_var_name), 0,
-                      "%s is duplicate in FusedVars.", fused_grad_var_name);
+    PADDLE_ENFORCE_EQ(
+        fused_var_set.count(fused_grad_var_name), 0,
+        platform::errors::AlreadyExists("Var(%s) is duplicate in FusedVars.",
+                                        fused_grad_var_name));
     fused_var_set.insert(fused_grad_var_name);
     result->Get<details::FusedGrads>(details::kFusedGrads)
         .emplace_back(fused_grad_var_name);
@@ -420,11 +437,16 @@
       const std::unordered_map<std::string, std::vector<Node *>> &vars_info,
       const std::string &var_name) const {
     auto grad_iter = vars_info.find(var_name);
-    PADDLE_ENFORCE_EQ(grad_iter != vars_info.end(), true, "%s is not found.",
-                      var_name);
-    PADDLE_ENFORCE_EQ(!grad_iter->second.empty(), true, "%s is not found.",
-                      var_name);
-    PADDLE_ENFORCE_NOT_NULL(grad_iter->second.front()->Var());
+    PADDLE_ENFORCE_EQ(
+        grad_iter != vars_info.end(), true,
+        platform::errors::NotFound("Variable %s is not found.", var_name));
+    PADDLE_ENFORCE_EQ(!grad_iter->second.empty(), true,
+                      platform::errors::InvalidArgument(
+                          "Variable %s's node is empty.", var_name));
+    PADDLE_ENFORCE_NOT_NULL(
+        grad_iter->second.front()->Var(),
+        platform::errors::InvalidArgument(
+            "A node of %s does not hold a variable.", var_name));
     return grad_iter->second.front()->Var();
   }
@@ -464,7 +486,12 @@ class CoalesceGradTensorPass : public ir::Pass {
       params_name.emplace_back(p_g.first);
       grads_name.emplace_back(p_g.second);
       auto next_dtype = GetDtypeOfVar(vars_info, p_g.second);
-      PADDLE_ENFORCE_EQ(next_dtype, dtype);
+      PADDLE_ENFORCE_EQ(
+          next_dtype, dtype,
+          platform::errors::InvalidArgument(
+              "All Parameter@Grad should have the same dtype, but "
+              "there are two different types: %s, %s.",
+              DataTypeToString(next_dtype), DataTypeToString(dtype)));
     }

     result->Get<details::ProgramDescs>(details::kProgramDescs).emplace_back();
......
@@ -50,7 +50,12 @@ void recompute_bias_and_weights(const Scope* scope, ir::Node* conv_weight,
       Eigen::Array<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>;

   // Re-compute bias of conv2d from AffineChannel
-  PADDLE_ENFORCE_EQ(eltwise_y_in_tensor->dims(), ac_bias_tensor.dims());
+  PADDLE_ENFORCE_EQ(
+      eltwise_y_in_tensor->dims(), ac_bias_tensor.dims(),
+      platform::errors::InvalidArgument(
+          "Tensor elementwise y(%d) and affine channel bias(%d) must have "
+          "the same dimension.",
+          eltwise_y_in_tensor->dims().size(), ac_bias_tensor.dims().size()));

   auto* scale_tensor = scope->FindVar(ac_scale.Name())->GetMutable<LoDTensor>();
@@ -78,11 +83,13 @@ void recompute_bias_and_weights(const Scope* scope, ir::Node* conv_weight,
 }

 void ConvAffineChannelFusePass::ApplyImpl(ir::Graph* graph) const {
-  PADDLE_ENFORCE(graph);
+  PADDLE_ENFORCE_NOT_NULL(
+      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
   FusePassBase::Init(name_scope_, graph);
   auto* scope = param_scope();
-  PADDLE_ENFORCE(scope);
+  PADDLE_ENFORCE_NOT_NULL(
+      scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));

   GraphPatternDetector gpd;
   auto* conv_input =
@@ -152,11 +159,13 @@ void ConvAffineChannelFusePass::ApplyImpl(ir::Graph* graph) const {
 }

 void ConvEltwiseAddAffineChannelFusePass::ApplyImpl(ir::Graph* graph) const {
-  PADDLE_ENFORCE(graph);
+  PADDLE_ENFORCE_NOT_NULL(
+      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
   FusePassBase::Init(name_scope_, graph);
   auto* scope = param_scope();
-  PADDLE_ENFORCE(scope);
+  PADDLE_ENFORCE_NOT_NULL(
+      scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));

   GraphPatternDetector gpd;
   auto* conv_input =
......
@@ -61,7 +61,12 @@ void recompute_bias_and_weights(const Scope* scope,
       Eigen::Array<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>;

   // Re-compute bias of conv2d from BN
-  PADDLE_ENFORCE_EQ(eltwise_y_in_tensor->dims(), bn_bias_tensor.dims());
+  PADDLE_ENFORCE_EQ(
+      eltwise_y_in_tensor->dims(), bn_bias_tensor.dims(),
+      platform::errors::InvalidArgument("Tensor elementwise y(%d) and batch "
+                                        "norm bias(%d) must have same dims.",
+                                        eltwise_y_in_tensor->dims().size(),
+                                        bn_bias_tensor.dims().size()));

   auto* scale_tensor = scope->FindVar(bn_scale.Name())->GetMutable<LoDTensor>();
   auto* variance_tensor =
@@ -116,11 +121,13 @@ void recompute_bias_and_weights(const Scope* scope,
 }

 void ConvBNFusePass::ApplyImpl(ir::Graph* graph) const {
-  PADDLE_ENFORCE(graph);
+  PADDLE_ENFORCE_NOT_NULL(
+      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
   FusePassBase::Init(name_scope_, graph);
   auto* scope = param_scope();
-  PADDLE_ENFORCE(scope);
+  PADDLE_ENFORCE_NOT_NULL(
+      scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));

   GraphPatternDetector gpd;
   auto* conv_input =
@@ -186,11 +193,18 @@ void ConvBNFusePass::ApplyImpl(ir::Graph* graph) const {
     if (has_bias && conv->Op()->Input("Bias").size() > 0) {
       // reuse existing conv bias node
       auto conv_bias_names = conv->Op()->Input("Bias");
-      PADDLE_ENFORCE_EQ(conv_bias_names.size(), 1UL);
+      PADDLE_ENFORCE_EQ(conv_bias_names.size(), 1UL,
+                        platform::errors::InvalidArgument(
+                            "Input var Bias of conv must hold one element."));
       auto* conv_bias_var = scope->FindVar(conv_bias_names[0]);
       auto* conv_bias_tensor = conv_bias_var->GetMutable<LoDTensor>();
-      PADDLE_ENFORCE_EQ(conv_bias_tensor->dims(),
-                        eltwise_y_in_tensor->dims());
+      PADDLE_ENFORCE_EQ(
+          conv_bias_tensor->dims(), eltwise_y_in_tensor->dims(),
+          platform::errors::InvalidArgument(
+              "Tensor convolution bias(%d) and elementwise y(%d) "
+              "must have the same dims.",
+              conv_bias_tensor->dims().size(),
+              eltwise_y_in_tensor->dims().size()));

       auto eigen_conv_bias = EigenVector<float>::From(*conv_bias_tensor);
       eigen_conv_bias += EigenVector<float>::From(*eltwise_y_in_tensor);
@@ -236,11 +250,13 @@ void ConvBNFusePass::ApplyImpl(ir::Graph* graph) const {
 }

 void ConvEltwiseAddBNFusePass::ApplyImpl(ir::Graph* graph) const {
-  PADDLE_ENFORCE(graph);
+  PADDLE_ENFORCE_NOT_NULL(
+      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
   FusePassBase::Init(name_scope_, graph);
   auto* scope = param_scope();
-  PADDLE_ENFORCE(scope);
+  PADDLE_ENFORCE_NOT_NULL(
+      scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));

   GraphPatternDetector gpd;
   auto* conv_input =
......
@@ -71,8 +71,16 @@ void TestMain(const std::string& conv_type) {
   int num_bn_nodes_after = GetNumOpNodes(graph, "batch_norm");
   VLOG(3) << DebugString(graph);

-  PADDLE_ENFORCE_EQ(num_bn_nodes_before, 1);
-  PADDLE_ENFORCE_EQ(num_bn_nodes_after, 0);
+  PADDLE_ENFORCE_EQ(
+      num_bn_nodes_before, 1,
+      platform::errors::InvalidArgument(
+          "Before conv_bn_fuse_pass, number of batch norm ops(%d) must be 1.",
+          num_bn_nodes_before));
+  PADDLE_ENFORCE_EQ(
+      num_bn_nodes_after, 0,
+      platform::errors::InvalidArgument(
+          "After conv_bn_fuse_pass, number of batch norm ops(%d) must be 0.",
+          num_bn_nodes_after));
 }

 TEST(ConvBNFusePass, conv2d) { TestMain("conv"); }
......
@@ -91,7 +91,9 @@ void ConvElementwiseAdd2ActFusePass::ApplyImpl(ir::Graph* graph) const {
     auto* new_conv_op = graph->CreateOpNode(&new_op_desc);

     // Link inputs and outputs.
-    PADDLE_ENFORCE(subgraph.count(x));
+    PADDLE_ENFORCE_NE(
+        subgraph.count(x), 0,
+        platform::errors::NotFound("Detector did not find input x of conv2d."));
     auto* conv_in_node = subgraph.at(x);

     IR_NODE_LINK_TO(conv_in_node, new_conv_op);  // Input
......
@@ -78,7 +78,9 @@ void ConvElementwiseAddActFusePass::ApplyImpl(ir::Graph* graph) const {
     auto* new_conv_op = graph->CreateOpNode(&new_op_desc);

     // Link inputs and outputs.
-    PADDLE_ENFORCE(subgraph.count(x));
+    PADDLE_ENFORCE_NE(
+        subgraph.count(x), 0,
+        platform::errors::NotFound("Detector did not find input x of conv2d."));
     auto* conv_in_node = subgraph.at(x);

     IR_NODE_LINK_TO(conv_in_node, new_conv_op);  // Input
......
@@ -66,7 +66,9 @@ void ConvElementwiseAddFusePass::ApplyImpl(ir::Graph* graph) const {
     auto* new_conv_op = graph->CreateOpNode(&new_op_desc);

     // Link inputs and outputs.
-    PADDLE_ENFORCE(subgraph.count(x));
+    PADDLE_ENFORCE_NE(
+        subgraph.count(x), 0,
+        platform::errors::NotFound("Detector did not find input x of conv2d."));
     auto* conv_in_node = subgraph.at(x);

     IR_NODE_LINK_TO(conv_in_node, new_conv_op);  // Input
......
@@ -64,17 +64,23 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
 #undef SET_IN

   // Multiply embeddings with Weights
-  PADDLE_ENFORCE(scope);
+  PADDLE_ENFORCE_NOT_NULL(
+      scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
   const std::string& embeddings = patterns::UniqueKey("Embeddings");
   auto* embeddings_var = scope->Var(embeddings);
-  PADDLE_ENFORCE(embeddings_var);
+  PADDLE_ENFORCE_NOT_NULL(
+      embeddings_var,
+      platform::errors::InvalidArgument(
+          "Embeddings variable's pointer cannot be nullptr."));
   auto* embeddings_tensor =
       embeddings_var->GetMutable<framework::LoDTensor>();
   // Get WeightX size: [single_embedding, fc_size]
   // and embedding size: [dict_size, single_embedding]
   // and create new size of embeddings eg. [dict_size , hidden_size]
   auto* embedding_var = scope->FindVar(W->Name());
-  PADDLE_ENFORCE(embedding_var);
+  PADDLE_ENFORCE_NOT_NULL(
+      embedding_var, platform::errors::InvalidArgument(
+                         "Embedding variable's pointer cannot be nullptr."));
   const auto& embedding_tensor = embedding_var->Get<framework::LoDTensor>();

   const auto& weightx_tensor =
@@ -90,7 +96,9 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
   // Adding biases to GEMM result to be
   auto* lstm_bias_var = scope->FindVar(bias->Name());
-  PADDLE_ENFORCE(lstm_bias_var);
+  PADDLE_ENFORCE_NOT_NULL(lstm_bias_var,
+                          platform::errors::InvalidArgument(
+                              "LSTM bias var ptr cannot be nullptr."));
   const auto& lstm_bias_tensor = lstm_bias_var->Get<framework::LoDTensor>();

   auto alpha = 1.0f;
......
@@ -56,8 +56,17 @@ TEST(FCElementwiseLayerNormFusePass, basic) {
       GetNumOpNodes(graph, "fused_fc_elementwise_layernorm");
   VLOG(3) << DebugString(graph);

-  PADDLE_ENFORCE_EQ(num_nodes_before, num_nodes_after + 6);
-  PADDLE_ENFORCE_EQ(num_fused_nodes_after, 1);
+  PADDLE_ENFORCE_EQ(
+      num_nodes_before, num_nodes_after + 6,
+      platform::errors::InvalidArgument(
+          "After the pass, the number of nodes should be reduced by 6, but "
+          "the number before the pass is %d and after the pass is %d.",
+          num_nodes_before, num_nodes_after));
+  PADDLE_ENFORCE_EQ(num_fused_nodes_after, 1,
+                    platform::errors::InvalidArgument(
+                        "After the pass, the number of nodes of type "
+                        "'fused_fc_elementwise_layernorm' should be 1, not %d.",
+                        num_fused_nodes_after));
 }

 }  // namespace ir
......
@@ -25,7 +25,8 @@ namespace framework {
 namespace ir {

 void FCFusePass::ApplyImpl(ir::Graph* graph) const {
-  PADDLE_ENFORCE_NOT_NULL(graph);
+  PADDLE_ENFORCE_NOT_NULL(
+      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
   FusePassBase::Init("fc_fuse", graph);

   int found_fc_count = 0;
......
@@ -79,9 +79,17 @@ TEST(FCFusePass, basic) {
   int num_fc_nodes_after = GetNumOpNodes(graph, "fc");
   VLOG(3) << DebugString(graph);

-  PADDLE_ENFORCE_EQ(num_nodes_before, num_nodes_after + 6);
-  PADDLE_ENFORCE_EQ(num_fc_nodes_after, 2);
-  PADDLE_ENFORCE_EQ(num_mul_nodes_before, num_fc_nodes_after);
+  PADDLE_ENFORCE_EQ(num_nodes_before, num_nodes_after + 6,
+                    platform::errors::InvalidArgument(
+                        "num_nodes_before=%d, num_nodes_after=%d.",
+                        num_nodes_before, num_nodes_after));
+  PADDLE_ENFORCE_EQ(num_fc_nodes_after, 2,
+                    platform::errors::InvalidArgument("num_fc_nodes_after=%d.",
+                                                      num_fc_nodes_after));
+  PADDLE_ENFORCE_EQ(num_mul_nodes_before, num_fc_nodes_after,
+                    platform::errors::InvalidArgument(
+                        "num_mul_nodes_before=%d, num_fc_nodes_after=%d.",
+                        num_mul_nodes_before, num_fc_nodes_after));
 }

 }  // namespace ir
......
@@ -68,18 +68,27 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
 #undef SET_IMTERMEDIATE_OUT

   auto* op = graph->CreateOpNode(&op_desc);
-  PADDLE_ENFORCE(graph->Has(kParamScopeAttr));
+  PADDLE_ENFORCE_EQ(graph->Has(kParamScopeAttr), true,
+                    platform::errors::InvalidArgument(
+                        "Graph has no attribute kParamScopeAttr."));
   auto& scope = graph->Get<Scope>(kParamScopeAttr);

   if (with_fc_bias) {
     // Fusion GRU bias = fcbias + grubias
     auto* fusion_bias_var = scope.Var(NEW_NAME(bias) + bias->Name());
     auto* out_bias_tensor =
         fusion_bias_var->GetMutable<framework::LoDTensor>();
-    PADDLE_ENFORCE(fusion_bias_var);
+    PADDLE_ENFORCE_NOT_NULL(
+        fusion_bias_var,
+        platform::errors::InvalidArgument(
+            "Fusion bias variable's pointer cannot be nullptr."));
     auto* gru_bias_var = scope.FindVar(bias->Name());
     auto* fc_bias_var = scope.FindVar(fc_bias->Name());
-    PADDLE_ENFORCE(gru_bias_var);
-    PADDLE_ENFORCE(fc_bias_var);
+    PADDLE_ENFORCE_NOT_NULL(gru_bias_var,
+                            platform::errors::InvalidArgument(
+                                "GRU bias var ptr cannot be nullptr."));
+    PADDLE_ENFORCE_NOT_NULL(fc_bias_var,
+                            platform::errors::InvalidArgument(
+                                "FC bias var ptr cannot be nullptr."));
     const auto& gru_bias_tenosr = gru_bias_var->Get<framework::LoDTensor>();
     const auto& fc_bias_tensor = fc_bias_var->Get<framework::LoDTensor>();
     // new bias = fc bias + gru bias
......
@@ -52,13 +52,17 @@ int BuildFusion(Graph* graph, const std::string& name_scope, Scope* scope,
 #undef SET_IN

   if (with_fc_bias) {
     // Add FC-bias with LSTM-bias and create a new weight
-    PADDLE_ENFORCE(scope);
+    PADDLE_ENFORCE_NOT_NULL(
+        scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
     const std::string& new_bias_var = patterns::UniqueKey("NewBias");
     auto* bias_var = scope->Var(new_bias_var);
-    PADDLE_ENFORCE(bias_var);
+    PADDLE_ENFORCE_NOT_NULL(bias_var, platform::errors::InvalidArgument(
+                                          "Bias var ptr cannot be nullptr."));
     auto* bias_tensor = bias_var->GetMutable<framework::LoDTensor>();
     auto* lstm_bias_var = scope->FindVar(bias->Name());
-    PADDLE_ENFORCE(lstm_bias_var);
+    PADDLE_ENFORCE_NOT_NULL(lstm_bias_var,
+                            platform::errors::InvalidArgument(
+                                "LSTM bias var ptr cannot be nullptr."));
     const auto& lstm_bias_tensor = lstm_bias_var->Get<framework::LoDTensor>();
     bias_tensor->Resize(lstm_bias_tensor.dims());
......
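For reference, a short sketch (not part of the commit) of how a failed check surfaces to callers, assuming Paddle's enforce machinery: the `PADDLE_ENFORCE_*` macros throw `platform::EnforceNotMet`, whose `what()` carries the error type, the formatted message, and an enforce call stack.

```cpp
#include <iostream>

#include "paddle/fluid/platform/enforce.h"

int main() {
  try {
    int rank = 2;  // hypothetical value that violates the check below
    PADDLE_ENFORCE_EQ(rank, 1,
                      paddle::platform::errors::InvalidArgument(
                          "Tensor dimension size(%d) must be 1.", rank));
  } catch (const paddle::platform::EnforceNotMet& err) {
    std::cout << err.what() << std::endl;  // formatted message + call stack
  }
  return 0;
}
```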