diff --git a/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc
index 119917428997b03ecb0278fac5de677f0017b2bc..45ff275d530857690d1f169bbcf60a99952ae2c2 100644
--- a/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc
@@ -22,7 +22,8 @@ namespace framework {
 namespace ir {
 
 void ConvActivationFusePass::ApplyImpl(ir::Graph* graph) const {
-  PADDLE_ENFORCE_NOT_NULL(graph, "graph cannot be nullptr.");
+  PADDLE_ENFORCE_NOT_NULL(
+      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
   FusePassBase::Init("conv_activation_mkldnn_fuse", graph);
 
   GraphPatternDetector gpd;
@@ -75,7 +76,8 @@ void ConvActivationFusePass::ApplyImpl(ir::Graph* graph) const {
     GraphSafeRemoveNodes(graph, {activation, conv_out});
 
     PADDLE_ENFORCE_GT(subgraph.count(conv_input), 0UL,
-                      "subgraph has to contain conv_input node.");
+                      platform::errors::InvalidArgument(
+                          "Subgraph has to contain conv input node."));
     IR_NODE_LINK_TO(conv, activation_out);
     found_conv_activation_count++;
   };
diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
index bbfc8c005580bb949b498e4474c4059cd09f56b3..82e0af3c198750296032769f2f3b04658871adb7 100644
--- a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
@@ -26,7 +26,11 @@ namespace ir {
 template <typename BinaryOperation>
 LoDTensor tensor_apply_eltwise(const LoDTensor& vec_a, const LoDTensor& vec_b,
                                BinaryOperation f) {
-  PADDLE_ENFORCE_EQ(vec_a.dims(), vec_b.dims());
+  PADDLE_ENFORCE_EQ(vec_a.dims(), vec_b.dims(),
+                    platform::errors::InvalidArgument(
+                        "Input two tensors must have same shape, but they are "
+                        "different: %s, %s.",
+                        vec_a.dims(), vec_b.dims()));
   LoDTensor vec_y;
   vec_y.Resize(vec_a.dims());
   const float* a = vec_a.data<float>();
@@ -39,11 +43,13 @@
 }
 
 void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
-  PADDLE_ENFORCE(graph);
+  PADDLE_ENFORCE_NOT_NULL(
+      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
   FusePassBase::Init(name_scope_, graph);
 
   auto* scope = param_scope();
-  PADDLE_ENFORCE(scope);
+  PADDLE_ENFORCE_NOT_NULL(
+      scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
 
   GraphPatternDetector gpd;
   auto* conv_input =
@@ -68,7 +74,9 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
     // elementwise_add op
     GET_IR_NODE_FROM_SUBGRAPH(eltwise, eltwise, conv_bias_pattern);
 
-    PADDLE_ENFORCE(subgraph.count(conv_input));
+    PADDLE_ENFORCE_NE(
+        subgraph.count(conv_input), 0,
+        platform::errors::NotFound("Detector did not find conv input."));
 
     // check if fuse can be done and if MKL-DNN should be used
     FuseOptions fuse_option = FindFuseOption(*conv, *eltwise);
@@ -86,10 +94,16 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
     if (has_bias && conv->Op()->Input("Bias").size() > 0) {
       auto conv_bias_names = conv->Op()->Input("Bias");
       // add eltwise bias to existing conv bias
-      PADDLE_ENFORCE_EQ(conv_bias_names.size(), 1);
+      PADDLE_ENFORCE_EQ(conv_bias_names.size(), 1,
+                        platform::errors::NotFound("Can not find var Bias."));
       auto* conv_bias_var = scope->FindVar(conv_bias_names[0]);
       auto* conv_bias_tensor = conv_bias_var->GetMutable<LoDTensor>();
-      PADDLE_ENFORCE_EQ(conv_bias_tensor->dims(), eltwise_bias_tensor->dims());
+      PADDLE_ENFORCE_EQ(
+          conv_bias_tensor->dims(), eltwise_bias_tensor->dims(),
+          platform::errors::InvalidArgument(
+              "Conv bias tensor and eltwise bias tensor "
+              "must have same shape, but they are different: %s, %s.",
+              conv_bias_tensor->dims(), eltwise_bias_tensor->dims()));
 
       *conv_bias_tensor = tensor_apply_eltwise(
           *conv_bias_tensor, *eltwise_bias_tensor, std::plus<float>());
diff --git a/paddle/fluid/framework/ir/mkldnn/conv_concat_relu_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_concat_relu_mkldnn_fuse_pass.cc
index 9e8f0f0c46cee250e4e425cc636467d89171fa84..af64cb22054e9f2ea751bb993a39e8be563ae458 100644
--- a/paddle/fluid/framework/ir/mkldnn/conv_concat_relu_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/conv_concat_relu_mkldnn_fuse_pass.cc
@@ -39,7 +39,10 @@ void ConvConcatReLUFusePass::FindConcatWithConvs(
 
     for (auto node : concat_inputs) {
       auto prev_op_node = node->inputs;
-      PADDLE_ENFORCE_EQ(prev_op_node.size(), 1);
+      PADDLE_ENFORCE_EQ(prev_op_node.size(), 1,
+                        platform::errors::InvalidArgument(
+                            "Node(%s) input size(%d) must be 1.", node->Name(),
+                            prev_op_node.size()));
       auto* conv_op = prev_op_node[0];
       if (conv_op->Op()->Type() != "conv2d") return;
 
@@ -103,7 +106,8 @@ void ConvConcatReLUFusePass::FuseConvConcatReLU(
 }
 
 void ConvConcatReLUFusePass::ApplyImpl(ir::Graph* graph) const {
-  PADDLE_ENFORCE(graph);
+  PADDLE_ENFORCE_NOT_NULL(
+      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
   FusePassBase::Init(name_scope_, graph);
 
   std::unordered_map<const Node*, int> concat_with_convs_counter;
diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc
index 9881f7f9e56fd3815896a8b574563e48d998944e..23419d5b9e0a20adcb6245a5a5aa4c5c4b5f3a34 100644
--- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc
@@ -68,10 +68,10 @@ void CPUQuantizePass::QuantizeInput(Graph* g, Node* op, Node* input,
   auto inputs = op->Op()->InputNames();
   bool name_found =
       std::find(inputs.begin(), inputs.end(), input_name) != inputs.end();
-  PADDLE_ENFORCE_EQ(
-      name_found, true,
-      platform::errors::InvalidArgument("%s isn't the input of the %s operator",
-                                        input_name, op->Op()->Type()));
+  PADDLE_ENFORCE_EQ(name_found, true,
+                    platform::errors::InvalidArgument(
+                        "Var(%s) isn't the input of the %s operator.",
+                        input_name, op->Op()->Type()));
   unsigned max = is_unsigned ? U8_MAX : S8_MAX;
   float scale = scale_to_one * max;
 
@@ -110,8 +110,14 @@ void CPUQuantizePass::QuantizeInputs(Graph* g, Node* op, std::string input_name,
                                      std::string scale_attr_name) const {
   auto inputs = op->inputs;
   auto output = op->outputs[0];
-  PADDLE_ENFORCE_GE(inputs.size(), 1);
-  PADDLE_ENFORCE_EQ(op->outputs.size(), 1);
+  PADDLE_ENFORCE_GE(inputs.size(), 1,
+                    platform::errors::InvalidArgument(
+                        "OP(%s)'s inputs(%d) must be equal or greater than 1.",
+                        op->Name(), inputs.size()));
+  PADDLE_ENFORCE_EQ(op->outputs.size(), 1,
+                    platform::errors::InvalidArgument(
+                        "OP(%s)'s outputs(%d) must be equal to 1.", op->Name(),
+                        op->outputs.size()));
 
   // create a quantize op desc prototype
   OpDesc q_desc;
@@ -159,8 +165,8 @@ void CPUQuantizePass::DequantizeOutput(Graph* g, Node* op, Node* output,
       std::find(outputs.begin(), outputs.end(), output_name) != outputs.end();
   PADDLE_ENFORCE_EQ(name_found, true,
                     platform::errors::InvalidArgument(
-                        "%s isn't the output of the %s operator", output_name,
-                        op->Op()->Type()));
+                        "Var(%s) isn't the output of the %s operator.",
+                        output_name, op->Op()->Type()));
   unsigned max = is_unsigned ? U8_MAX : S8_MAX;
   float scale = scale_to_one * max;
 
@@ -682,10 +688,12 @@ void CPUQuantizePass::QuantizeMatmul(Graph* graph) const {
   bool is_x_unsigned{false}, is_y_unsigned{false};
   auto input_x_scale = GetScaleValueForNode(matmul_in_x, &is_x_unsigned);
   auto input_y_scale = GetScaleValueForNode(matmul_in_y, &is_y_unsigned);
-  PADDLE_ENFORCE_EQ(
-      is_x_unsigned, is_y_unsigned,
-      platform::errors::InvalidArgument(
-          "Matmul inputs should have the same value of is_unsigned"));
+  PADDLE_ENFORCE_EQ(is_x_unsigned, is_y_unsigned,
+                    platform::errors::InvalidArgument(
+                        "Matmul inputs should have the same "
+                        "attribute of signed/unsigned, but they "
+                        "are different: x(%d), y(%d).",
+                        is_x_unsigned, is_y_unsigned));
   QuantizeInput(g, matmul_op, matmul_in_x, "X", input_x_scale, is_x_unsigned,
                 "Scale_x");
   QuantizeInput(g, matmul_op, matmul_in_y, "Y", input_y_scale, is_y_unsigned,
@@ -785,10 +793,12 @@ void CPUQuantizePass::QuantizeElementwiseAdd(Graph* graph) const {
 
 void CPUQuantizePass::ApplyImpl(ir::Graph* graph) const {
   VLOG(3) << "Quantizing the graph.";
-  PADDLE_ENFORCE(graph);
+  PADDLE_ENFORCE_NOT_NULL(
+      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
   FusePassBase::Init(name_scope_, graph);
 
-  PADDLE_ENFORCE(param_scope());
+  PADDLE_ENFORCE_NOT_NULL(param_scope(), platform::errors::InvalidArgument(
+                                             "Scope cannot be nullptr."));
 
   QuantizeConv(graph, false /* with_residual_data */);
   QuantizeConv(graph, true /* with_residual_data */);
diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc
index 130ba44ff64c77e9a968200f58719b123b6f4b76..bc24c10d9d0ae545d0dc71160d66e02a9fdbd730 100644
--- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc
@@ -75,7 +75,7 @@ void CPUQuantizeSquashPass::DequantQuantSquash(
         BOOST_GET_CONST(float, quant_op->Op()->GetAttr("Scale"));
     PADDLE_ENFORCE_NE(
         nodes_keep_counter->find(dequant_out), nodes_keep_counter->end(),
-        platform::errors::NotFound("The dequant output node is not found"));
+        platform::errors::NotFound("The dequant output node is not found."));
 
     // check if dequantize op should be kept or removed, decrease the counter
     bool keep_dequant = (*nodes_keep_counter)[dequant_out]-- > 1;
@@ -153,8 +153,9 @@ void CPUQuantizeSquashPass::OpRequantSquash(Graph* graph) const {
 
     PADDLE_ENFORCE_NE(
         any_op_output_name.empty(), true,
-        platform::errors::NotFound("Operator before requantize operator "
-                                   "should have requantize input as output"));
+        platform::errors::NotFound("Operator before requantize operator(%s) "
+                                   "should have requantize input as output.",
+                                   requant_in->Name()));
 
     float requant_scale_out =
         BOOST_GET_CONST(float, requant_op->Op()->GetAttr("Scale_out"));
@@ -195,10 +196,11 @@ void CPUQuantizeSquashPass::RequantOpSquash(Graph* graph) const {
      for (auto input_name : any_op->Op()->Input(name))
        if (input_name == requant_out->Name()) any_op_input_name = name;
 
-    PADDLE_ENFORCE_NE(
-        any_op_input_name.empty(), true,
-        platform::errors::NotFound("The operator after requantize operator "
-                                   "should have requantize output as input"));
+    PADDLE_ENFORCE_NE(any_op_input_name.empty(), true,
+                      platform::errors::NotFound(
+                          "The operator after requantize operator(%s) "
+                          "should have requantize output as input.",
+                          requant_out->Name()));
 
     float requant_scale_in =
         boost::get<float>(requant_op->Op()->GetAttr("Scale_in"));
@@ -206,11 +208,14 @@ void CPUQuantizeSquashPass::RequantOpSquash(Graph* graph) const {
     if (any_op->Op()->Type() == "matmul")
       scale_name = any_op_input_name == "X" ? "Scale_x" : "Scale_y";
-    PADDLE_ENFORCE_EQ(requant_op->Op()->GetAttrIfExists<float>("Scale_out"),
-                      any_op->Op()->GetAttrIfExists<float>(scale_name),
-                      platform::errors::InvalidArgument(
-                          "The operator after requantize should have input "
-                          "scale equal to requantize output scale"));
+    PADDLE_ENFORCE_EQ(
+        requant_op->Op()->GetAttrIfExists<float>("Scale_out"),
+        any_op->Op()->GetAttrIfExists<float>(scale_name),
+        platform::errors::InvalidArgument(
+            "The operator after requantize should have input "
+            "scale(%f) equal to requantize output scale(%f).",
+            any_op->Op()->GetAttrIfExists<float>(scale_name),
+            requant_op->Op()->GetAttrIfExists<float>("Scale_out")));
 
     any_op->Op()->SetAttr(scale_name, requant_scale_in);
     any_op->Op()->SetInput(any_op_input_name,
                            std::vector<std::string>({requant_in->Name()}));
@@ -286,8 +291,9 @@ void CPUQuantizeSquashPass::MultipleQuantizeSquash(Graph* graph) const {
 
     auto* first_quant_out = first_quant_op->outputs[0];
     float scale = first_quant_op->Op()->GetAttrIfExists<float>("Scale");
-    PADDLE_ENFORCE_NE(scale, 0, platform::errors::InvalidArgument(
-                                    "Quantize scale should not be equal 0"));
+    PADDLE_ENFORCE_NE(scale, 0,
+                      platform::errors::InvalidArgument(
+                          "Quantize scale(%f) should not be equal 0.", scale));
 
     for (int iter = prev_out->outputs.size() - 1; iter >= 0; iter--) {
       auto quant_op = prev_out->outputs[iter];
@@ -304,8 +310,9 @@ void CPUQuantizeSquashPass::MultipleQuantizeSquash(Graph* graph) const {
         PADDLE_ENFORCE_NE(
             last_op_input_name.empty(), true,
-            platform::errors::NotFound("Operator after quantize operator "
-                                       "should has quantize output as input"));
+            platform::errors::NotFound("Operator after quantize operator(%s) "
+                                       "should has quantize output as input.",
+                                       quant_out->Name()));
 
         last_op->Op()->SetInput(
             last_op_input_name,
             std::vector<std::string>({first_quant_out->Name()}));
@@ -345,10 +352,12 @@ void CPUQuantizeSquashPass::DequantScaleSquash(Graph* graph) const {
 
       PADDLE_ENFORCE_GT(dequant_scale, 0.0f,
                         platform::errors::InvalidArgument(
-                            "Dequantize scale should have positive value"));
+                            "Dequantize scale(%f) should have positive value.",
+                            dequant_scale));
       PADDLE_ENFORCE_GT(scale_scale, 0.0f,
                         platform::errors::InvalidArgument(
-                            "Scale of scale op should have positive value"));
+                            "Scale(%f) of scale op should have positive value.",
+                            scale_scale));
 
       dequant_op->Op()->SetAttr("Scale", dequant_scale / scale_scale);
       dequant_op->Op()->SetOutput(
@@ -367,8 +376,8 @@ void CPUQuantizeSquashPass::DequantScaleSquash(Graph* graph) const {
 void CPUQuantizeSquashPass::ApplyImpl(ir::Graph* graph) const {
   PADDLE_ENFORCE_NOT_NULL(
       graph,
-      platform::errors::NotFound(
-          "The graph in function CPUQuantizeSquashPass::ApplyImpl is null"));
+      platform::errors::InvalidArgument(
+          "The graph in function CPUQuantizeSquashPass::ApplyImpl is null."));
   FusePassBase::Init("cpu_quantize_squash_pass", graph);
 
   std::unordered_map<const Node*, int> nodes_keep_counter;
diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc
index 9b827fdf6fef1788fafd5595a2705e9df1b2e720..37af0274ea8a2046a7c4376f3ffaa1091f3d4b04 100644
--- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc
+++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc
@@ -57,7 +57,7 @@ void SetOp(ProgramDesc* prog, const std::string& type, const std::string& name,
     PADDLE_ENFORCE_EQ(inputs.size(), 2UL,
                       platform::errors::InvalidArgument(
                           "The fc inputs should contain input and weights, but "
-                          "now the size of inputs is %d",
+                          "now the size of inputs is %d.",
                           inputs.size()));
     op->SetInput("W", {inputs[1]});
     op->SetOutput("Out", outputs);
diff --git a/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc
index e854559ae7a8765da604c2043e8e4e8cedbbcf88..c5965701a53d4312d89f1e09f17840b09f1bd5f5 100644
--- a/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc
@@ -19,14 +19,17 @@ namespace paddle {
 namespace framework {
 namespace ir {
 
-#define GET_NODE(id, pattern)                                  \
-  PADDLE_ENFORCE(subgraph.count(pattern.RetrieveNode(#id)),    \
-                 "pattern has no Node called %s", #id);        \
-  auto* id = subgraph.at(pattern.RetrieveNode(#id));           \
-  PADDLE_ENFORCE_NOT_NULL(id, "subgraph has no node %s", #id);
+#define GET_NODE(id, pattern)                                                 \
+  PADDLE_ENFORCE_NE(subgraph.count(pattern.RetrieveNode(#id)), 0,             \
+                    platform::errors::InvalidArgument(                        \
+                        "Pattern has no Node called %s.", #id));              \
+  auto* id = subgraph.at(pattern.RetrieveNode(#id));                          \
+  PADDLE_ENFORCE_NOT_NULL(                                                    \
+      id, platform::errors::InvalidArgument("Subgraph has no node %s.", #id));
 
 void DepthwiseConvMKLDNNPass::ApplyImpl(ir::Graph* graph) const {
-  PADDLE_ENFORCE(graph);
+  PADDLE_ENFORCE_NOT_NULL(
+      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
   FusePassBase::Init("depthwise_conv_mkldnn_pass", graph);
 
   GraphPatternDetector gpd;
diff --git a/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc
index 0d720e828b6d02aba253f5d52e8101ca4e7efb89..6c87e437caa1b159c889a68b4d6f5b1790217ca1 100644
--- a/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc
@@ -46,12 +46,15 @@ void ScaleMatmulFusePass::ApplyImpl(ir::Graph* graph) const {
     if (scale_op->Op()->GetAttrIfExists<float>("bias") == 0.0) {
       auto matmul_alpha = matmul_op->Op()->GetAttrIfExists<float>("alpha");
       auto scale_scale = scale_op->Op()->GetAttrIfExists<float>("scale");
-      PADDLE_ENFORCE_GT(matmul_alpha, 0.0f,
-                        platform::errors::InvalidArgument(
-                            "Alpha of matmul op should have positive value"));
+      PADDLE_ENFORCE_GT(
+          matmul_alpha, 0.0f,
+          platform::errors::InvalidArgument(
+              "Alpha(%f) of matmul op should have positive value.",
+              matmul_alpha));
       PADDLE_ENFORCE_GT(scale_scale, 0.0f,
                         platform::errors::InvalidArgument(
-                            "Scale of scale op should have positive value"));
+                            "Scale(%f) of scale op should have positive value.",
+                            scale_scale));
 
       std::string matmul_op_input_name;
       for (auto name : matmul_op->Op()->InputNames())
@@ -60,8 +63,9 @@ void ScaleMatmulFusePass::ApplyImpl(ir::Graph* graph) const {
 
       PADDLE_ENFORCE_NE(
           matmul_op_input_name.empty(), true,
-          platform::errors::NotFound("Operator after scale operator "
-                                     "should have scale output as input"));
+          platform::errors::NotFound("Operator after scale operator(%s) "
+                                     "should have scale output as input.",
+                                     scale_out->Name()));
       matmul_op->Op()->SetAttr("alpha", matmul_alpha * scale_scale);
       matmul_op->Op()->SetInput(matmul_op_input_name,
                                 std::vector<std::string>({scale_in->Name()}));
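
Note (reviewer sketch, not part of the patch): every hunk above applies the same convention -- bare PADDLE_ENFORCE(...) checks are replaced by the typed enforcement macros with an explicit platform::errors::* error type, and the offending values are interpolated into the message. A minimal illustration of that pattern follows; the pass name SomeExamplePass and the helper GetScaleFromSomewhere() are hypothetical and used only for demonstration, and the snippet assumes the usual Paddle framework headers that define the PADDLE_ENFORCE_* macros and platform::errors.

  // Sketch only; assumes Paddle's enforce macros and platform::errors headers.
  void SomeExamplePass::ApplyImpl(ir::Graph* graph) const {
    // Null-pointer checks use PADDLE_ENFORCE_NOT_NULL with an error type.
    PADDLE_ENFORCE_NOT_NULL(
        graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));

    // Value checks use the typed variants (EQ/NE/GE/GT) and report the
    // actual value via printf-style placeholders in the message.
    float scale = GetScaleFromSomewhere();  // hypothetical helper
    PADDLE_ENFORCE_GT(scale, 0.0f,
                      platform::errors::InvalidArgument(
                          "Scale(%f) should have positive value.", scale));
  }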