From c07e3bb1cfa1a0a928353f7d6aab92a5d195f823 Mon Sep 17 00:00:00 2001
From: lidanqing
Date: Thu, 9 Jun 2022 17:19:10 +0800
Subject: [PATCH] [Bug fix] Do not quantize weight Y when matmul inputs X and Y
 are both outputs of other ops (#43297)

* Fix matmul ops whose X and Y inputs are both outputs of other ops: do not
  dequantize Y in that case.

* Fix CI format

* Fix according to review
---
 .../fluid/framework/ir/mkldnn/quant_dequant_mkldnn_pass.cc | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/framework/ir/mkldnn/quant_dequant_mkldnn_pass.cc b/paddle/fluid/framework/ir/mkldnn/quant_dequant_mkldnn_pass.cc
index 808d043a4b..28363f8f16 100644
--- a/paddle/fluid/framework/ir/mkldnn/quant_dequant_mkldnn_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/quant_dequant_mkldnn_pass.cc
@@ -354,10 +354,9 @@ bool QuantDequantMkldnnPass::IsInt8Weight(
   auto* op_desc = op_node->Op();
   auto var_name = op_desc->Input(weight_name)[0];
   auto* var = scope->FindVar(var_name);
-  PADDLE_ENFORCE_NOT_NULL(
-      var, platform::errors::NotFound(
-               "The input persistable [%s] var of [%s] op is not found.",
-               var_name, op_desc->Type()));
+  if (var == nullptr) {
+    return false;
+  }
   auto* weight_tensor = var->GetMutable<LoDTensor>();
   auto* weight_data = weight_tensor->data<float>();
   bool is_int8 = true;
--
GitLab
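
The change above makes QuantDequantMkldnnPass::IsInt8Weight return false when
the weight variable is absent from the scope (as happens when a matmul's Y
input is produced by another op rather than stored as a persistable weight),
instead of raising a NotFound error. Below is a minimal standalone C++ sketch
of that behavior; the Scope alias, the vector-of-float tensor representation,
and the main driver are hypothetical stand-ins, not PaddlePaddle's actual API.

// Minimal sketch of the patched IsInt8Weight logic with stand-in types.
#include <cmath>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Stand-in for a scope mapping variable names to float weight tensors.
using Scope = std::unordered_map<std::string, std::vector<float>>;

// Returns true only if the named variable exists in the scope and every
// element is an integer in [-128, 127], i.e. already quantized to int8 range.
bool IsInt8Weight(const Scope& scope, const std::string& var_name) {
  auto it = scope.find(var_name);
  if (it == scope.end()) {
    // Previously this case raised a NotFound error; the fix treats a missing
    // persistable var as "not an int8 weight" so the pass simply skips it.
    return false;
  }
  for (float v : it->second) {
    if (v != std::floor(v) || v < -128.0f || v > 127.0f) return false;
  }
  return true;
}

int main() {
  Scope scope{{"conv_w", {1.0f, -3.0f, 127.0f}},  // int8-like weight
              {"fc_w", {0.5f, 2.0f}}};            // plain float weight
  std::cout << IsInt8Weight(scope, "conv_w") << "\n";    // 1
  std::cout << IsInt8Weight(scope, "fc_w") << "\n";      // 0
  std::cout << IsInt8Weight(scope, "matmul_y") << "\n";  // 0: var not in scope
  return 0;
}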