From 3896590b9119894acab56d2be54139d1eefb315b Mon Sep 17 00:00:00 2001
From: hong19860320 <9973393+hong19860320@users.noreply.github.com>
Date: Fri, 20 Mar 2020 15:15:37 +0800
Subject: [PATCH] [Core] Fix the output_scale for the last quantized op (#3239)

---
 lite/core/mir/quantized_op_attributes_inference_pass.cc | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/lite/core/mir/quantized_op_attributes_inference_pass.cc b/lite/core/mir/quantized_op_attributes_inference_pass.cc
index 54a4e779c6..40cad8f6af 100644
--- a/lite/core/mir/quantized_op_attributes_inference_pass.cc
+++ b/lite/core/mir/quantized_op_attributes_inference_pass.cc
@@ -58,6 +58,11 @@ void QuantizedOpAttributesInferencePass::Apply(
   }
   if (found) {
     inst.mutable_op_info()->SetAttr("output_scale", output_scale);
+  } else if (op_info->HasAttr("output_scale")) {
+    int bit_length = op_info->GetAttr<int>("bit_length");
+    int range = (1 << (bit_length - 1)) - 1;
+    output_scale = op_info->GetAttr<float>("output_scale");
+    inst.mutable_op_info()->SetAttr("output_scale", output_scale / range);
   }
   if (op_info->HasAttr("output_scale")) {
     inst.mutable_op_info()->SetAttr("enable_int8", true);
--
GitLab
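
For context, the hunk above normalizes the stored scale by the quantization range, i.e. it divides by (1 << (bit_length - 1)) - 1. Below is a minimal standalone C++ sketch of that arithmetic; the helper NormalizeOutputScale and the example values are illustrative only and are not part of the Paddle-Lite sources touched by this patch.

#include <cassert>
#include <iostream>

// Illustrative helper (hypothetical, not in Paddle-Lite): assuming the raw
// "output_scale" attribute holds the maximum absolute value of the tensor,
// dividing by range = 2^(bit_length - 1) - 1 yields the per-step float
// scale, which is what the patch stores back on the op.
static float NormalizeOutputScale(float raw_output_scale, int bit_length) {
  assert(bit_length > 1);
  const int range = (1 << (bit_length - 1)) - 1;  // 127 when bit_length == 8
  return raw_output_scale / range;
}

int main() {
  // Example: a tensor whose values span [-6.0, 6.0], quantized to 8 bits.
  std::cout << NormalizeOutputScale(6.0f, 8) << std::endl;  // ~0.0472441
  return 0;
}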