From cb6c0e213339a59728e9571d3dc62686e2acf879 Mon Sep 17 00:00:00 2001
From: XGZhang <46363693+XGZhang11@users.noreply.github.com>
Date: Thu, 4 Nov 2021 19:48:31 +0800
Subject: [PATCH] Fix a bug of quantization (#36982)

* fix a quantization bug
---
 .../fluid/contrib/slim/quantization/quantization_pass.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
index 90caee6c7a..9b2954b13f 100644
--- a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
+++ b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
@@ -1292,10 +1292,11 @@ class QuantizationFreezePass(object):
             var_type=output_var_node.type(),
             shape=output_var_node.shape(),
             var_dtype=output_var_node.dtype())
+        x_num_col_dims = 1
+        if op_node.name() in ['matmul', 'matmul_v2', 'mul']:
+            x_num_col_dims = len(op_node.outputs[0].shape()) - 1
         if op_node.op().has_attr("x_num_col_dims"):
             x_num_col_dims = op_node.op().attr("x_num_col_dims")
-        else:
-            x_num_col_dims = 1
         dequant_op_node = graph.create_op_node(
             op_type='fake_channel_wise_dequantize_max_abs',
             attrs={
--
GitLab
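
Note for context: matmul and matmul_v2 carry no x_num_col_dims attribute, so
the old code always fell through to the hard-coded default of 1; for outputs
of rank > 2 the channel-wise dequantize op would then be configured with the
wrong column split. Below is a minimal runnable sketch of the selection logic
this hunk introduces. The helper name and arguments are hypothetical, written
only to illustrate the patched branch; they are not part of PaddlePaddle.

    # Standalone sketch of the patched default selection (illustrative only).
    def infer_x_num_col_dims(op_name, output_shape, explicit_attr=None):
        # New behavior: matmul-like ops default to output rank - 1.
        x_num_col_dims = 1
        if op_name in ['matmul', 'matmul_v2', 'mul']:
            x_num_col_dims = len(output_shape) - 1
        # An explicit x_num_col_dims attribute on the op still takes priority,
        # mirroring the has_attr() check that follows in the patched code.
        if explicit_attr is not None:
            x_num_col_dims = explicit_attr
        return x_num_col_dims

    # matmul_v2 with a 3-D output: now 2; the old code returned 1 because
    # the op has no x_num_col_dims attribute to fall back on.
    print(infer_x_num_col_dims('matmul_v2', [8, 16, 32]))         # -> 2
    # mul declares x_num_col_dims explicitly, so the attribute wins as before.
    print(infer_x_num_col_dims('mul', [8, 32], explicit_attr=1))  # -> 1

The ordering matters: the patch sets the rank-based default first, then lets
an explicit attribute override it, so ops like mul that do declare
x_num_col_dims behave exactly as they did before the fix.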