From 9ffb43bef35865432469ba2b8707d923f2b0e5a7 Mon Sep 17 00:00:00 2001
From: ceci3
Date: Mon, 22 Nov 2021 11:28:52 +0800
Subject: [PATCH] Fix a bug of quantization (#36982) (#37381)

* fix a quantization bug

Co-authored-by: XGZhang <46363693+XGZhang11@users.noreply.github.com>
---
 .../fluid/contrib/slim/quantization/quantization_pass.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
index 90caee6c7a9..9b2954b13f2 100644
--- a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
+++ b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
@@ -1292,10 +1292,11 @@ class QuantizationFreezePass(object):
             var_type=output_var_node.type(),
             shape=output_var_node.shape(),
             var_dtype=output_var_node.dtype())
+        x_num_col_dims = 1
+        if op_node.name() in ['matmul', 'matmul_v2', 'mul']:
+            x_num_col_dims = len(op_node.outputs[0].shape()) - 1
         if op_node.op().has_attr("x_num_col_dims"):
             x_num_col_dims = op_node.op().attr("x_num_col_dims")
-        else:
-            x_num_col_dims = 1
         dequant_op_node = graph.create_op_node(
             op_type='fake_channel_wise_dequantize_max_abs',
             attrs={
-- 
GitLab
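
Note: the sketch below is a minimal illustration of the control flow this
patch installs, written as a plain, hypothetical Python helper
(infer_x_num_col_dims) rather than Paddle's IrGraph/op-node API. It shows
why the old `else: x_num_col_dims = 1` default was wrong for matmul-like
ops with outputs of rank > 2, where the per-channel axis is the last
dimension of the output.

# Hypothetical helper mirroring the patched logic: default to 1, override
# for matmul/matmul_v2/mul based on the output rank, and let an explicit
# op attribute take precedence, exactly as in the diff above.
def infer_x_num_col_dims(op_name, output_shape, attr_x_num_col_dims=None):
    x_num_col_dims = 1
    if op_name in ['matmul', 'matmul_v2', 'mul']:
        x_num_col_dims = len(output_shape) - 1
    if attr_x_num_col_dims is not None:
        x_num_col_dims = attr_x_num_col_dims
    return x_num_col_dims

# A rank-3 matmul_v2 output [batch, seq_len, hidden]: the channel axis is
# the last one, so the correct value is 2; the old code returned 1 here.
assert infer_x_num_col_dims('matmul_v2', [8, 128, 768]) == 2
# A 2-D mul output keeps the previous behavior (len(shape) - 1 == 1).
assert infer_x_num_col_dims('mul', [8, 768]) == 1
# An explicit x_num_col_dims attribute still wins, as in the patch.
assert infer_x_num_col_dims('mul', [8, 768], attr_x_num_col_dims=1) == 1

In the real pass, this value is then passed to the
fake_channel_wise_dequantize_max_abs op so that the per-channel scales are
broadcast along the correct axis of the dequantized output.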