From 5784dfe14bf273f00879c5451a35ee71caea932a Mon Sep 17 00:00:00 2001
From: Guanghua Yu <742925032@qq.com>
Date: Wed, 10 Aug 2022 18:16:13 +0800
Subject: [PATCH] support conv2d_transpose quant in act (#1332)

---
 paddleslim/auto_compression/auto_strategy.py   | 6 ++++--
 paddleslim/auto_compression/strategy_config.py | 3 ++-
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/paddleslim/auto_compression/auto_strategy.py b/paddleslim/auto_compression/auto_strategy.py
index 451f6007..eab962ad 100644
--- a/paddleslim/auto_compression/auto_strategy.py
+++ b/paddleslim/auto_compression/auto_strategy.py
@@ -47,8 +47,10 @@ default_hpo_config = {
 
 # default quant config, can be used by ptq&hpo and qat&distillation
 default_quant_config = {
-    'quantize_op_types':
-    ['conv2d', 'depthwise_conv2d', 'mul', 'matmul', 'matmul_v2'],
+    'quantize_op_types': [
+        'conv2d', 'depthwise_conv2d', 'conv2d_transpose', 'mul', 'matmul',
+        'matmul_v2'
+    ],
     'weight_bits': 8,
     'activation_bits': 8,
     "is_full_quantize": False,
diff --git a/paddleslim/auto_compression/strategy_config.py b/paddleslim/auto_compression/strategy_config.py
index aad5e23e..d8b3e90c 100644
--- a/paddleslim/auto_compression/strategy_config.py
+++ b/paddleslim/auto_compression/strategy_config.py
@@ -53,7 +53,8 @@ class BaseStrategy:
 class Quantization(BaseStrategy):
     def __init__(self,
                  quantize_op_types=[
-                     'conv2d', 'depthwise_conv2d', 'mul', 'matmul', 'matmul_v2'
+                     'conv2d', 'depthwise_conv2d', 'conv2d_transpose', 'mul',
+                     'matmul', 'matmul_v2'
                  ],
                  weight_bits=8,
                  activation_bits=8,
--
GitLab
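
Illustrative usage (not part of the patch): a minimal sketch of how the Quantization
strategy config changed above might be constructed so that conv2d_transpose ops are
quantized. The class name and its quantize_op_types, weight_bits, and activation_bits
parameters come from the diff; the import path and surrounding script are assumptions.

# sketch only; Quantization and its parameters are shown in the patch above,
# the module path is inferred from the file location and may differ.
from paddleslim.auto_compression.strategy_config import Quantization

# With this patch the default op list already includes 'conv2d_transpose';
# passing quantize_op_types explicitly is only needed to override the defaults.
quant_config = Quantization(
    quantize_op_types=[
        'conv2d', 'depthwise_conv2d', 'conv2d_transpose', 'mul', 'matmul',
        'matmul_v2'
    ],
    weight_bits=8,
    activation_bits=8)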