diff --git a/paddleslim/auto_compression/auto_strategy.py b/paddleslim/auto_compression/auto_strategy.py
index 451f6007579dd96a15a753b97d29b165c930f97b..eab962add5804c2b3bb1bf965e1f97b2890f0041 100644
--- a/paddleslim/auto_compression/auto_strategy.py
+++ b/paddleslim/auto_compression/auto_strategy.py
@@ -47,8 +47,10 @@ default_hpo_config = {
 
 # default quant config, can be used by ptq&hpo and qat&distillation
 default_quant_config = {
-    'quantize_op_types':
-    ['conv2d', 'depthwise_conv2d', 'mul', 'matmul', 'matmul_v2'],
+    'quantize_op_types': [
+        'conv2d', 'depthwise_conv2d', 'conv2d_transpose', 'mul', 'matmul',
+        'matmul_v2'
+    ],
     'weight_bits': 8,
     'activation_bits': 8,
     "is_full_quantize": False,
diff --git a/paddleslim/auto_compression/strategy_config.py b/paddleslim/auto_compression/strategy_config.py
index aad5e23e1a5d8dae0c39d9f392a885db1d64696a..d8b3e90ce73762fd7365e9b0951e6c404f484dd2 100644
--- a/paddleslim/auto_compression/strategy_config.py
+++ b/paddleslim/auto_compression/strategy_config.py
@@ -53,7 +53,8 @@ class BaseStrategy:
 
 class Quantization(BaseStrategy):
     def __init__(self,
                  quantize_op_types=[
-                     'conv2d', 'depthwise_conv2d', 'mul', 'matmul', 'matmul_v2'
+                     'conv2d', 'depthwise_conv2d', 'conv2d_transpose', 'mul',
+                     'matmul', 'matmul_v2'
                  ],
                  weight_bits=8,
                  activation_bits=8,