diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py b/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
index f4620ff00013c85084484cb330f685a1b83c4cfc..66b11d1f17ad412de616f7053665a2045c09359e 100644
--- a/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
+++ b/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
@@ -468,7 +468,7 @@ class ImperativeQuantizeOutputs(object):
         """
         Whether the layer needs to calculate output scales.
         """
-        return isinstance(layer, tuple(utils.quant_output_layers_map.values())) \
+        return isinstance(layer, utils.quant_output_layers) \
             or ('quantized' in layer.full_name() and \
                 'quantized_noweight' not in layer.full_name())
 
diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/utils.py b/python/paddle/fluid/contrib/slim/quantization/imperative/utils.py
index f45eb8c97f419e4cfb331ca99ea30c27514b19cd..004e1c1aa9bc501c4a91be5b821ce505592f6910 100644
--- a/python/paddle/fluid/contrib/slim/quantization/imperative/utils.py
+++ b/python/paddle/fluid/contrib/slim/quantization/imperative/utils.py
@@ -43,28 +43,18 @@ fake_quantize_dequantize_types = [
     "fake_quantize_dequantize_moving_average_abs_max"
 ]
 
-quant_output_layers_map = {
-    'Conv2D': paddle.nn.Conv2D,
-    'Conv2DTranspose': paddle.nn.Conv2DTranspose,
-    'Linear': paddle.nn.Linear,
-    'AdaptiveAvgPool2D': paddle.nn.AdaptiveAvgPool2D,
-    'AdaptiveMaxPool2D': paddle.nn.AdaptiveMaxPool2D,
-    'AvgPool2D': paddle.nn.AvgPool2D,
-    'MaxPool2D': paddle.nn.MaxPool2D,
-    'BatchNorm': paddle.nn.BatchNorm,
-    'BatchNorm2D': paddle.nn.BatchNorm2D,
-    'SyncBatchNorm': paddle.nn.SyncBatchNorm,
-    'ELU': paddle.nn.ELU,
-    'GELU': paddle.nn.GELU,
-    'LeakyReLU': paddle.nn.LeakyReLU,
-    'PReLU': paddle.nn.PReLU,
-    'ReLU': paddle.nn.ReLU,
-    'ReLU6': paddle.nn.ReLU6,
-    'Sigmoid': paddle.nn.Sigmoid,
-    'Softmax': paddle.nn.Softmax,
-    'Tanh': paddle.nn.Tanh,
-    'Swish': paddle.nn.Swish,
-}
+quant_output_layers = (
+    paddle.nn.Conv2D, paddle.nn.Conv2DTranspose, paddle.nn.Linear,
+    paddle.nn.AdaptiveAvgPool2D, paddle.nn.AdaptiveMaxPool2D,
+    paddle.nn.AvgPool2D, paddle.nn.MaxPool2D, paddle.nn.BatchNorm,
+    paddle.nn.BatchNorm2D, paddle.nn.LayerNorm, paddle.nn.SyncBatchNorm,
+    paddle.nn.ELU, paddle.nn.GELU, paddle.nn.Hardshrink, paddle.nn.Hardsigmoid,
+    paddle.nn.Hardswish, paddle.nn.Hardtanh, paddle.nn.LeakyReLU,
+    paddle.nn.LogSigmoid, paddle.nn.LogSoftmax, paddle.nn.Maxout,
+    paddle.nn.PReLU, paddle.nn.ReLU, paddle.nn.ReLU6, paddle.nn.SELU,
+    paddle.nn.Sigmoid, paddle.nn.Softmax, paddle.nn.Softplus,
+    paddle.nn.Softshrink, paddle.nn.Softsign, paddle.nn.Swish, paddle.nn.Tanh,
+    paddle.nn.Tanhshrink, paddle.nn.ThresholdedReLU, paddle.nn.Upsample)
 
 weight_op_types = [
     "conv2d", "depthwise_conv2d", "matmul", "conv2d_transpose",