From 0bfd05d4ad4187a2295a0e232e73b4dee0d346e7 Mon Sep 17 00:00:00 2001
From: Guanghua Yu <742925032@qq.com>
Date: Wed, 19 Oct 2022 17:30:52 +0800
Subject: [PATCH] quant matmul op in image classification act example (#1473)

---
 .../image_classification/configs/EfficientNetB0/qat_dis.yaml | 2 ++
 .../image_classification/configs/GhostNet_x1_0/qat_dis.yaml | 2 ++
 .../image_classification/configs/InceptionV3/qat_dis.yaml | 2 ++
 .../image_classification/configs/MobileNetV1/qat_dis.yaml | 2 ++
 .../configs/MobileNetV3_large_x1_0/qat_dis.yaml | 1 +
 .../image_classification/configs/PPHGNet_tiny/qat_dis.yaml | 2 ++
 .../image_classification/configs/PPLCNetV2_base/qat_dis.yaml | 2 ++
 .../image_classification/configs/PPLCNet_x1_0/qat_dis.yaml | 2 ++
 .../image_classification/configs/ResNet50_vd/qat_dis.yaml | 2 ++
 .../image_classification/configs/ShuffleNetV2_x1_0/qat_dis.yaml | 2 ++
 .../image_classification/configs/SqueezeNet1_0/qat_dis.yaml | 2 ++
 .../SwinTransformer_base_patch4_window7_224/qat_dis.yaml | 2 ++
 12 files changed, 23 insertions(+)

diff --git a/example/auto_compression/image_classification/configs/EfficientNetB0/qat_dis.yaml b/example/auto_compression/image_classification/configs/EfficientNetB0/qat_dis.yaml
index c74e0321..a1fe00d7 100644
--- a/example/auto_compression/image_classification/configs/EfficientNetB0/qat_dis.yaml
+++ b/example/auto_compression/image_classification/configs/EfficientNetB0/qat_dis.yaml
@@ -24,6 +24,8 @@ Quantization:
   quantize_op_types:
   - conv2d
   - depthwise_conv2d
+  - matmul
+  - matmul_v2
   weight_bits: 8
 
 TrainConfig:
diff --git a/example/auto_compression/image_classification/configs/GhostNet_x1_0/qat_dis.yaml b/example/auto_compression/image_classification/configs/GhostNet_x1_0/qat_dis.yaml
index 4d6bb699..1b96e09a 100644
--- a/example/auto_compression/image_classification/configs/GhostNet_x1_0/qat_dis.yaml
+++ b/example/auto_compression/image_classification/configs/GhostNet_x1_0/qat_dis.yaml
@@ -22,6 +22,8 @@ Quantization:
   quantize_op_types:
   - conv2d
   - depthwise_conv2d
+  - matmul
+  - matmul_v2
   weight_bits: 8
 TrainConfig:
   epochs: 1
diff --git a/example/auto_compression/image_classification/configs/InceptionV3/qat_dis.yaml b/example/auto_compression/image_classification/configs/InceptionV3/qat_dis.yaml
index a4615c20..e3f3384d 100644
--- a/example/auto_compression/image_classification/configs/InceptionV3/qat_dis.yaml
+++ b/example/auto_compression/image_classification/configs/InceptionV3/qat_dis.yaml
@@ -22,6 +22,8 @@ Quantization:
   quantize_op_types:
   - conv2d
   - depthwise_conv2d
+  - matmul
+  - matmul_v2
   weight_bits: 8
 
 TrainConfig:
diff --git a/example/auto_compression/image_classification/configs/MobileNetV1/qat_dis.yaml b/example/auto_compression/image_classification/configs/MobileNetV1/qat_dis.yaml
index b0fff145..e83c3fee 100644
--- a/example/auto_compression/image_classification/configs/MobileNetV1/qat_dis.yaml
+++ b/example/auto_compression/image_classification/configs/MobileNetV1/qat_dis.yaml
@@ -22,6 +22,8 @@ Quantization:
   quantize_op_types:
   - conv2d
   - depthwise_conv2d
+  - matmul
+  - matmul_v2
   weight_bits: 8
 TrainConfig:
   epochs: 1
diff --git a/example/auto_compression/image_classification/configs/MobileNetV3_large_x1_0/qat_dis.yaml b/example/auto_compression/image_classification/configs/MobileNetV3_large_x1_0/qat_dis.yaml
index f4780e0b..7f5eceb4 100644
--- a/example/auto_compression/image_classification/configs/MobileNetV3_large_x1_0/qat_dis.yaml
+++ b/example/auto_compression/image_classification/configs/MobileNetV3_large_x1_0/qat_dis.yaml
@@ -23,6 +23,7 @@ Quantization:
   - conv2d
   - depthwise_conv2d
   - matmul
+  - matmul_v2
   weight_bits: 8
 
 TrainConfig:
diff --git a/example/auto_compression/image_classification/configs/PPHGNet_tiny/qat_dis.yaml b/example/auto_compression/image_classification/configs/PPHGNet_tiny/qat_dis.yaml
index fb1535e7..661c1bca 100644
--- a/example/auto_compression/image_classification/configs/PPHGNet_tiny/qat_dis.yaml
+++ b/example/auto_compression/image_classification/configs/PPHGNet_tiny/qat_dis.yaml
@@ -24,6 +24,8 @@ Quantization:
   quantize_op_types:
   - conv2d
   - depthwise_conv2d
+  - matmul
+  - matmul_v2
   weight_bits: 8
 
 TrainConfig:
diff --git a/example/auto_compression/image_classification/configs/PPLCNetV2_base/qat_dis.yaml b/example/auto_compression/image_classification/configs/PPLCNetV2_base/qat_dis.yaml
index 4be08fe3..018e48b4 100644
--- a/example/auto_compression/image_classification/configs/PPLCNetV2_base/qat_dis.yaml
+++ b/example/auto_compression/image_classification/configs/PPLCNetV2_base/qat_dis.yaml
@@ -24,6 +24,8 @@ Quantization:
   quantize_op_types:
   - conv2d
   - depthwise_conv2d
+  - matmul
+  - matmul_v2
   weight_bits: 8
 
 TrainConfig:
diff --git a/example/auto_compression/image_classification/configs/PPLCNet_x1_0/qat_dis.yaml b/example/auto_compression/image_classification/configs/PPLCNet_x1_0/qat_dis.yaml
index 394455b3..8c823f25 100644
--- a/example/auto_compression/image_classification/configs/PPLCNet_x1_0/qat_dis.yaml
+++ b/example/auto_compression/image_classification/configs/PPLCNet_x1_0/qat_dis.yaml
@@ -22,6 +22,8 @@ Quantization:
   quantize_op_types:
   - conv2d
   - depthwise_conv2d
+  - matmul
+  - matmul_v2
   weight_bits: 8
 TrainConfig:
   epochs: 1
diff --git a/example/auto_compression/image_classification/configs/ResNet50_vd/qat_dis.yaml b/example/auto_compression/image_classification/configs/ResNet50_vd/qat_dis.yaml
index 4cc375ad..e94d509b 100644
--- a/example/auto_compression/image_classification/configs/ResNet50_vd/qat_dis.yaml
+++ b/example/auto_compression/image_classification/configs/ResNet50_vd/qat_dis.yaml
@@ -24,6 +24,8 @@ Quantization:
   quantize_op_types:
   - conv2d
   - depthwise_conv2d
+  - matmul
+  - matmul_v2
   weight_bits: 8
 
 TrainConfig:
diff --git a/example/auto_compression/image_classification/configs/ShuffleNetV2_x1_0/qat_dis.yaml b/example/auto_compression/image_classification/configs/ShuffleNetV2_x1_0/qat_dis.yaml
index b0d3ffb4..4792f490 100644
--- a/example/auto_compression/image_classification/configs/ShuffleNetV2_x1_0/qat_dis.yaml
+++ b/example/auto_compression/image_classification/configs/ShuffleNetV2_x1_0/qat_dis.yaml
@@ -22,6 +22,8 @@ Quantization:
   quantize_op_types:
   - conv2d
   - depthwise_conv2d
+  - matmul
+  - matmul_v2
   weight_bits: 8
 TrainConfig:
   epochs: 1
diff --git a/example/auto_compression/image_classification/configs/SqueezeNet1_0/qat_dis.yaml b/example/auto_compression/image_classification/configs/SqueezeNet1_0/qat_dis.yaml
index 08685537..01a0b153 100644
--- a/example/auto_compression/image_classification/configs/SqueezeNet1_0/qat_dis.yaml
+++ b/example/auto_compression/image_classification/configs/SqueezeNet1_0/qat_dis.yaml
@@ -21,6 +21,8 @@ Quantization:
   quantize_op_types:
   - conv2d
   - depthwise_conv2d
+  - matmul
+  - matmul_v2
   weight_bits: 8
 TrainConfig:
   epochs: 1
diff --git a/example/auto_compression/image_classification/configs/SwinTransformer_base_patch4_window7_224/qat_dis.yaml b/example/auto_compression/image_classification/configs/SwinTransformer_base_patch4_window7_224/qat_dis.yaml
index d07e734a..f1fc3a1f 100644
--- a/example/auto_compression/image_classification/configs/SwinTransformer_base_patch4_window7_224/qat_dis.yaml
+++ b/example/auto_compression/image_classification/configs/SwinTransformer_base_patch4_window7_224/qat_dis.yaml
@@ -22,6 +22,8 @@ Quantization:
   quantize_op_types:
   - conv2d
   - depthwise_conv2d
+  - matmul
+  - matmul_v2
   weight_bits: 8
 TrainConfig:
   epochs: 1
-- 
GitLab
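For reference, a minimal sketch of how the Quantization block in one of these configs (e.g. MobileNetV1/qat_dis.yaml) reads once the matmul ops are listed. Only quantize_op_types and weight_bits are confirmed by this patch; the other keys are assumed typical ACT settings and may differ in the actual files:

# Sketch only, not copied from the repo: keys marked "assumed" are not part of this patch.
Quantization:
  activation_bits: 8                                  # assumed default
  activation_quantize_type: moving_average_abs_max    # assumed default
  quantize_op_types:
  - conv2d
  - depthwise_conv2d
  - matmul
  - matmul_v2
  weight_bits: 8

Listing matmul and matmul_v2 under quantize_op_types tells the quant-aware training pass to insert fake-quant/dequant nodes around those ops as well, so matmul-based layers (e.g. the final fully connected layer in these classifiers, or the attention projections in SwinTransformer) are quantized alongside the convolutions.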