diff --git a/index.html b/index.html index 438b8a94289113de9f978ab60646ab766d9a4d2c..0ab9fb24f66b73f438af3ff5ee1ffe23eb0698b9 100644 --- a/index.html +++ b/index.html @@ -298,5 +298,5 @@ diff --git a/model_zoo2/index.html b/model_zoo2/index.html index f60446b131415c9ec12e49be0c8ad2e2e2b2e10e..c571c0c30a35b7475830ba29062540f19dff835b 100644 --- a/model_zoo2/index.html +++ b/model_zoo2/index.html @@ -212,6 +212,7 @@ Model +压缩方法 Top-1/Top-5 模型大小(MB) 下载 @@ -219,55 +220,64 @@ -MobileNetV1 FP32 +MobileNetV1 +- 70.99%/89.68% xx 下载链接 -MobileNetV1 quant_post +MobileNetV1 +quant_post xx%/xx% xx 下载链接 -MobileNetV1 quant_aware +MobileNetV1 +quant_aware xx%/xx% xx 下载链接 -MobileNetV2 FP32 +MobileNetV2 +- 72.15%/90.65% xx 下载链接 -MobileNetV2 quant_post +MobileNetV2 +quant_post xx%/xx% xx 下载链接 -MobileNetV2 quant_aware +MobileNetV2 +quant_aware xx%/xx% xx 下载链接 -ResNet50 FP32 +ResNet50 +- 76.50%/93.00% xx 下载链接 -ResNet50 quant_post +ResNet50 +quant_post xx%/xx% xx 下载链接 -ResNet50 quant_aware +ResNet50 +quant_aware xx%/xx% xx 下载链接 @@ -280,6 +290,7 @@ Model +压缩方法 Image/GPU 输入608 Box AP 输入416 Box AP @@ -290,7 +301,8 @@ -MobileNet-V1-YOLOv3 FP32 +MobileNet-V1-YOLOv3 +- 8 29.3 29.3 @@ -299,7 +311,8 @@ 下载链接 -MobileNet-V1-YOLOv3 quant_post +MobileNet-V1-YOLOv3 +quant_post 8 xx xx @@ -308,7 +321,8 @@ 下载链接 -MobileNet-V1-YOLOv3 quant_aware +MobileNet-V1-YOLOv3 +quant_aware 8 xx xx @@ -318,6 +332,7 @@ R50-dcn-YOLOv3 FP32 +- 8 41.4 - @@ -326,7 +341,8 @@ 下载链接 -R50-dcn-YOLOv3 quant_post +R50-dcn-YOLOv3 +quant_post 8 xx - @@ -335,7 +351,8 @@ 下载链接 -R50-dcn-YOLOv3 quant_aware +R50-dcn-YOLOv3 +quant_aware 8 xx - @@ -350,6 +367,7 @@ Model +压缩方法 Image/GPU 输入尺寸 Easy/Medium/Hard @@ -359,7 +377,8 @@ -BlazeFace FP32 +BlazeFace +- 8 640 0.915/0.892/0.797 @@ -367,7 +386,8 @@ 下载链接 -BlazeFace quant_post +BlazeFace +quant_post 8 640 xx/xx/xx @@ -375,7 +395,8 @@ 下载链接 -BlazeFace quant_aware +BlazeFace +quant_aware 8 640 xx/xx/xx @@ -383,7 +404,8 @@ 下载链接 -BlazeFace-Lite FP32 +BlazeFace-Lite +- 8 640 0.909/0.885/0.781 @@ -391,7 +413,8 @@ 下载链接 -BlazeFace-Lite quant_post +BlazeFace-Lite +quant_post 8 640 xx/xx/xx @@ -399,7 +422,8 @@ 下载链接 -BlazeFace-Lite quant_aware +BlazeFace-Lite +quant_aware 8 640 xx/xx/xx @@ -407,7 +431,8 @@ 下载链接 -BlazeFace-NAS FP32 +BlazeFace-NAS +- 8 640 0.837/0.807/0.658 @@ -415,7 +440,8 @@ 下载链接 -BlazeFace-NAS quant_post +BlazeFace-NAS +quant_post 8 640 xx/xx/xx @@ -423,7 +449,8 @@ 下载链接 -BlazeFace-NAS quant_aware +BlazeFace-NAS +quant_aware 8 640 xx/xx/xx @@ -432,13 +459,13 @@ -

#

1.3 图像分割#

数据集:Cityscapes

+ @@ -447,43 +474,48 @@ + - + + - + + + - + + - + +
Model压缩方法 mIoU 模型大小(MB) 下载
DeepLabv3+/MobileNetv1- 63.26 xx 下载链接
DeepLabv3+/MobileNetv1 quant_postDeepLabv3+/MobileNetv1quant_post xx xx 下载链接
DeepLabv3+/MobileNetv1 quant_awareDeepLabv3+/MobileNetv1quant_aware xx xx 下载链接
DeepLabv3+/MobileNetv2- 69.81 xx 下载链接
DeepLabv3+/MobileNetv2 quant_postDeepLabv3+/MobileNetv2quant_post xx xx 下载链接
DeepLabv3+/MobileNetv2 quant_awareDeepLabv3+/MobileNetv2quant_aware xx xx 下载链接
-

#

2. 剪枝#

2.1 图像分类#

数据集:ImageNet1000类

@@ -491,6 +523,7 @@ Model +压缩方法 Top-1/Top-5 模型大小(MB) FLOPs @@ -500,20 +533,23 @@ MobileNetV1 +- 70.99%/89.68% xx xx 下载链接 -MobileNetV1 uniform -50% +MobileNetV1 +uniform -xx% xx%/xx% xx xx 下载链接 -MobileNetV1 sensitive -xx% +MobileNetV1 +sensitive -xx% xx%/xx% xx xx @@ -521,20 +557,23 @@ MobileNetV2 +- 72.15%/90.65% xx xx 下载链接 -MobileNetV2 uniform -50% +MobileNetV2 +uniform -xx% xx%/xx% xx xx 下载链接 -MobileNetV2 sensitive -xx% +MobileNetV2 +sensitive -xx% xx%/xx% xx xx @@ -542,20 +581,23 @@ ResNet34 +- 74.57%/92.14% xx xx 下载链接 -ResNet34 uniform -50% +ResNet34 +uniform -xx% xx%/xx% xx xx 下载链接 -ResNet34 auto -50% +ResNet34 +auto -xx% xx%/xx% xx xx @@ -563,13 +605,13 @@ -

#

2.2 目标检测#

数据集：Pascal VOC & COCO 2017

+ @@ -583,6 +625,7 @@ + @@ -593,7 +636,8 @@ - + + @@ -605,6 +649,7 @@ + @@ -615,7 +660,8 @@ - + + @@ -627,6 +673,7 @@ + @@ -637,7 +684,8 @@ - + + @@ -655,6 +703,7 @@ + @@ -664,13 +713,15 @@ + - + + @@ -685,6 +736,7 @@ + @@ -692,31 +744,37 @@ + - + + + - + + + - + + @@ -734,6 +792,7 @@ + @@ -745,6 +804,7 @@ + @@ -753,7 +813,8 @@ - + + @@ -763,6 +824,7 @@ + @@ -771,7 +833,8 @@ - + + diff --git a/search/search_index.json b/search/search_index.json index ff227474075226cd12700cb3452c65d9432b47a1..fb6abce5939f9b002fba6a7e84ef437785f759de 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"PaddleSlim # PaddleSlim\u662fPaddlePaddle\u6846\u67b6\u7684\u4e00\u4e2a\u5b50\u6a21\u5757\uff0c\u4e3b\u8981\u7528\u4e8e\u538b\u7f29\u56fe\u50cf\u9886\u57df\u6a21\u578b\u3002\u5728PaddleSlim\u4e2d\uff0c\u4e0d\u4ec5\u5b9e\u73b0\u4e86\u76ee\u524d\u4e3b\u6d41\u7684\u7f51\u7edc\u526a\u679d\u3001\u91cf\u5316\u3001\u84b8\u998f\u4e09\u79cd\u538b\u7f29\u7b56\u7565\uff0c\u8fd8\u5b9e\u73b0\u4e86\u8d85\u53c2\u6570\u641c\u7d22\u548c\u5c0f\u6a21\u578b\u7f51\u7edc\u7ed3\u6784\u641c\u7d22\u529f\u80fd\u3002\u5728\u540e\u7eed\u7248\u672c\u4e2d\uff0c\u4f1a\u6dfb\u52a0\u66f4\u591a\u7684\u538b\u7f29\u7b56\u7565\uff0c\u4ee5\u53ca\u5b8c\u5584\u5bf9NLP\u9886\u57df\u6a21\u578b\u7684\u652f\u6301\u3002 \u529f\u80fd # \u6a21\u578b\u526a\u88c1 \u652f\u6301\u901a\u9053\u5747\u5300\u6a21\u578b\u526a\u88c1\uff08uniform pruning) \u57fa\u4e8e\u654f\u611f\u5ea6\u7684\u6a21\u578b\u526a\u88c1 \u57fa\u4e8e\u8fdb\u5316\u7b97\u6cd5\u7684\u81ea\u52a8\u6a21\u578b\u526a\u88c1\u4e09\u79cd\u65b9\u5f0f \u91cf\u5316\u8bad\u7ec3 \u5728\u7ebf\u91cf\u5316\u8bad\u7ec3\uff08training aware\uff09 \u79bb\u7ebf\u91cf\u5316\uff08post training\uff09 \u652f\u6301\u5bf9\u6743\u91cd\u5168\u5c40\u91cf\u5316\u548cChannel-Wise\u91cf\u5316 \u84b8\u998f \u8f7b\u91cf\u795e\u7ecf\u7f51\u7edc\u7ed3\u6784\u81ea\u52a8\u641c\u7d22\uff08Light-NAS\uff09 \u652f\u6301\u57fa\u4e8e\u8fdb\u5316\u7b97\u6cd5\u7684\u8f7b\u91cf\u795e\u7ecf\u7f51\u7edc\u7ed3\u6784\u81ea\u52a8\u641c\u7d22\uff08Light-NAS\uff09 \u652f\u6301 FLOPS / \u786c\u4ef6\u5ef6\u65f6\u7ea6\u675f \u652f\u6301\u591a\u5e73\u53f0\u6a21\u578b\u5ef6\u65f6\u8bc4\u4f30 \u5b89\u88c5 # \u5b89\u88c5PaddleSlim\u524d\uff0c\u8bf7\u786e\u8ba4\u5df2\u6b63\u786e\u5b89\u88c5Paddle1.6\u7248\u672c\u6216\u66f4\u65b0\u7248\u672c\u3002Paddle\u5b89\u88c5\u8bf7\u53c2\u8003\uff1a Paddle\u5b89\u88c5\u6559\u7a0b \u3002 \u5b89\u88c5develop\u7248\u672c 1 2 3 git clone https : // github . com / PaddlePaddle / PaddleSlim . git cd PaddleSlim python setup . py install \u5b89\u88c5\u5b98\u65b9\u53d1\u5e03\u7684\u6700\u65b0\u7248\u672c 1 pip install paddleslim - i https : // pypi . 
org / simple \u5b89\u88c5\u5386\u53f2\u7248\u672c \u8bf7\u70b9\u51fb pypi.org \u67e5\u770b\u53ef\u5b89\u88c5\u5386\u53f2\u7248\u672c\u3002 \u4f7f\u7528 # API\u6587\u6863 \uff1aAPI\u4f7f\u7528\u4ecb\u7ecd\uff0c\u5305\u62ec \u84b8\u998f \u3001 \u526a\u88c1 \u3001 \u91cf\u5316 \u548c \u6a21\u578b\u7ed3\u6784\u641c\u7d22 \u3002 \u793a\u4f8b \uff1a\u57fa\u4e8emnist\u548ccifar10\u7b49\u7b80\u5355\u5206\u7c7b\u4efb\u52a1\u7684\u6a21\u578b\u538b\u7f29\u793a\u4f8b\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u8be5\u90e8\u5206\u5feb\u901f\u4f53\u9a8c\u548c\u4e86\u89e3PaddleSlim\u7684\u529f\u80fd\u3002 \u5b9e\u8df5\u6559\u7a0b \uff1a\u7ecf\u5178\u6a21\u578b\u7684\u5206\u6790\u548c\u538b\u7f29\u5b9e\u9a8c\u6559\u7a0b\u3002 \u6a21\u578b\u5e93 \uff1a\u7ecf\u8fc7\u538b\u7f29\u7684\u5206\u7c7b\u3001\u68c0\u6d4b\u3001\u8bed\u4e49\u5206\u5272\u6a21\u578b\uff0c\u5305\u62ec\u6743\u91cd\u6587\u4ef6\u3001\u7f51\u7edc\u7ed3\u6784\u6587\u4ef6\u548c\u6027\u80fd\u6570\u636e\u3002 Paddle\u68c0\u6d4b\u5e93 \uff1a\u4ecb\u7ecd\u5982\u4f55\u5728\u68c0\u6d4b\u5e93\u4e2d\u4f7f\u7528PaddleSlim\u3002 Paddle\u5206\u5272\u5e93 \uff1a\u4ecb\u7ecd\u5982\u4f55\u5728\u5206\u5272\u5e93\u4e2d\u4f7f\u7528PaddleSlim\u3002 PaddleLite \uff1a\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u9884\u6d4b\u5e93PaddleLite\u90e8\u7f72PaddleSlim\u4ea7\u51fa\u7684\u6a21\u578b\u3002 \u8d21\u732e\u4e0e\u53cd\u9988 #","title":"Home"},{"location":"#paddleslim","text":"PaddleSlim\u662fPaddlePaddle\u6846\u67b6\u7684\u4e00\u4e2a\u5b50\u6a21\u5757\uff0c\u4e3b\u8981\u7528\u4e8e\u538b\u7f29\u56fe\u50cf\u9886\u57df\u6a21\u578b\u3002\u5728PaddleSlim\u4e2d\uff0c\u4e0d\u4ec5\u5b9e\u73b0\u4e86\u76ee\u524d\u4e3b\u6d41\u7684\u7f51\u7edc\u526a\u679d\u3001\u91cf\u5316\u3001\u84b8\u998f\u4e09\u79cd\u538b\u7f29\u7b56\u7565\uff0c\u8fd8\u5b9e\u73b0\u4e86\u8d85\u53c2\u6570\u641c\u7d22\u548c\u5c0f\u6a21\u578b\u7f51\u7edc\u7ed3\u6784\u641c\u7d22\u529f\u80fd\u3002\u5728\u540e\u7eed\u7248\u672c\u4e2d\uff0c\u4f1a\u6dfb\u52a0\u66f4\u591a\u7684\u538b\u7f29\u7b56\u7565\uff0c\u4ee5\u53ca\u5b8c\u5584\u5bf9NLP\u9886\u57df\u6a21\u578b\u7684\u652f\u6301\u3002","title":"PaddleSlim"},{"location":"#_1","text":"\u6a21\u578b\u526a\u88c1 \u652f\u6301\u901a\u9053\u5747\u5300\u6a21\u578b\u526a\u88c1\uff08uniform pruning) \u57fa\u4e8e\u654f\u611f\u5ea6\u7684\u6a21\u578b\u526a\u88c1 \u57fa\u4e8e\u8fdb\u5316\u7b97\u6cd5\u7684\u81ea\u52a8\u6a21\u578b\u526a\u88c1\u4e09\u79cd\u65b9\u5f0f \u91cf\u5316\u8bad\u7ec3 \u5728\u7ebf\u91cf\u5316\u8bad\u7ec3\uff08training aware\uff09 \u79bb\u7ebf\u91cf\u5316\uff08post training\uff09 \u652f\u6301\u5bf9\u6743\u91cd\u5168\u5c40\u91cf\u5316\u548cChannel-Wise\u91cf\u5316 \u84b8\u998f \u8f7b\u91cf\u795e\u7ecf\u7f51\u7edc\u7ed3\u6784\u81ea\u52a8\u641c\u7d22\uff08Light-NAS\uff09 \u652f\u6301\u57fa\u4e8e\u8fdb\u5316\u7b97\u6cd5\u7684\u8f7b\u91cf\u795e\u7ecf\u7f51\u7edc\u7ed3\u6784\u81ea\u52a8\u641c\u7d22\uff08Light-NAS\uff09 \u652f\u6301 FLOPS / \u786c\u4ef6\u5ef6\u65f6\u7ea6\u675f \u652f\u6301\u591a\u5e73\u53f0\u6a21\u578b\u5ef6\u65f6\u8bc4\u4f30","title":"\u529f\u80fd"},{"location":"#_2","text":"\u5b89\u88c5PaddleSlim\u524d\uff0c\u8bf7\u786e\u8ba4\u5df2\u6b63\u786e\u5b89\u88c5Paddle1.6\u7248\u672c\u6216\u66f4\u65b0\u7248\u672c\u3002Paddle\u5b89\u88c5\u8bf7\u53c2\u8003\uff1a Paddle\u5b89\u88c5\u6559\u7a0b \u3002 \u5b89\u88c5develop\u7248\u672c 1 2 3 git clone https : // github . com / PaddlePaddle / PaddleSlim . git cd PaddleSlim python setup . py install \u5b89\u88c5\u5b98\u65b9\u53d1\u5e03\u7684\u6700\u65b0\u7248\u672c 1 pip install paddleslim - i https : // pypi . 
org / simple \u5b89\u88c5\u5386\u53f2\u7248\u672c \u8bf7\u70b9\u51fb pypi.org \u67e5\u770b\u53ef\u5b89\u88c5\u5386\u53f2\u7248\u672c\u3002","title":"\u5b89\u88c5"},{"location":"#_3","text":"API\u6587\u6863 \uff1aAPI\u4f7f\u7528\u4ecb\u7ecd\uff0c\u5305\u62ec \u84b8\u998f \u3001 \u526a\u88c1 \u3001 \u91cf\u5316 \u548c \u6a21\u578b\u7ed3\u6784\u641c\u7d22 \u3002 \u793a\u4f8b \uff1a\u57fa\u4e8emnist\u548ccifar10\u7b49\u7b80\u5355\u5206\u7c7b\u4efb\u52a1\u7684\u6a21\u578b\u538b\u7f29\u793a\u4f8b\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u8be5\u90e8\u5206\u5feb\u901f\u4f53\u9a8c\u548c\u4e86\u89e3PaddleSlim\u7684\u529f\u80fd\u3002 \u5b9e\u8df5\u6559\u7a0b \uff1a\u7ecf\u5178\u6a21\u578b\u7684\u5206\u6790\u548c\u538b\u7f29\u5b9e\u9a8c\u6559\u7a0b\u3002 \u6a21\u578b\u5e93 \uff1a\u7ecf\u8fc7\u538b\u7f29\u7684\u5206\u7c7b\u3001\u68c0\u6d4b\u3001\u8bed\u4e49\u5206\u5272\u6a21\u578b\uff0c\u5305\u62ec\u6743\u91cd\u6587\u4ef6\u3001\u7f51\u7edc\u7ed3\u6784\u6587\u4ef6\u548c\u6027\u80fd\u6570\u636e\u3002 Paddle\u68c0\u6d4b\u5e93 \uff1a\u4ecb\u7ecd\u5982\u4f55\u5728\u68c0\u6d4b\u5e93\u4e2d\u4f7f\u7528PaddleSlim\u3002 Paddle\u5206\u5272\u5e93 \uff1a\u4ecb\u7ecd\u5982\u4f55\u5728\u5206\u5272\u5e93\u4e2d\u4f7f\u7528PaddleSlim\u3002 PaddleLite \uff1a\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u9884\u6d4b\u5e93PaddleLite\u90e8\u7f72PaddleSlim\u4ea7\u51fa\u7684\u6a21\u578b\u3002","title":"\u4f7f\u7528"},{"location":"#_4","text":"","title":"\u8d21\u732e\u4e0e\u53cd\u9988"},{"location":"model_zoo/","text":"1. \u91cf\u5316 # 1.1 \u56fe\u8c61\u5206\u7c7b # \u6570\u636e\u96c6\uff1aImageNet1000\u7c7b \u8bc4\u4ef7\u6307\u6807\uff1aTop-1/Top-5\u51c6\u786e\u7387 Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 MobileNetV1 FP32 70.99%/89.68% xx%/xx% xx%/xx% MobileNetV2 FP32 72.15%/90.65% ResNet50 FP32 76.50%/93.00% \u91cf\u5316\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 MobileNetV1 17M xxM xxM MobileNetV2 xxM ResNet50 99M 1.2 \u76ee\u6807\u68c0\u6d4b # \u6570\u636e\u96c6\uff1aCOCO 2017 Model \u8f93\u5165\u5c3a\u5bf8 Image/GPU FP32 BoxAP \u79bb\u7ebf\u91cf\u5316 BoxAP \u91cf\u5316\u8bad\u7ec3 BoxAP MobileNet-V1-YOLOv3 608 8 29.3 xx xx MobileNet-V1-YOLOv3 416 MobileNet-V1-YOLOv3 320 R50-dcn-YOLOv3 608 41.4 R50-dcn-YOLOv3 416 R50-dcn-YOLOv3 320 \u91cf\u5316\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 MobileNet-V1-YOLOv3 xxM xxM xxM R50-dcn-YOLOv3 xxM # \u6570\u636e\u96c6\uff1aWIDER-FACE \u8bc4\u4ef7\u6307\u6807\uff1aEasy/Medium/Hard mAP Model \u8f93\u5165\u5c3a\u5bf8 Image/GPU FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 BlazeFace 640 8 0.915/0.892/0.797 xx/xx/xx xx/xx/xx BlazeFace-Lite 640 0.909/0.885/0.781 BlazeFace-NAS 640 0.837/0.807/0.658 \u91cf\u5316\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 BlazeFace xxM xxM xxM BlazeFace-Lite xxM BlazeFace-NAS xxM # 1.3 \u56fe\u50cf\u5206\u5272 # \u6570\u636e\u96c6\uff1aCityscapes Model FP32 mIoU \u79bb\u7ebf\u91cf\u5316 mIoU \u91cf\u5316\u8bad\u7ec3 mIoU DeepLabv3+/MobileNetv1 63.26 xx xx DeepLabv3+/MobileNetv2 69.81 \u91cf\u5316\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 DeepLabv3+/MobileNetv1 xxM xxM xxM DeepLabv3+/MobileNetv2 xxM # 2. 
\u526a\u679d # 2.1 \u56fe\u50cf\u5206\u7c7b # \u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model Top-1/Top-5 MobileNetV1 70.99%/89.68% MobileNetV1 uniform -50% MobileNetV1 sensitive -xx% MobileNetV2 72.15%/90.65% MobileNetV2 uniform -50% MobileNetV2 sensitive -xx% ResNet34 74.57%/92.14% ResNet34 uniform -50% ResNet34 auto -50% \u526a\u679d\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u548c\u8ba1\u7b97\u91cf\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model baseline FLOPs baseline size \u526a\u679d\u540e FlOPs \u526a\u679d\u540e size MobileNetV1 xx xx xx xx MobileNetV2 xx ResNet34 xx # 2.2 \u76ee\u6807\u68c0\u6d4b # \u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u6570\u636e\u96c6 \u8f93\u5165\u5c3a\u5bf8 Image/GPU baseline mAP \u654f\u611f\u5ea6\u526a\u679d mAP MobileNet-V1-YOLOv3 Pasacl VOC 608 8 76.2 77.59 (-50%) MobileNet-V1-YOLOv3 Pasacl VOC 416 76.7 xx MobileNet-V1-YOLOv3 Pasacl VOC 320 75.2 xx MobileNet-V1-YOLOv3 COCO 608 29.3 29.56 (-20%) MobileNet-V1-YOLOv3 COCO 416 29.3 xx MobileNet-V1-YOLOv3 COCO 320 27.1 xx R50-dcn-YOLOv3 COCO 608 41.4 37.8 (-30%) R50-dcn-YOLOv3 COCO 416 R50-dcn-YOLOv3 COCO 320 \u526a\u679d\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u548c\u8ba1\u7b97\u91cf\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model baseline FLOPs baseline size \u526a\u679d\u540e FlOPs \u526a\u679d\u540e size MobileNet-V1-YOLOv3-VOC xx xxM xx xxM MobileNet-V1-YOLOv3-COCO xx R50-dcn-YOLOv3-COCO xx 2.3 \u56fe\u50cf\u5206\u5272 # \u6570\u636e\u96c6\uff1aCityscapes Model Baseline mIoU xx\u526a\u679d mIoU DeepLabv3+/MobileNetv2 69.81 xx \u526a\u679d\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u548c\u8ba1\u7b97\u91cf\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model baseline FLOPs baseline size \u526a\u679d\u540e FlOPs \u526a\u679d\u540e size DeepLabv3+/MobileNetv2 xx xxM xx xxM 3. \u84b8\u998f # 3.1 \u56fe\u8c61\u5206\u7c7b # \u6570\u636e\u96c6\uff1aImageNet1000\u7c7b \u8bc4\u4ef7\u6307\u6807\uff1aTop-1/Top-5\u51c6\u786e\u7387 Model baseline \u84b8\u998f\u540e MobileNetV1 70.99%/89.68% 72.79%/90.69% (teacher: ResNet50_vd 1 ) MobileNetV2 72.15%/90.65% 74.30%/91.52% (teacher: ResNet50_vd) ResNet50 76.50%/93.00% 77.40%/93.48% (teacher: ResNet101 2 ) Note [1] \uff1a ResNet50_vd \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a79.12%/94.44% \u5e26_vd\u540e\u7f00\u4ee3\u8868\u5f00\u542f\u4e86Mixup\u8bad\u7ec3\uff0cMixup\u76f8\u5173\u4ecb\u7ecd\u53c2\u8003 mixup: Beyond Empirical Risk Minimization [2] \uff1a ResNet101 \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a77.56%/93.64% 3.2 \u76ee\u6807\u68c0\u6d4b # \u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u6570\u636e\u96c6 \u8f93\u5165\u5c3a\u5bf8 Image/GPU baseline \u84b8\u998f\u540e mAP MobileNet-V1-YOLOv3 Pasacl VOC 640 16 76.2 79.0 (teacher: ResNet34-YOLOv3-VOC 3 ) MobileNet-V1-YOLOv3 Pasacl VOC 416 76.7 78.2 MobileNet-V1-YOLOv3 Pasacl VOC 320 75.2 75.5 MobileNet-V1-YOLOv3 COCO 640 29.3 31.0 (teacher: ResNet34-YOLOv3-COCO 4 ) MobileNet-V1-YOLOv3 COCO 416 MobileNet-V1-YOLOv3 COCO 320 Note [3] \uff1a ResNet34-YOLOv3-VOC \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a82.6 [4] \uff1a ResNet34-YOLOv3-COCO \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a36.2","title":"\u6a21\u578b\u5e93"},{"location":"model_zoo/#1","text":"","title":"1. 
\u91cf\u5316"},{"location":"model_zoo/#11","text":"\u6570\u636e\u96c6\uff1aImageNet1000\u7c7b \u8bc4\u4ef7\u6307\u6807\uff1aTop-1/Top-5\u51c6\u786e\u7387 Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 MobileNetV1 FP32 70.99%/89.68% xx%/xx% xx%/xx% MobileNetV2 FP32 72.15%/90.65% ResNet50 FP32 76.50%/93.00% \u91cf\u5316\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 MobileNetV1 17M xxM xxM MobileNetV2 xxM ResNet50 99M","title":"1.1 \u56fe\u8c61\u5206\u7c7b"},{"location":"model_zoo/#12","text":"\u6570\u636e\u96c6\uff1aCOCO 2017 Model \u8f93\u5165\u5c3a\u5bf8 Image/GPU FP32 BoxAP \u79bb\u7ebf\u91cf\u5316 BoxAP \u91cf\u5316\u8bad\u7ec3 BoxAP MobileNet-V1-YOLOv3 608 8 29.3 xx xx MobileNet-V1-YOLOv3 416 MobileNet-V1-YOLOv3 320 R50-dcn-YOLOv3 608 41.4 R50-dcn-YOLOv3 416 R50-dcn-YOLOv3 320 \u91cf\u5316\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 MobileNet-V1-YOLOv3 xxM xxM xxM R50-dcn-YOLOv3 xxM","title":"1.2 \u76ee\u6807\u68c0\u6d4b"},{"location":"model_zoo/#13","text":"\u6570\u636e\u96c6\uff1aCityscapes Model FP32 mIoU \u79bb\u7ebf\u91cf\u5316 mIoU \u91cf\u5316\u8bad\u7ec3 mIoU DeepLabv3+/MobileNetv1 63.26 xx xx DeepLabv3+/MobileNetv2 69.81 \u91cf\u5316\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 DeepLabv3+/MobileNetv1 xxM xxM xxM DeepLabv3+/MobileNetv2 xxM","title":"1.3 \u56fe\u50cf\u5206\u5272"},{"location":"model_zoo/#2","text":"","title":"2. \u526a\u679d"},{"location":"model_zoo/#21","text":"\u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model Top-1/Top-5 MobileNetV1 70.99%/89.68% MobileNetV1 uniform -50% MobileNetV1 sensitive -xx% MobileNetV2 72.15%/90.65% MobileNetV2 uniform -50% MobileNetV2 sensitive -xx% ResNet34 74.57%/92.14% ResNet34 uniform -50% ResNet34 auto -50% \u526a\u679d\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u548c\u8ba1\u7b97\u91cf\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model baseline FLOPs baseline size \u526a\u679d\u540e FlOPs \u526a\u679d\u540e size MobileNetV1 xx xx xx xx MobileNetV2 xx ResNet34 xx","title":"2.1 \u56fe\u50cf\u5206\u7c7b"},{"location":"model_zoo/#22","text":"\u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u6570\u636e\u96c6 \u8f93\u5165\u5c3a\u5bf8 Image/GPU baseline mAP \u654f\u611f\u5ea6\u526a\u679d mAP MobileNet-V1-YOLOv3 Pasacl VOC 608 8 76.2 77.59 (-50%) MobileNet-V1-YOLOv3 Pasacl VOC 416 76.7 xx MobileNet-V1-YOLOv3 Pasacl VOC 320 75.2 xx MobileNet-V1-YOLOv3 COCO 608 29.3 29.56 (-20%) MobileNet-V1-YOLOv3 COCO 416 29.3 xx MobileNet-V1-YOLOv3 COCO 320 27.1 xx R50-dcn-YOLOv3 COCO 608 41.4 37.8 (-30%) R50-dcn-YOLOv3 COCO 416 R50-dcn-YOLOv3 COCO 320 \u526a\u679d\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u548c\u8ba1\u7b97\u91cf\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model baseline FLOPs baseline size \u526a\u679d\u540e FlOPs \u526a\u679d\u540e size MobileNet-V1-YOLOv3-VOC xx xxM xx xxM MobileNet-V1-YOLOv3-COCO xx R50-dcn-YOLOv3-COCO xx","title":"2.2 \u76ee\u6807\u68c0\u6d4b"},{"location":"model_zoo/#23","text":"\u6570\u636e\u96c6\uff1aCityscapes Model Baseline mIoU xx\u526a\u679d mIoU DeepLabv3+/MobileNetv2 69.81 xx \u526a\u679d\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u548c\u8ba1\u7b97\u91cf\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model baseline FLOPs baseline size \u526a\u679d\u540e FlOPs 
\u526a\u679d\u540e size DeepLabv3+/MobileNetv2 xx xxM xx xxM","title":"2.3 \u56fe\u50cf\u5206\u5272"},{"location":"model_zoo/#3","text":"","title":"3. \u84b8\u998f"},{"location":"model_zoo/#31","text":"\u6570\u636e\u96c6\uff1aImageNet1000\u7c7b \u8bc4\u4ef7\u6307\u6807\uff1aTop-1/Top-5\u51c6\u786e\u7387 Model baseline \u84b8\u998f\u540e MobileNetV1 70.99%/89.68% 72.79%/90.69% (teacher: ResNet50_vd 1 ) MobileNetV2 72.15%/90.65% 74.30%/91.52% (teacher: ResNet50_vd) ResNet50 76.50%/93.00% 77.40%/93.48% (teacher: ResNet101 2 ) Note [1] \uff1a ResNet50_vd \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a79.12%/94.44% \u5e26_vd\u540e\u7f00\u4ee3\u8868\u5f00\u542f\u4e86Mixup\u8bad\u7ec3\uff0cMixup\u76f8\u5173\u4ecb\u7ecd\u53c2\u8003 mixup: Beyond Empirical Risk Minimization [2] \uff1a ResNet101 \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a77.56%/93.64%","title":"3.1 \u56fe\u8c61\u5206\u7c7b"},{"location":"model_zoo/#32","text":"\u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u6570\u636e\u96c6 \u8f93\u5165\u5c3a\u5bf8 Image/GPU baseline \u84b8\u998f\u540e mAP MobileNet-V1-YOLOv3 Pasacl VOC 640 16 76.2 79.0 (teacher: ResNet34-YOLOv3-VOC 3 ) MobileNet-V1-YOLOv3 Pasacl VOC 416 76.7 78.2 MobileNet-V1-YOLOv3 Pasacl VOC 320 75.2 75.5 MobileNet-V1-YOLOv3 COCO 640 29.3 31.0 (teacher: ResNet34-YOLOv3-COCO 4 ) MobileNet-V1-YOLOv3 COCO 416 MobileNet-V1-YOLOv3 COCO 320 Note [3] \uff1a ResNet34-YOLOv3-VOC \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a82.6 [4] \uff1a ResNet34-YOLOv3-COCO \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a36.2","title":"3.2 \u76ee\u6807\u68c0\u6d4b"},{"location":"model_zoo2/","text":"1. \u91cf\u5316 # 1.1 \u56fe\u8c61\u5206\u7c7b # \u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model Top-1/Top-5 \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 \u4e0b\u8f7d MobileNetV1 FP32 70.99%/89.68% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 quant_post xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 quant_aware xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 FP32 72.15%/90.65% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 quant_post xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 quant_aware xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 ResNet50 FP32 76.50%/93.00% xx \u4e0b\u8f7d\u94fe\u63a5 ResNet50 quant_post xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 ResNet50 quant_aware xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 1.2 \u76ee\u6807\u68c0\u6d4b # \u6570\u636e\u96c6\uff1aCOCO 2017 Model Image/GPU \u8f93\u5165608 Box AP \u8f93\u5165416 Box AP \u8f93\u5165320 Box AP \u6a21\u578b\u5927\u5c0f(MB) \u4e0b\u8f7d MobileNet-V1-YOLOv3 FP32 8 29.3 29.3 27.1 xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 quant_post 8 xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 quant_aware 8 xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 FP32 8 41.4 - - xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 quant_post 8 xx - - xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 quant_aware 8 xx - - xx \u4e0b\u8f7d\u94fe\u63a5 \u6570\u636e\u96c6\uff1aWIDER-FACE Model Image/GPU \u8f93\u5165\u5c3a\u5bf8 Easy/Medium/Hard \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 \u4e0b\u8f7d BlazeFace FP32 8 640 0.915/0.892/0.797 xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace quant_post 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace quant_aware 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-Lite FP32 8 640 0.909/0.885/0.781 xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-Lite quant_post 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-Lite quant_aware 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-NAS FP32 8 640 
0.837/0.807/0.658 xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-NAS quant_post 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-NAS quant_aware 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 # 1.3 \u56fe\u50cf\u5206\u5272 # \u6570\u636e\u96c6\uff1aCityscapes Model mIoU \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 \u4e0b\u8f7d DeepLabv3+/MobileNetv1 63.26 xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv1 quant_post xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv1 quant_aware xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 69.81 xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 quant_post xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 quant_aware xx xx \u4e0b\u8f7d\u94fe\u63a5 # 2. \u526a\u679d # 2.1 \u56fe\u50cf\u5206\u7c7b # \u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model Top-1/Top-5 \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 FLOPs \u4e0b\u8f7d MobileNetV1 70.99%/89.68% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 uniform -50% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 sensitive -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 72.15%/90.65% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 uniform -50% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 sensitive -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 ResNet34 74.57%/92.14% xx xx \u4e0b\u8f7d\u94fe\u63a5 ResNet34 uniform -50% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 ResNet34 auto -50% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 # 2.2 \u76ee\u6807\u68c0\u6d4b # \u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u6570\u636e\u96c6 Image/GPU \u8f93\u5165608 mAP \u8f93\u5165416 mAP \u8f93\u5165320 mAP \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 FLOPs \u4e0b\u8f7d MobileNet-V1-YOLOv3 Pasacl VOC 8 76.2 76.7 75.3 xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 prune xx% Pasacl VOC 8 xx xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 COCO 8 29.3 29.3 27.1 xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 prune xx% COCO 8 xx xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 COCO 8 41.4 - - xx xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 prune xx% COCO 8 xx - - xx xx \u4e0b\u8f7d\u94fe\u63a5 2.3 \u56fe\u50cf\u5206\u5272 # \u6570\u636e\u96c6\uff1aCityscapes Model mIoU \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 FLOPs \u4e0b\u8f7d DeepLabv3+/MobileNetv2 69.81 xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 prune xx% xx xx xx \u4e0b\u8f7d\u94fe\u63a5 3. 
\u84b8\u998f # 3.1 \u56fe\u8c61\u5206\u7c7b # \u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model baseline \u4e0b\u8f7d MobileNetV1 70.99%/89.68% \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 distilled (teacher: ResNet50_vd 1 ) 72.79%/90.69% \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 72.15%/90.65% \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 distilled (teacher: ResNet50_vd) 74.30%/91.52% \u4e0b\u8f7d\u94fe\u63a5 ResNet50 76.50%/93.00% \u4e0b\u8f7d\u94fe\u63a5 ResNet50 distilled (teacher: ResNet101 2 ) 77.40%/93.48% \u4e0b\u8f7d\u94fe\u63a5 Note [1] \uff1a ResNet50_vd \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a79.12%/94.44% \u5e26_vd\u540e\u7f00\u4ee3\u8868\u5f00\u542f\u4e86Mixup\u8bad\u7ec3\uff0cMixup\u76f8\u5173\u4ecb\u7ecd\u53c2\u8003 mixup: Beyond Empirical Risk Minimization [2] \uff1a ResNet101 \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a77.56%/93.64% 3.2 \u76ee\u6807\u68c0\u6d4b # \u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u6570\u636e\u96c6 Image/GPU \u8f93\u5165640 mAP \u8f93\u5165416 mAP \u8f93\u5165320 mAP \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 Pasacl VOC 16 76.2 76.7 75.3 \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 distilled (teacher: ResNet34-YOLOv3-VOC 3 ) Pasacl VOC 16 xx xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 COCO 16 29.3 29.3 27.1 \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 distilled (teacher: ResNet34-YOLOv3-COCO 4 ) COCO 16 xx xx xx \u4e0b\u8f7d\u94fe\u63a5 Note [3] \uff1a ResNet34-YOLOv3-VOC \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a82.6 [4] \uff1a ResNet34-YOLOv3-COCO \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a36.2","title":"\u6a21\u578b\u5e932"},{"location":"model_zoo2/#1","text":"","title":"1. \u91cf\u5316"},{"location":"model_zoo2/#11","text":"\u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model Top-1/Top-5 \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 \u4e0b\u8f7d MobileNetV1 FP32 70.99%/89.68% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 quant_post xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 quant_aware xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 FP32 72.15%/90.65% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 quant_post xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 quant_aware xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 ResNet50 FP32 76.50%/93.00% xx \u4e0b\u8f7d\u94fe\u63a5 ResNet50 quant_post xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 ResNet50 quant_aware xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5","title":"1.1 \u56fe\u8c61\u5206\u7c7b"},{"location":"model_zoo2/#12","text":"\u6570\u636e\u96c6\uff1aCOCO 2017 Model Image/GPU \u8f93\u5165608 Box AP \u8f93\u5165416 Box AP \u8f93\u5165320 Box AP \u6a21\u578b\u5927\u5c0f(MB) \u4e0b\u8f7d MobileNet-V1-YOLOv3 FP32 8 29.3 29.3 27.1 xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 quant_post 8 xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 quant_aware 8 xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 FP32 8 41.4 - - xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 quant_post 8 xx - - xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 quant_aware 8 xx - - xx \u4e0b\u8f7d\u94fe\u63a5 \u6570\u636e\u96c6\uff1aWIDER-FACE Model Image/GPU \u8f93\u5165\u5c3a\u5bf8 Easy/Medium/Hard \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 \u4e0b\u8f7d BlazeFace FP32 8 640 0.915/0.892/0.797 xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace quant_post 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace quant_aware 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-Lite FP32 8 640 0.909/0.885/0.781 xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-Lite quant_post 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-Lite 
quant_aware 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-NAS FP32 8 640 0.837/0.807/0.658 xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-NAS quant_post 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-NAS quant_aware 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5","title":"1.2 \u76ee\u6807\u68c0\u6d4b"},{"location":"model_zoo2/#13","text":"\u6570\u636e\u96c6\uff1aCityscapes Model mIoU \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 \u4e0b\u8f7d DeepLabv3+/MobileNetv1 63.26 xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv1 quant_post xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv1 quant_aware xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 69.81 xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 quant_post xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 quant_aware xx xx \u4e0b\u8f7d\u94fe\u63a5","title":"1.3 \u56fe\u50cf\u5206\u5272"},{"location":"model_zoo2/#2","text":"","title":"2. \u526a\u679d"},{"location":"model_zoo2/#21","text":"\u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model Top-1/Top-5 \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 FLOPs \u4e0b\u8f7d MobileNetV1 70.99%/89.68% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 uniform -50% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 sensitive -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 72.15%/90.65% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 uniform -50% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 sensitive -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 ResNet34 74.57%/92.14% xx xx \u4e0b\u8f7d\u94fe\u63a5 ResNet34 uniform -50% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 ResNet34 auto -50% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5","title":"2.1 \u56fe\u50cf\u5206\u7c7b"},{"location":"model_zoo2/#22","text":"\u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u6570\u636e\u96c6 Image/GPU \u8f93\u5165608 mAP \u8f93\u5165416 mAP \u8f93\u5165320 mAP \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 FLOPs \u4e0b\u8f7d MobileNet-V1-YOLOv3 Pasacl VOC 8 76.2 76.7 75.3 xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 prune xx% Pasacl VOC 8 xx xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 COCO 8 29.3 29.3 27.1 xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 prune xx% COCO 8 xx xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 COCO 8 41.4 - - xx xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 prune xx% COCO 8 xx - - xx xx \u4e0b\u8f7d\u94fe\u63a5","title":"2.2 \u76ee\u6807\u68c0\u6d4b"},{"location":"model_zoo2/#23","text":"\u6570\u636e\u96c6\uff1aCityscapes Model mIoU \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 FLOPs \u4e0b\u8f7d DeepLabv3+/MobileNetv2 69.81 xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 prune xx% xx xx xx \u4e0b\u8f7d\u94fe\u63a5","title":"2.3 \u56fe\u50cf\u5206\u5272"},{"location":"model_zoo2/#3","text":"","title":"3. 
\u84b8\u998f"},{"location":"model_zoo2/#31","text":"\u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model baseline \u4e0b\u8f7d MobileNetV1 70.99%/89.68% \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 distilled (teacher: ResNet50_vd 1 ) 72.79%/90.69% \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 72.15%/90.65% \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 distilled (teacher: ResNet50_vd) 74.30%/91.52% \u4e0b\u8f7d\u94fe\u63a5 ResNet50 76.50%/93.00% \u4e0b\u8f7d\u94fe\u63a5 ResNet50 distilled (teacher: ResNet101 2 ) 77.40%/93.48% \u4e0b\u8f7d\u94fe\u63a5 Note [1] \uff1a ResNet50_vd \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a79.12%/94.44% \u5e26_vd\u540e\u7f00\u4ee3\u8868\u5f00\u542f\u4e86Mixup\u8bad\u7ec3\uff0cMixup\u76f8\u5173\u4ecb\u7ecd\u53c2\u8003 mixup: Beyond Empirical Risk Minimization [2] \uff1a ResNet101 \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a77.56%/93.64%","title":"3.1 \u56fe\u8c61\u5206\u7c7b"},{"location":"model_zoo2/#32","text":"\u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u6570\u636e\u96c6 Image/GPU \u8f93\u5165640 mAP \u8f93\u5165416 mAP \u8f93\u5165320 mAP \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 Pasacl VOC 16 76.2 76.7 75.3 \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 distilled (teacher: ResNet34-YOLOv3-VOC 3 ) Pasacl VOC 16 xx xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 COCO 16 29.3 29.3 27.1 \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 distilled (teacher: ResNet34-YOLOv3-COCO 4 ) COCO 16 xx xx xx \u4e0b\u8f7d\u94fe\u63a5 Note [3] \uff1a ResNet34-YOLOv3-VOC \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a82.6 [4] \uff1a ResNet34-YOLOv3-COCO \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a36.2","title":"3.2 \u76ee\u6807\u68c0\u6d4b"},{"location":"table_latency/","text":"\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868 # \u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u7528\u4e8e\u5feb\u901f\u8bc4\u4f30\u4e00\u4e2a\u6a21\u578b\u5728\u7279\u5b9a\u786c\u4ef6\u73af\u5883\u548c\u63a8\u7406\u5f15\u64ce\u4e0a\u7684\u63a8\u7406\u901f\u5ea6\u3002 \u8be5\u6587\u6863\u4e3b\u8981\u7528\u4e8e\u5b9a\u4e49PaddleSlim\u652f\u6301\u7684\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u7684\u683c\u5f0f\u3002 \u6982\u8ff0 # \u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u4e2d\u5b58\u653e\u7740\u6240\u6709\u53ef\u80fd\u7684\u64cd\u4f5c\u5bf9\u5e94\u7684\u5ef6\u65f6\u4fe1\u606f\uff0c\u8be5\u8868\u4e2d\u7684\u4e00\u4e2a\u64cd\u4f5c\u5305\u62ec\u64cd\u4f5c\u7c7b\u578b\u548c\u64cd\u4f5c\u53c2\u6570\uff0c\u6bd4\u5982\uff1a\u64cd\u4f5c\u7c7b\u578b\u53ef\u4ee5\u662f conv2d \uff0c\u5bf9\u5e94\u7684\u64cd\u4f5c\u53c2\u6570\u6709\u8f93\u5165\u7279\u5f81\u56fe\u7684\u5927\u5c0f\u3001\u5377\u79ef\u6838\u4e2a\u6570\u3001\u5377\u79ef\u6838\u5927\u5c0f\u7b49\u3002 \u7ed9\u5b9a\u64cd\u4f5c\u7684\u5ef6\u65f6\u4f9d\u8d56\u4e8e\u786c\u4ef6\u73af\u5883\u548c\u63a8\u7406\u5f15\u64ce\u3002 \u6574\u4f53\u683c\u5f0f # \u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u4ee5\u6587\u4ef6\u6216\u591a\u884c\u5b57\u7b26\u4e32\u7684\u5f62\u5f0f\u4fdd\u5b58\u3002 \u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u7b2c\u4e00\u884c\u4fdd\u5b58\u7248\u672c\u4fe1\u606f\uff0c\u540e\u7eed\u6bcf\u884c\u4e3a\u4e00\u4e2a\u64cd\u4f5c\u548c\u5bf9\u5e94\u7684\u5ef6\u65f6\u4fe1\u606f\u3002 \u7248\u672c\u4fe1\u606f # \u7248\u672c\u4fe1\u606f\u4ee5\u82f1\u6587\u5b57\u7b26\u9017\u53f7\u5206\u5272\uff0c\u5185\u5bb9\u4f9d\u6b21\u4e3a\u786c\u4ef6\u73af\u5883\u540d\u79f0\u3001\u63a8\u7406\u5f15\u64ce\u540d\u79f0\u548c\u65f6\u95f4\u6233\u3002 \u786c\u4ef6\u73af\u5883\u540d\u79f0\uff1a 
\u7528\u4e8e\u6807\u8bc6\u786c\u4ef6\u73af\u5883\uff0c\u53ef\u4ee5\u5305\u542b\u8ba1\u7b97\u67b6\u6784\u7c7b\u578b\u3001\u7248\u672c\u53f7\u7b49\u4fe1\u606f\u3002 \u63a8\u7406\u5f15\u64ce\u540d\u79f0\uff1a \u7528\u4e8e\u6807\u8bc6\u63a8\u7406\u5f15\u64ce\uff0c\u53ef\u4ee5\u5305\u542b\u63a8\u7406\u5f15\u64ce\u540d\u79f0\u3001\u7248\u672c\u53f7\u3001\u4f18\u5316\u9009\u9879\u7b49\u4fe1\u606f\u3002 \u65f6\u95f4\u6233\uff1a \u8be5\u8bc4\u4f30\u8868\u7684\u521b\u5efa\u65f6\u95f4\u3002 \u64cd\u4f5c\u4fe1\u606f # \u64cd\u4f5c\u4fe1\u606f\u5b57\u6bb5\u4e4b\u95f4\u4ee5\u9017\u53f7\u5206\u5272\u3002\u64cd\u4f5c\u4fe1\u606f\u4e0e\u5ef6\u8fdf\u4fe1\u606f\u4e4b\u95f4\u4ee5\u5236\u8868\u7b26\u5206\u5272\u3002 conv2d # \u683c\u5f0f 1 op_type , flag_bias , flag_relu , n_in , c_in , h_in , w_in , c_out , groups , kernel , padding , stride , dilation \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 flag_bias (int) - \u662f\u5426\u6709 bias\uff080\uff1a\u65e0\uff0c1\uff1a\u6709\uff09\u3002 flag_relu (int) - \u662f\u5426\u6709 relu\uff080\uff1a\u65e0\uff0c1\uff1a\u6709\uff09\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 c_out (int) - \u8f93\u51fa Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 groups (int) - \u5377\u79ef\u4e8c\u7ef4\u5c42\uff08Conv2D Layer\uff09\u7684\u7ec4\u6570\u3002 kernel (int) - \u5377\u79ef\u6838\u5927\u5c0f\u3002 padding (int) - \u586b\u5145 (padding) \u5927\u5c0f\u3002 stride (int) - \u6b65\u957f (stride) \u5927\u5c0f\u3002 dilation (int) - \u81a8\u80c0 (dilation) \u5927\u5c0f\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4 activation # \u683c\u5f0f 1 op_type , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4 batch_norm # \u683c\u5f0f 1 op_type , active_type , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 active_type (string|None) - \u6fc0\u6d3b\u51fd\u6570\u7c7b\u578b\uff0c\u5305\u542b\uff1arelu, prelu, sigmoid, relu6, tanh\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4 eltwise # \u683c\u5f0f 1 op_type , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4 pooling # \u683c\u5f0f 1 op_type , 
flag_global_pooling , n_in , c_in , h_in , w_in , kernel , padding , stride , ceil_mode , pool_type \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 flag_global_pooling (int) - \u662f\u5426\u4e3a\u5168\u5c40\u6c60\u5316\uff080\uff1a\u4e0d\u662f\uff0c1\uff1a\u662f\uff09\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 kernel (int) - \u5377\u79ef\u6838\u5927\u5c0f\u3002 padding (int) - \u586b\u5145 (padding) \u5927\u5c0f\u3002 stride (int) - \u6b65\u957f (stride) \u5927\u5c0f\u3002 ceil_mode (int) - \u662f\u5426\u7528 ceil \u51fd\u6570\u8ba1\u7b97\u8f93\u51fa\u9ad8\u5ea6\u548c\u5bbd\u5ea6\u30020 \u8868\u793a\u4f7f\u7528 floor \u51fd\u6570\uff0c1 \u8868\u793a\u4f7f\u7528 ceil \u51fd\u6570\u3002 pool_type (int) - \u6c60\u5316\u7c7b\u578b\uff0c\u5176\u4e2d 1 \u8868\u793a pooling_max\uff0c2 \u8868\u793a pooling_average_include_padding\uff0c3 \u8868\u793a pooling_average_exclude_padding\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4 softmax # \u683c\u5f0f 1 op_type , axis , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 axis (int) - \u6267\u884c softmax \u8ba1\u7b97\u7684\u7ef4\u5ea6\u7d22\u5f15\uff0c\u5e94\u8be5\u5728 [\u22121\uff0crank \u2212 1] \u8303\u56f4\u5185\uff0c\u5176\u4e2d rank \u662f\u8f93\u5165\u53d8\u91cf\u7684\u79e9\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4","title":"\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868"},{"location":"table_latency/#_1","text":"\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u7528\u4e8e\u5feb\u901f\u8bc4\u4f30\u4e00\u4e2a\u6a21\u578b\u5728\u7279\u5b9a\u786c\u4ef6\u73af\u5883\u548c\u63a8\u7406\u5f15\u64ce\u4e0a\u7684\u63a8\u7406\u901f\u5ea6\u3002 \u8be5\u6587\u6863\u4e3b\u8981\u7528\u4e8e\u5b9a\u4e49PaddleSlim\u652f\u6301\u7684\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u7684\u683c\u5f0f\u3002","title":"\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868"},{"location":"table_latency/#_2","text":"\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u4e2d\u5b58\u653e\u7740\u6240\u6709\u53ef\u80fd\u7684\u64cd\u4f5c\u5bf9\u5e94\u7684\u5ef6\u65f6\u4fe1\u606f\uff0c\u8be5\u8868\u4e2d\u7684\u4e00\u4e2a\u64cd\u4f5c\u5305\u62ec\u64cd\u4f5c\u7c7b\u578b\u548c\u64cd\u4f5c\u53c2\u6570\uff0c\u6bd4\u5982\uff1a\u64cd\u4f5c\u7c7b\u578b\u53ef\u4ee5\u662f conv2d \uff0c\u5bf9\u5e94\u7684\u64cd\u4f5c\u53c2\u6570\u6709\u8f93\u5165\u7279\u5f81\u56fe\u7684\u5927\u5c0f\u3001\u5377\u79ef\u6838\u4e2a\u6570\u3001\u5377\u79ef\u6838\u5927\u5c0f\u7b49\u3002 \u7ed9\u5b9a\u64cd\u4f5c\u7684\u5ef6\u65f6\u4f9d\u8d56\u4e8e\u786c\u4ef6\u73af\u5883\u548c\u63a8\u7406\u5f15\u64ce\u3002","title":"\u6982\u8ff0"},{"location":"table_latency/#_3","text":"\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u4ee5\u6587\u4ef6\u6216\u591a\u884c\u5b57\u7b26\u4e32\u7684\u5f62\u5f0f\u4fdd\u5b58\u3002 
\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u7b2c\u4e00\u884c\u4fdd\u5b58\u7248\u672c\u4fe1\u606f\uff0c\u540e\u7eed\u6bcf\u884c\u4e3a\u4e00\u4e2a\u64cd\u4f5c\u548c\u5bf9\u5e94\u7684\u5ef6\u65f6\u4fe1\u606f\u3002","title":"\u6574\u4f53\u683c\u5f0f"},{"location":"table_latency/#_4","text":"\u7248\u672c\u4fe1\u606f\u4ee5\u82f1\u6587\u5b57\u7b26\u9017\u53f7\u5206\u5272\uff0c\u5185\u5bb9\u4f9d\u6b21\u4e3a\u786c\u4ef6\u73af\u5883\u540d\u79f0\u3001\u63a8\u7406\u5f15\u64ce\u540d\u79f0\u548c\u65f6\u95f4\u6233\u3002 \u786c\u4ef6\u73af\u5883\u540d\u79f0\uff1a \u7528\u4e8e\u6807\u8bc6\u786c\u4ef6\u73af\u5883\uff0c\u53ef\u4ee5\u5305\u542b\u8ba1\u7b97\u67b6\u6784\u7c7b\u578b\u3001\u7248\u672c\u53f7\u7b49\u4fe1\u606f\u3002 \u63a8\u7406\u5f15\u64ce\u540d\u79f0\uff1a \u7528\u4e8e\u6807\u8bc6\u63a8\u7406\u5f15\u64ce\uff0c\u53ef\u4ee5\u5305\u542b\u63a8\u7406\u5f15\u64ce\u540d\u79f0\u3001\u7248\u672c\u53f7\u3001\u4f18\u5316\u9009\u9879\u7b49\u4fe1\u606f\u3002 \u65f6\u95f4\u6233\uff1a \u8be5\u8bc4\u4f30\u8868\u7684\u521b\u5efa\u65f6\u95f4\u3002","title":"\u7248\u672c\u4fe1\u606f"},{"location":"table_latency/#_5","text":"\u64cd\u4f5c\u4fe1\u606f\u5b57\u6bb5\u4e4b\u95f4\u4ee5\u9017\u53f7\u5206\u5272\u3002\u64cd\u4f5c\u4fe1\u606f\u4e0e\u5ef6\u8fdf\u4fe1\u606f\u4e4b\u95f4\u4ee5\u5236\u8868\u7b26\u5206\u5272\u3002","title":"\u64cd\u4f5c\u4fe1\u606f"},{"location":"table_latency/#conv2d","text":"\u683c\u5f0f 1 op_type , flag_bias , flag_relu , n_in , c_in , h_in , w_in , c_out , groups , kernel , padding , stride , dilation \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 flag_bias (int) - \u662f\u5426\u6709 bias\uff080\uff1a\u65e0\uff0c1\uff1a\u6709\uff09\u3002 flag_relu (int) - \u662f\u5426\u6709 relu\uff080\uff1a\u65e0\uff0c1\uff1a\u6709\uff09\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 c_out (int) - \u8f93\u51fa Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 groups (int) - \u5377\u79ef\u4e8c\u7ef4\u5c42\uff08Conv2D Layer\uff09\u7684\u7ec4\u6570\u3002 kernel (int) - \u5377\u79ef\u6838\u5927\u5c0f\u3002 padding (int) - \u586b\u5145 (padding) \u5927\u5c0f\u3002 stride (int) - \u6b65\u957f (stride) \u5927\u5c0f\u3002 dilation (int) - \u81a8\u80c0 (dilation) \u5927\u5c0f\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4","title":"conv2d"},{"location":"table_latency/#activation","text":"\u683c\u5f0f 1 op_type , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4","title":"activation"},{"location":"table_latency/#batch_norm","text":"\u683c\u5f0f 1 op_type , active_type , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 active_type (string|None) - \u6fc0\u6d3b\u51fd\u6570\u7c7b\u578b\uff0c\u5305\u542b\uff1arelu, prelu, sigmoid, relu6, tanh\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 
Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4","title":"batch_norm"},{"location":"table_latency/#eltwise","text":"\u683c\u5f0f 1 op_type , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4","title":"eltwise"},{"location":"table_latency/#pooling","text":"\u683c\u5f0f 1 op_type , flag_global_pooling , n_in , c_in , h_in , w_in , kernel , padding , stride , ceil_mode , pool_type \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 flag_global_pooling (int) - \u662f\u5426\u4e3a\u5168\u5c40\u6c60\u5316\uff080\uff1a\u4e0d\u662f\uff0c1\uff1a\u662f\uff09\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 kernel (int) - \u5377\u79ef\u6838\u5927\u5c0f\u3002 padding (int) - \u586b\u5145 (padding) \u5927\u5c0f\u3002 stride (int) - \u6b65\u957f (stride) \u5927\u5c0f\u3002 ceil_mode (int) - \u662f\u5426\u7528 ceil \u51fd\u6570\u8ba1\u7b97\u8f93\u51fa\u9ad8\u5ea6\u548c\u5bbd\u5ea6\u30020 \u8868\u793a\u4f7f\u7528 floor \u51fd\u6570\uff0c1 \u8868\u793a\u4f7f\u7528 ceil \u51fd\u6570\u3002 pool_type (int) - \u6c60\u5316\u7c7b\u578b\uff0c\u5176\u4e2d 1 \u8868\u793a pooling_max\uff0c2 \u8868\u793a pooling_average_include_padding\uff0c3 \u8868\u793a pooling_average_exclude_padding\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4","title":"pooling"},{"location":"table_latency/#softmax","text":"\u683c\u5f0f 1 op_type , axis , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 axis (int) - \u6267\u884c softmax \u8ba1\u7b97\u7684\u7ef4\u5ea6\u7d22\u5f15\uff0c\u5e94\u8be5\u5728 [\u22121\uff0crank \u2212 1] \u8303\u56f4\u5185\uff0c\u5176\u4e2d rank \u662f\u8f93\u5165\u53d8\u91cf\u7684\u79e9\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4","title":"softmax"},{"location":"algo/algo/","text":"\u76ee\u5f55 # \u91cf\u5316\u539f\u7406\u4ecb\u7ecd \u526a\u88c1\u539f\u7406\u4ecb\u7ecd \u84b8\u998f\u539f\u7406\u4ecb\u7ecd \u8f7b\u91cf\u7ea7\u6a21\u578b\u7ed3\u6784\u641c\u7d22\u539f\u7406\u4ecb\u7ecd 1. 
Quantization Aware Training\u91cf\u5316\u4ecb\u7ecd # 1.1 \u80cc\u666f # \u8fd1\u5e74\u6765\uff0c\u5b9a\u70b9\u91cf\u5316\u4f7f\u7528\u66f4\u5c11\u7684\u6bd4\u7279\u6570\uff08\u59828-bit\u30013-bit\u30012-bit\u7b49\uff09\u8868\u793a\u795e\u7ecf\u7f51\u7edc\u7684\u6743\u91cd\u548c\u6fc0\u6d3b\u5df2\u88ab\u9a8c\u8bc1\u662f\u6709\u6548\u7684\u3002\u5b9a\u70b9\u91cf\u5316\u7684\u4f18\u70b9\u5305\u62ec\u4f4e\u5185\u5b58\u5e26\u5bbd\u3001\u4f4e\u529f\u8017\u3001\u4f4e\u8ba1\u7b97\u8d44\u6e90\u5360\u7528\u4ee5\u53ca\u4f4e\u6a21\u578b\u5b58\u50a8\u9700\u6c42\u7b49\u3002 \u88681: \u4e0d\u540c\u7c7b\u578b\u64cd\u4f5c\u7684\u5f00\u9500\u5bf9\u6bd4 \u7531\u88681\u53ef\u77e5\uff0c\u4f4e\u7cbe\u5ea6\u5b9a\u70b9\u6570\u64cd\u4f5c\u7684\u786c\u4ef6\u9762\u79ef\u5927\u5c0f\u53ca\u80fd\u8017\u6bd4\u9ad8\u7cbe\u5ea6\u6d6e\u70b9\u6570\u8981\u5c11\u51e0\u4e2a\u6570\u91cf\u7ea7\u3002 \u4f7f\u7528\u5b9a\u70b9\u91cf\u5316\u53ef\u5e26\u67654\u500d\u7684\u6a21\u578b\u538b\u7f29\u30014\u500d\u7684\u5185\u5b58\u5e26\u5bbd\u63d0\u5347\uff0c\u4ee5\u53ca\u66f4\u9ad8\u6548\u7684cache\u5229\u7528(\u5f88\u591a\u786c\u4ef6\u8bbe\u5907\uff0c\u5185\u5b58\u8bbf\u95ee\u662f\u4e3b\u8981\u80fd\u8017)\u3002\u9664\u6b64\u4e4b\u5916\uff0c\u8ba1\u7b97\u901f\u5ea6\u4e5f\u4f1a\u66f4\u5feb(\u901a\u5e38\u5177\u67092x-3x\u7684\u6027\u80fd\u63d0\u5347)\u3002\u7531\u88682\u53ef\u77e5\uff0c\u5728\u5f88\u591a\u573a\u666f\u4e0b\uff0c\u5b9a\u70b9\u91cf\u5316\u64cd\u4f5c\u5bf9\u7cbe\u5ea6\u5e76\u4e0d\u4f1a\u9020\u6210\u635f\u5931\u3002\u53e6\u5916\uff0c\u5b9a\u70b9\u91cf\u5316\u5bf9\u795e\u7ecf\u7f51\u7edc\u4e8e\u5d4c\u5165\u5f0f\u8bbe\u5907\u4e0a\u7684\u63a8\u65ad\u6765\u8bf4\u662f\u6781\u5176\u91cd\u8981\u7684\u3002 \u88682\uff1a\u6a21\u578b\u91cf\u5316\u524d\u540e\u7cbe\u5ea6\u5bf9\u6bd4 \u76ee\u524d\uff0c\u5b66\u672f\u754c\u4e3b\u8981\u5c06\u91cf\u5316\u5206\u4e3a\u4e24\u5927\u7c7b\uff1a Post Training Quantization \u548c Quantization Aware Training \u3002 Post Training Quantization \u662f\u6307\u4f7f\u7528KL\u6563\u5ea6\u3001\u6ed1\u52a8\u5e73\u5747\u7b49\u65b9\u6cd5\u786e\u5b9a\u91cf\u5316\u53c2\u6570\u4e14\u4e0d\u9700\u8981\u91cd\u65b0\u8bad\u7ec3\u7684\u5b9a\u70b9\u91cf\u5316\u65b9\u6cd5\u3002 Quantization Aware Training \u662f\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u5bf9\u91cf\u5316\u8fdb\u884c\u5efa\u6a21\u4ee5\u786e\u5b9a\u91cf\u5316\u53c2\u6570\uff0c\u5b83\u4e0e Post Training Quantization \u6a21\u5f0f\u76f8\u6bd4\u53ef\u4ee5\u63d0\u4f9b\u66f4\u9ad8\u7684\u9884\u6d4b\u7cbe\u5ea6\u3002 1.2 \u91cf\u5316\u539f\u7406 # 1.2.1 \u91cf\u5316\u65b9\u5f0f # \u76ee\u524d\uff0c\u5b58\u5728\u7740\u8bb8\u591a\u65b9\u6cd5\u53ef\u4ee5\u5c06\u6d6e\u70b9\u6570\u91cf\u5316\u6210\u5b9a\u70b9\u6570\u3002\u4f8b\u5982\uff1a r = min(max(x, a), b) s = \\frac{b - a}{n - 1} q = \\left \\lfloor \\frac{r - a}{s} \\right \\rceil \u5f0f\u4e2d\uff0c x x \u662f\u5f85\u91cf\u5316\u7684\u6d6e\u70b9\u503c\uff0c [a, b] [a, b] \u662f\u91cf\u5316\u8303\u56f4\uff0c a a \u662f\u5f85\u91cf\u5316\u6d6e\u70b9\u6570\u4e2d\u7684\u6700\u5c0f\u503c\uff0c b b \u662f\u5f85\u91cf\u5316\u6d6e\u70b9\u6570\u4e2d\u7684\u6700\u5927\u503c\u3002 \\left \\lfloor \\right \\rceil \\left \\lfloor \\right \\rceil \u8868\u793a\u5c06\u7ed3\u679c\u56db\u820d\u4e94\u5165\u5230\u6700\u8fd1\u7684\u6574\u6570\u3002\u5982\u679c\u91cf\u5316\u7ea7\u522b\u4e3a k k \uff0c\u5219 n n \u4e3a 2^k 2^k \u3002\u4f8b\u5982\uff0c\u82e5 k k \u4e3a8\uff0c\u5219 n n \u4e3a256\u3002 q q \u662f\u91cf\u5316\u5f97\u5230\u7684\u6574\u6570\u3002 
PaddleSlim\u6846\u67b6\u4e2d\u9009\u62e9\u7684\u91cf\u5316\u65b9\u6cd5\u4e3a\u6700\u5927\u7edd\u5bf9\u503c\u91cf\u5316( max-abs )\uff0c\u5177\u4f53\u63cf\u8ff0\u5982\u4e0b\uff1a M = max(abs(x)) q = \\left \\lfloor \\frac{x}{M} * (n - 1) \\right \\rceil \u5f0f\u4e2d\uff0c x x \u662f\u5f85\u88ab\u91cf\u5316\u7684\u6d6e\u70b9\u503c\uff0c M M \u662f\u5f85\u91cf\u5316\u6d6e\u70b9\u6570\u4e2d\u7684\u7edd\u5bf9\u503c\u6700\u5927\u503c\u3002 \\left \\lfloor \\right \\rceil \\left \\lfloor \\right \\rceil \u8868\u793a\u5c06\u7ed3\u679c\u56db\u820d\u4e94\u5165\u5230\u6700\u8fd1\u7684\u6574\u6570\u3002\u5bf9\u4e8e8bit\u91cf\u5316\uff0cPaddleSlim\u91c7\u7528 int8_t \uff0c\u5373 n=2^7=128 n=2^7=128 \u3002 q q \u662f\u91cf\u5316\u5f97\u5230\u7684\u6574\u6570\u3002 \u65e0\u8bba\u662f min-max\u91cf\u5316 \u8fd8\u662f max-abs\u91cf\u5316 \uff0c\u4ed6\u4eec\u90fd\u53ef\u4ee5\u8868\u793a\u4e3a\u5982\u4e0b\u5f62\u5f0f\uff1a q = scale * r + b q = scale * r + b \u5176\u4e2d min-max \u548c max-abs \u88ab\u79f0\u4e3a\u91cf\u5316\u53c2\u6570\u6216\u8005\u91cf\u5316\u6bd4\u4f8b\u6216\u8005\u91cf\u5316\u8303\u56f4\u3002 1.2.2 \u91cf\u5316\u8bad\u7ec3 # 1.2.2.1 \u524d\u5411\u4f20\u64ad # \u524d\u5411\u4f20\u64ad\u8fc7\u7a0b\u91c7\u7528\u6a21\u62df\u91cf\u5316\u7684\u65b9\u5f0f\uff0c\u5177\u4f53\u63cf\u8ff0\u5982\u4e0b\uff1a \u56fe1\uff1a\u57fa\u4e8e\u6a21\u62df\u91cf\u5316\u8bad\u7ec3\u7684\u524d\u5411\u8fc7\u7a0b \u7531\u56fe1\u53ef\u77e5\uff0c\u57fa\u4e8e\u6a21\u62df\u91cf\u5316\u8bad\u7ec3\u7684\u524d\u5411\u8fc7\u7a0b\u53ef\u88ab\u63cf\u8ff0\u4e3a\u4ee5\u4e0b\u56db\u4e2a\u90e8\u5206\uff1a 1) \u8f93\u5165\u548c\u6743\u91cd\u5747\u88ab\u91cf\u5316\u62108-bit\u6574\u6570\u3002 2) \u57288-bit\u6574\u6570\u4e0a\u6267\u884c\u77e9\u9635\u4e58\u6cd5\u6216\u8005\u5377\u79ef\u64cd\u4f5c\u3002 3) \u53cd\u91cf\u5316\u77e9\u9635\u4e58\u6cd5\u6216\u8005\u5377\u79ef\u64cd\u4f5c\u7684\u8f93\u51fa\u7ed3\u679c\u4e3a32-bit\u6d6e\u70b9\u578b\u6570\u636e\u3002 4) \u572832-bit\u6d6e\u70b9\u578b\u6570\u636e\u4e0a\u6267\u884c\u504f\u7f6e\u52a0\u6cd5\u64cd\u4f5c\u3002\u6b64\u5904\uff0c\u504f\u7f6e\u5e76\u672a\u88ab\u91cf\u5316\u3002 \u5bf9\u4e8e\u901a\u7528\u77e9\u9635\u4e58\u6cd5( GEMM )\uff0c\u8f93\u5165 X X \u548c\u6743\u91cd W W \u7684\u91cf\u5316\u64cd\u4f5c\u53ef\u88ab\u8868\u8ff0\u4e3a\u5982\u4e0b\u8fc7\u7a0b\uff1a X_q = \\left \\lfloor \\frac{X}{X_m} * (n - 1) \\right \\rceil W_q = \\left \\lfloor \\frac{W}{W_m} * (n - 1) \\right \\rceil \u6267\u884c\u901a\u7528\u77e9\u9635\u4e58\u6cd5\uff1a Y_q = X_q * W_q \u5bf9\u91cf\u5316\u4e58\u79ef\u7ed3\u679c Yq Yq \u8fdb\u884c\u53cd\u91cf\u5316: \\begin{align} Y_{dq} = \\frac{Y_q}{(n - 1) * (n - 1)} * X_m * W_m \\ =\\frac{X_q * W_q}{(n - 1) * (n - 1)} * X_m * W_m \\ =(\\frac{X_q}{n - 1} * X_m) * (\\frac{W_q}{n - 1} * W_m) \\ \\end{align} \u4e0a\u8ff0\u516c\u5f0f\u8868\u660e\u53cd\u91cf\u5316\u64cd\u4f5c\u53ef\u4ee5\u88ab\u79fb\u52a8\u5230 GEMM \u4e4b\u524d\uff0c\u5373\u5148\u5bf9 Xq Xq \u548c Wq Wq \u6267\u884c\u53cd\u91cf\u5316\u64cd\u4f5c\u518d\u505a GEMM \u64cd\u4f5c\u3002\u56e0\u6b64\uff0c\u524d\u5411\u4f20\u64ad\u7684\u5de5\u4f5c\u6d41\u4ea6\u53ef\u8868\u793a\u4e3a\u5982\u4e0b\u65b9\u5f0f\uff1a \u56fe2\uff1a\u57fa\u4e8e\u6a21\u62df\u91cf\u5316\u8bad\u7ec3\u524d\u5411\u8fc7\u7a0b\u7684\u7b49\u4ef7\u5de5\u4f5c\u6d41 
During training, PaddleSlim uses the equivalent workflow shown in Figure 2. In this design, the quantization pass inserts quantization and dequantization ops into the IrGraph. Because the data produced by a consecutive quantize/dequantize pair is still 32-bit floating point, the quantization scheme used by PaddleSlim's quantization-aware training framework is called simulated quantization.

1.2.2.2 Backward propagation #

As Figure 3 shows, the gradients needed for the weight update can be computed from the quantized weights and the quantized activations. All inputs and outputs of the backward pass are 32-bit floating-point data. Note that the gradient update must be applied to the original weights: the computed gradients are added to the original weights, not to the quantized or dequantized weights.

Figure 3: Backward propagation and weight update in simulated-quantization training

For this reason, the quantization pass also rewrites some inputs of the corresponding backward operators.

1.2.2.3 Determining the quantization scale factor #

There are two strategies for computing the quantization scale factor: a dynamic strategy and a static strategy. The dynamic strategy recomputes the scale factor at every iteration, whereas the static strategy applies the same scale factor to all inputs.

For weights, the dynamic strategy is used during training; in other words, the scale factor is recomputed at every iteration until training finishes.

For activations, either the dynamic or the static strategy can be chosen. If the static strategy is used, the scale factor is estimated during training and then reused at inference time (it stays the same for all inputs). Under the static strategy, the scale factor can be estimated during training in one of the following three ways:

1) the mean of the per-batch maximum absolute activation values within a window;
2) the maximum of the per-batch maximum absolute activation values within a window;
3) a moving average of the per-batch maximum absolute activation values within a window, computed as

V_t = (1 - k) * V + k * V_{t-1}

where V is the maximum absolute value of the current batch, V_t is the moving average, and k is a factor whose value can be taken as, for example, 0.9.

1.2.4 Post-training quantization #

Post-training quantization computes the quantization scale factors from sampled data, using methods such as KL divergence. Compared with quantization-aware training, it requires no retraining, so a quantized model can be obtained quickly.

The goal of post-training quantization is to determine the scale factors, and there are two main methods: the no-saturation method and the saturation method. The no-saturation method computes the maximum absolute value abs_max of the FP32 tensor and maps it to 127, so the scale factor equals abs_max/127. The saturation method uses KL divergence to find a suitable threshold T (0 < T < abs_max), maps T to 127, and the scale factor is then T/127.
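The scale-factor rules above are easy to prototype outside of Paddle. The following is a minimal NumPy sketch of max-abs quantization and of the moving-average statistic for activations; it is illustrative only and is not PaddleSlim's implementation.

import numpy as np

def max_abs_scale(tensor, bits=8):
    # Non-saturating scale: map the largest absolute value to the
    # largest representable integer (127 for int8), so scale = abs_max / 127.
    n = 2 ** (bits - 1) - 1
    return np.abs(tensor).max() / n

def quantize(tensor, scale, bits=8):
    # q = round(x / scale), clipped to the signed integer range.
    n = 2 ** (bits - 1) - 1
    return np.clip(np.round(tensor / scale), -n, n).astype(np.int8)

def dequantize(q, scale):
    return q.astype(np.float32) * scale

class MovingAverageAbsMax(object):
    # Static strategy for activations: track a moving average of the
    # per-batch max absolute value, V_t = (1 - k) * V + k * V_{t-1}.
    def __init__(self, k=0.9):
        self.k = k
        self.v_t = None

    def update(self, batch):
        v = np.abs(batch).max()
        self.v_t = v if self.v_t is None else (1 - self.k) * v + self.k * self.v_t
        return self.v_t

# toy usage
w = np.random.randn(8, 3, 3, 3).astype(np.float32)
s = max_abs_scale(w)
w_dq = dequantize(quantize(w, s), s)
print("max abs quantization error:", np.abs(w - w_dq).max())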
4.1.1 Simulated annealing #

The acceptance probability in the k-th iteration is

\begin{equation} P(r_k) = \begin{cases} e^{\frac{r_k - r}{T_k}} & r_k < r \\ 1 & r_k \ge r \end{cases} \end{equation}

In the k-th iteration, the searched network is N_k. After N_k has been trained for a few epochs, its reward r_k is measured on the test set, and r_k is accepted with probability P(r_k), i.e. r = r_k is executed. r is initialised to 0 at the start of the search. T_0 is the initial temperature, \theta is the temperature decay coefficient, and T_k is the temperature at the k-th iteration.

In our NAS task, unlike RL approaches that regenerate a complete network at every step, we map the network structure to an encoding. The encoding is randomly initialised once; afterwards, at every step a randomly chosen part of the encoding (corresponding to a part of the network structure) is mutated to produce a new encoding, which is then mapped back to a network structure. The reward is obtained by fusing the accuracy after training on the training set for a fixed number of epochs with the network latency, and this reward guides the convergence of the simulated-annealing algorithm.
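As an illustration of the acceptance rule above, here is a small self-contained Python sketch. It assumes a simple geometric temperature decay T_k = theta * T_{k-1} (the document names theta as the decay coefficient but does not spell out the exact schedule), and the evaluate() function is a placeholder for training a candidate network for a few epochs and measuring its reward.

import math
import random

def accept(r_k, r, t_k):
    # Acceptance rule: always accept a better reward; accept a worse one
    # with probability exp((r_k - r) / T_k).
    if r_k >= r:
        return True
    return random.random() < math.exp((r_k - r) / t_k)

def evaluate(tokens):
    # Placeholder for "train N_k for a few epochs and measure the reward".
    return random.random()

t0, theta = 100.0, 0.85                              # illustrative initial temperature and decay factor
r = 0.0                                              # best accepted reward, initialised to 0
tokens = [random.randint(0, 3) for _ in range(10)]   # stand-in for a structure encoding

for k in range(1, 51):
    t_k = t0 * (theta ** k)                          # assumed geometric schedule
    candidate = list(tokens)
    candidate[random.randrange(len(candidate))] = random.randint(0, 3)  # mutate part of the encoding
    r_k = evaluate(candidate)
    if accept(r_k, r, t_k):
        tokens, r = candidate, r_k

print("best reward found:", r)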
4.2 Search space #

The search space defines the variables of the optimisation problem, and the number of variables determines how hard the search is and how long it takes. Defining a reasonable search space is therefore essential for speeding up the search. In Light-NAS, to accelerate the search we divide a network into multiple blocks: the blocks are first stacked by hand into a chain-like, hierarchical structure, and the search algorithm then automatically searches the structure inside each block.

Because the goal is to find models that run fast on mobile devices, we follow the Linear Bottlenecks and Inverted Residuals structures of MobileNetV2 and search the concrete parameters of each Inverted Residuals block, including the kernel size, the channel expansion ratio, the number of repetitions, and the number of channels, as shown in Figure 10.

Figure 10

4.3 Model latency evaluation #

The search process supports both FLOPS constraints and model latency constraints. On hardware platforms such as Android/iOS mobile devices or development boards, repeatedly measuring the latency of candidate models during the iterative search is both time-consuming and inconvenient, so we developed a latency evaluator to estimate the latency of the searched models. The latency estimated by the evaluator deviates from the actually measured latency by less than 10%.

The latency evaluator works in two stages: configuring the hardware latency evaluator and estimating model latency. The configuration stage only needs to be executed once, while latency estimation is performed repeatedly on the models produced during the search.

Configuring the hardware latency evaluator:
1) collect all distinct ops and their parameters in the search space;
2) measure the latency of each group of ops and parameters.

Estimating model latency:
1) collect all ops and their parameters of the given model;
2) use the latency evaluator to estimate the model latency from those ops and parameters.

5. References #

High-Performance Hardware for Machine Learning
Quantizing deep convolutional networks for efficient inference: A whitepaper
Pruning Filters for Efficient ConvNets
Distilling the Knowledge in a Neural Network
A Gift from Knowledge Distillation: Fast Optimization, Network Minimization and Transfer Learning
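To make the two-stage latency evaluator of Section 4.3 concrete, here is a rough sketch. The op keys and the benchmark function are hypothetical; the real evaluator works on the ops and parameters extracted from Paddle programs.

# Stage 1: configure the hardware latency evaluator (run once per target device).
op_latency_table = {}   # hypothetical key format, e.g. "conv2d k3 s2 cin32 cout64 in112"

def configure(unique_ops, benchmark_on_device):
    for op_key in unique_ops:
        # measured latency of this op configuration on the target device (ms)
        op_latency_table[op_key] = benchmark_on_device(op_key)

# Stage 2: estimate a candidate model's latency during the search by
# summing the table entries of all ops the model contains.
def estimate_latency(model_ops):
    return sum(op_latency_table[op_key] for op_key in model_ops)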
FLOPs #

paddleslim.analysis.flops(program, detail=False) (source code)

Returns the number of floating-point operations (FLOPs) of the given network.

Parameters:
- program(paddle.fluid.Program) - the target network to analyse. For more about Program see: Introduction to the Program concept.
- detail(bool) - whether to also return the FLOPs of every convolution layer. Default: False.
- only_conv(bool) - if True, only the FLOPs of convolution and fully-connected layers are counted, i.e. the number of floating-point multiply-add operations. If False, the FLOPs of operations other than convolution and fully-connected layers are counted as well.

Returns:
- flops(float) - the FLOPs of the whole network.
- params2flops(dict) - the FLOPs of each convolution layer, where the key is the parameter name of the convolution layer and the value is its FLOPs.

Example:

import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddleslim.analysis import flops

def conv_bn_layer(input, num_filters, filter_size, name, stride=1, groups=1, act=None):
    conv = fluid.layers.conv2d(
        input=input,
        num_filters=num_filters,
        filter_size=filter_size,
        stride=stride,
        padding=(filter_size - 1) // 2,
        groups=groups,
        act=None,
        param_attr=ParamAttr(name=name + "_weights"),
        bias_attr=False,
        name=name + "_out")
    bn_name = name + "_bn"
    return fluid.layers.batch_norm(
        input=conv,
        act=act,
        name=bn_name + '_output',
        param_attr=ParamAttr(name=bn_name + '_scale'),
        bias_attr=ParamAttr(bn_name + '_offset'),
        moving_mean_name=bn_name + '_mean',
        moving_variance_name=bn_name + '_variance')

main_program = fluid.Program()
startup_program = fluid.Program()
#   X       X             O      X             O
# conv1-->conv2-->sum1-->conv3-->conv4-->sum2-->conv5-->conv6
#   |              ^       |               ^
#   |______________|       |_______________|
#
# X: prune output channels
# O: prune input channels
with fluid.program_guard(main_program, startup_program):
    input = fluid.data(name="image", shape=[None, 3, 16, 16])
    conv1 = conv_bn_layer(input, 8, 3, "conv1")
    conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
    sum1 = conv1 + conv2
    conv3 = conv_bn_layer(sum1, 8, 3, "conv3")
    conv4 = conv_bn_layer(conv3, 8, 3, "conv4")
    sum2 = conv4 + sum1
    conv5 = conv_bn_layer(sum2, 8, 3, "conv5")
    conv6 = conv_bn_layer(conv5, 8, 3, "conv6")

print("FLOPs: {}".format(flops(main_program)))

model_size #

paddleslim.analysis.model_size(program) (source code)

Returns the number of parameters of the given network.

Parameters:
- program(paddle.fluid.Program) - the target network to analyse. For more about Program see: Introduction to the Program concept.

Returns:
- model_size(int) - the number of parameters of the whole network.

Example:

import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddleslim.analysis import model_size

def conv_layer(input, num_filters, filter_size, name, stride=1, groups=1, act=None):
    conv = fluid.layers.conv2d(
        input=input,
        num_filters=num_filters,
        filter_size=filter_size,
        stride=stride,
        padding=(filter_size - 1) // 2,
        groups=groups,
        act=None,
        param_attr=ParamAttr(name=name + "_weights"),
        bias_attr=False,
        name=name + "_out")
    return conv

main_program = fluid.Program()
startup_program = fluid.Program()
#   X       X             O      X             O
# conv1-->conv2-->sum1-->conv3-->conv4-->sum2-->conv5-->conv6
#   |              ^       |               ^
#   |______________|       |_______________|
#
# X: prune output channels
# O: prune input channels
with fluid.program_guard(main_program, startup_program):
    input = fluid.data(name="image", shape=[None, 3, 16, 16])
    conv1 = conv_layer(input, 8, 3, "conv1")
    conv2 = conv_layer(conv1, 8, 3, "conv2")
    sum1 = conv1 + conv2
    conv3 = conv_layer(sum1, 8, 3, "conv3")
    conv4 = conv_layer(conv3, 8, 3, "conv4")
    sum2 = conv4 + sum1
    conv5 = conv_layer(sum2, 8, 3, "conv5")
    conv6 = conv_layer(conv5, 8, 3, "conv6")

print("Model size: {}".format(model_size(main_program)))
TableLatencyEvaluator #

paddleslim.analysis.TableLatencyEvaluator(table_file, delimiter=",") (source code)

A model latency evaluator based on a hardware latency table.

Parameters:
- table_file(str) - the absolute path of the latency table to use. For the table format see: PaddleSlim hardware latency table format.
- delimiter(str) - the delimiter used between the fields of an operation's entry in the latency table. Default: the ASCII comma.

Returns:
- Evaluator - an instance of the hardware latency evaluator.

paddleslim.analysis.TableLatencyEvaluator.latency(graph) (source code)

Returns the estimated latency of the given network.

Parameters:
- graph(Program) - the target network to estimate.

Returns:
- latency - the estimated latency of the target network.
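A minimal usage sketch, assuming a latency table file has already been generated for the target hardware (the path below is a placeholder) and reusing the main_program built in the examples above:

from paddleslim.analysis import TableLatencyEvaluator

# placeholder path; the file must follow the PaddleSlim hardware latency table format
evaluator = TableLatencyEvaluator("/path/to/latency_table.txt", delimiter=",")
print("Estimated latency: {}".format(evaluator.latency(main_program)))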
PaddleSlim API documentation guide #

Model analysis #
Convolution channel pruning #
Distillation #: single-process distillation, channel pruning
Quantization #: quantization-aware training, post-training (offline) quantization, embedding quantization
Small-model structure search #: nas API, SearchSpace

paddleslim.nas API documentation #

SANAS API #

class SANAS #

SANAS (Simulated Annealing Neural Architecture Search) performs model structure search based on the simulated annealing algorithm and is generally used for discrete search tasks.

paddleslim.nas.SANAS(configs, server_addr, init_temperature, reduce_rate, search_steps, save_checkpoint, load_checkpoint, is_server)

Parameters:
- configs(list): the search-space configuration list, in the format [(key, {input_size, output_size, block_num, block_mask})] or [(key)] (the search spaces of MobileNetV2, MobileNetV1 and ResNet use the same structure as the original networks, so only key needs to be given). input_size and output_size are the sizes of the input and output feature maps, block_num is the number of blocks in the searched network, and block_mask is a list of 0s and 1s, where 0 marks a block without downsampling and 1 marks a downsampling block. For more search-space configurations provided by PaddleSlim, please refer to the search-space documentation.
- server_addr(tuple): the address of the SANAS server, consisting of the server ip and port. If the ip is None or "", the local ip is used by default. Default: ("", 8881).
- init_temperature(float): the initial temperature of the simulated-annealing search. Default: 100.
- reduce_rate(float): the decay rate of the simulated-annealing search. Default: 0.85.
- search_steps(int): the number of search iterations. Default: 300.
- save_checkpoint(str|None): the directory in which to save checkpoints; if None, no checkpoint is saved. Default: ./nas_checkpoint.
- load_checkpoint(str|None): the directory from which to load a checkpoint; if None, no checkpoint is loaded. Default: None.
- is_server(bool): whether the current instance should start a server. Default: True.

Returns: an instance of the SANAS class.

Example:

from paddleslim.nas import SANAS
config = [('MobileNetV2Space')]
sanas = SANAS(config=config)
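For illustration only, a configs entry that spells out a block_mask might look like the snippet below. The space key 'MobileNetV2BlockSpace' is a hypothetical placeholder; check the search-space documentation of your PaddleSlim version for the exact registered names.

from paddleslim.nas import SANAS

# 1 marks a block that downsamples, 0 a block that does not (see the configs description above);
# the space key is a stand-in and may differ in your PaddleSlim version.
config = [('MobileNetV2BlockSpace', {'block_mask': [0, 1, 1, 0, 1, 0]})]
sanas = SANAS(config, server_addr=("", 8881), search_steps=50, is_server=True)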
tokens2arch(tokens)

Converts a group of tokens into the corresponding model structure. It is typically used to turn the best tokens found by the search into a model structure for the final training.

Parameters:
- tokens(list): a group of tokens.

Returns: a model-structure instance.

Example:

import paddle.fluid as fluid
input = fluid.data(name='input', shape=[None, 3, 32, 32], dtype='float32')
archs = sanas.token2arch(tokens)
for arch in archs:
    output = arch(input)
    input = output

next_archs()

Returns the next group of model structures.

Returns: a list of model-structure instances.

Example:

import paddle.fluid as fluid
input = fluid.data(name='input', shape=[None, 3, 32, 32], dtype='float32')
archs = sanas.next_archs()
for arch in archs:
    output = arch(input)
    input = output

reward(score)

Reports the score of the current model structure back to the search.

Parameters:
- score: the score of the current model; higher is better.

Returns: True if the model structure was updated successfully, False otherwise.

Example:

import numpy as np
import paddle
import paddle.fluid as fluid
from paddleslim.nas import SANAS
from paddleslim.analysis import flops

max_flops = 321208544
batch_size = 256

# search-space configuration
config = [('MobileNetV2Space')]
# instantiate SANAS
sa_nas = SANAS(
    config,
    server_addr=("", 8887),
    init_temperature=10.24,
    reduce_rate=0.85,
    search_steps=100,
    is_server=True)

for step in range(100):
    archs = sa_nas.next_archs()

    train_program = fluid.Program()
    test_program = fluid.Program()
    startup_program = fluid.Program()

    ### build the training program
    with fluid.program_guard(train_program, startup_program):
        image = fluid.data(name='image', shape=[None, 3, 32, 32], dtype='float32')
        label = fluid.data(name='label', shape=[None, 1], dtype='int64')
        for arch in archs:
            output = arch(image)
        out = fluid.layers.fc(output, size=10, act="softmax")
        softmax_out = fluid.layers.softmax(input=out, use_cudnn=False)
        cost = fluid.layers.cross_entropy(input=softmax_out, label=label)
        avg_cost = fluid.layers.mean(cost)
        acc_top1 = fluid.layers.accuracy(input=softmax_out, label=label, k=1)

        ### build the test program
        test_program = train_program.clone(for_test=True)

        ### define the optimizer
        sgd = fluid.optimizer.SGD(learning_rate=1e-3)
        sgd.minimize(avg_cost)

    ### apply the constraint; without it the search is unconstrained
    if flops(train_program) > max_flops:
        continue

    ### run on CPU
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_program)

    ### training data
    train_reader = paddle.batch(
        paddle.reader.shuffle(paddle.dataset.cifar.train10(cycle=False), buf_size=1024),
        batch_size=batch_size,
        drop_last=True)
    ### test data
    test_reader = paddle.batch(
        paddle.dataset.cifar.test10(cycle=False),
        batch_size=batch_size,
        drop_last=False)
    train_feeder = fluid.DataFeeder([image, label], place, program=train_program)
    test_feeder = fluid.DataFeeder([image, label], place, program=test_program)

    ### train each searched architecture for 5 epochs
    for epoch_id in range(5):
        for batch_id, data in enumerate(train_reader()):
            fetches = [avg_cost.name]
            outs = exe.run(train_program, feed=train_feeder.feed(data), fetch_list=fetches)[0]
            if batch_id % 10 == 0:
                print('TRAIN: steps: {}, epoch: {}, batch: {}, cost: {}'.format(step, epoch_id, batch_id, outs[0]))

    ### evaluate; the final test result is the score returned to sa_nas
    reward = []
    for batch_id, data in enumerate(test_reader()):
        test_fetches = [avg_cost.name, acc_top1.name]
        batch_reward = exe.run(test_program, feed=test_feeder.feed(data), fetch_list=test_fetches)
        reward_avg = np.mean(np.array(batch_reward), axis=1)
        reward.append(reward_avg)
        print('TEST: step: {}, batch: {}, avg_cost: {}, acc_top1: {}'.format(step, batch_id, batch_reward[0], batch_reward[1]))
    finally_reward = np.mean(np.array(reward), axis=0)
    print('FINAL TEST: avg_cost: {}, acc_top1: {}'.format(finally_reward[0], finally_reward[1]))

    ### report the score
    sa_nas.reward(float(finally_reward[1]))
Pruner #

paddleslim.prune.Pruner(criterion="l1_norm") (source code)

Prunes the channels of a convolutional network once. Pruning the channels of a convolution layer means pruning the output channels of that layer. The weight of a convolution layer has the shape [output_channel, input_channel, kernel_size, kernel_size]; the number of output channels is reduced by pruning the first dimension of this weight.

Parameters:
- criterion - the metric used to evaluate the importance of the channels within a convolution layer. Currently only l1_norm is supported. Default: l1_norm.

Returns: an instance of the Pruner class.

Example:

from paddleslim.prune import Pruner
pruner = Pruner()

paddleslim.prune.Pruner.prune(program, scope, params, ratios, place=None, lazy=False, only_graph=False, param_backup=False, param_shape_backup=False) (source code)

Prunes the weights of a group of convolution layers in the target network.

Parameters:
- program(paddle.fluid.Program) - the target network to prune. For more about Program see: Introduction to the Program concept.
- scope(paddle.fluid.Scope) - the scope that holds the weights to be pruned; Paddle stores the values of model parameters and runtime variables in scope instances. The parameter values in the scope are pruned in place. See: Introduction to the Scope concept.
- params(list) - the list of parameter names of the convolution layers to prune. The names of all parameters in a model can be listed as follows:

for block in program.blocks:
    for param in block.all_parameters():
        print("param: {}; shape: {}".format(param.name, param.shape))

- ratios(list) - the pruning ratios applied to params; the list must have the same length as params.
- place(paddle.fluid.Place) - the device on which the parameters to prune are located, either CUDAPlace or CPUPlace. See: Introduction to the Place concept.
- lazy(bool) - if True, pruning is performed by zeroing out the parameters of the selected channels while keeping the parameter shapes unchanged; if False, the parameters of the pruned channels are actually removed and the parameter shapes change.
- only_graph(bool) - whether to prune only the network structure. In Paddle, the Program defines the network structure and the Scope stores the parameter values; one Scope instance can be shared by several Programs, for example the Program defining the training network and the Program defining the test network. If only_graph is True, only the channels of the convolutions defined in the Program are pruned; if False, the values of the convolution parameters in the Scope are pruned as well. Default: False.
- param_backup(bool) - whether to return a backup of the parameter values. Default: False.
- param_shape_backup(bool) - whether to return a backup of the parameter shapes. Default: False.

Returns:
- pruned_program(paddle.fluid.Program) - the pruned Program.
- param_backup(dict) - a backup of the parameter values, used to restore the parameter values in the Scope.
- param_shape_backup(dict) - a backup of the parameter shapes.

Example: run the following code on AIStudio.

import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddleslim.prune import Pruner

def conv_bn_layer(input, num_filters, filter_size, name, stride=1, groups=1, act=None):
    conv = fluid.layers.conv2d(
        input=input,
        num_filters=num_filters,
        filter_size=filter_size,
        stride=stride,
        padding=(filter_size - 1) // 2,
        groups=groups,
        act=None,
        param_attr=ParamAttr(name=name + "_weights"),
        bias_attr=False,
        name=name + "_out")
    bn_name = name + "_bn"
    return fluid.layers.batch_norm(
        input=conv,
        act=act,
        name=bn_name + '_output',
        param_attr=ParamAttr(name=bn_name + '_scale'),
        bias_attr=ParamAttr(bn_name + '_offset'),
        moving_mean_name=bn_name + '_mean',
        moving_variance_name=bn_name + '_variance')

main_program = fluid.Program()
startup_program = fluid.Program()
#   X       X             O      X             O
# conv1-->conv2-->sum1-->conv3-->conv4-->sum2-->conv5-->conv6
#   |              ^       |               ^
#   |______________|       |_______________|
#
# X: prune output channels
# O: prune input channels
with fluid.program_guard(main_program, startup_program):
    input = fluid.data(name="image", shape=[None, 3, 16, 16])
    conv1 = conv_bn_layer(input, 8, 3, "conv1")
    conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
    sum1 = conv1 + conv2
    conv3 = conv_bn_layer(sum1, 8, 3, "conv3")
    conv4 = conv_bn_layer(conv3, 8, 3, "conv4")
    sum2 = conv4 + sum1
    conv5 = conv_bn_layer(sum2, 8, 3, "conv5")
    conv6 = conv_bn_layer(conv5, 8, 3, "conv6")

place = fluid.CPUPlace()
exe = fluid.Executor(place)
scope = fluid.Scope()
exe.run(startup_program, scope=scope)
pruner = Pruner()
main_program, _, _ = pruner.prune(
    main_program,
    scope,
    params=["conv4_weights"],
    ratios=[0.5],
    place=place,
    lazy=False,
    only_graph=False,
    param_backup=False,
    param_shape_backup=False)

for param in main_program.global_block().all_parameters():
    if "weights" in param.name:
        print("param name: {}; param shape: {}".format(param.name, param.shape))
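A hedged sketch of how the param_backup option might be used to undo a trial pruning. It assumes the returned backup maps parameter names to their original values (verify the exact structure against your PaddleSlim version) and uses lazy=True so that parameter shapes are unchanged and the values can simply be written back into the scope:

import numpy as np

pruner = Pruner()
# lazy=True only zeroes the pruned channels, so parameter shapes stay the same
pruned_program, backup, _ = pruner.prune(
    main_program, scope,
    params=["conv4_weights"], ratios=[0.5],
    place=place, lazy=True, param_backup=True)

# ... evaluate pruned_program here ...

# assumed structure: {parameter name: original value}; write the values back to undo the pruning
for name, value in backup.items():
    scope.find_var(name).get_tensor().set(np.array(value), place)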
program_guard ( main_program , startup_program ): input = fluid . data ( name = \"image\" , shape = [ None , 3 , 16 , 16 ]) conv1 = conv_bn_layer ( input , 8 , 3 , \"conv1\" ) conv2 = conv_bn_layer ( conv1 , 8 , 3 , \"conv2\" ) sum1 = conv1 + conv2 conv3 = conv_bn_layer ( sum1 , 8 , 3 , \"conv3\" ) conv4 = conv_bn_layer ( conv3 , 8 , 3 , \"conv4\" ) sum2 = conv4 + sum1 conv5 = conv_bn_layer ( sum2 , 8 , 3 , \"conv5\" ) conv6 = conv_bn_layer ( conv5 , 8 , 3 , \"conv6\" ) place = fluid . CPUPlace () exe = fluid . Executor ( place ) scope = fluid . Scope () exe . run ( startup_program , scope = scope ) pruner = Pruner () main_program , _ , _ = pruner . prune ( main_program , scope , params = [ \"conv4_weights\" ], ratios = [ 0.5 ], place = place , lazy = False , only_graph = False , param_backup = False , param_shape_backup = False ) for param in main_program . global_block () . all_parameters (): if \"weights\" in param . name : print ( \"param name: {}; param shape: {}\" . format ( param . name , param . shape )) sensitivity # paddleslim.prune.sensitivity(program, place, param_names, eval_func, sensitivities_file=None, pruned_ratios=None) \u6e90\u4ee3\u7801 \u8ba1\u7b97\u7f51\u7edc\u4e2d\u6bcf\u4e2a\u5377\u79ef\u5c42\u7684\u654f\u611f\u5ea6\u3002\u6bcf\u4e2a\u5377\u79ef\u5c42\u7684\u654f\u611f\u5ea6\u4fe1\u606f\u7edf\u8ba1\u65b9\u6cd5\u4e3a\uff1a\u4f9d\u6b21\u526a\u6389\u5f53\u524d\u5377\u79ef\u5c42\u4e0d\u540c\u6bd4\u4f8b\u7684\u8f93\u51fa\u901a\u9053\u6570\uff0c\u5728\u6d4b\u8bd5\u96c6\u4e0a\u8ba1\u7b97\u526a\u88c1\u540e\u7684\u7cbe\u5ea6\u635f\u5931\u3002\u5f97\u5230\u654f\u611f\u5ea6\u4fe1\u606f\u540e\uff0c\u53ef\u4ee5\u901a\u8fc7\u89c2\u5bdf\u6216\u5176\u5b83\u65b9\u5f0f\u786e\u5b9a\u6bcf\u5c42\u5377\u79ef\u7684\u526a\u88c1\u7387\u3002 \u53c2\u6570\uff1a program(paddle.fluid.Program) - \u5f85\u8bc4\u4f30\u7684\u76ee\u6807\u7f51\u7edc\u3002\u66f4\u591a\u5173\u4e8eProgram\u7684\u4ecb\u7ecd\u8bf7\u53c2\u8003\uff1a Program\u6982\u5ff5\u4ecb\u7ecd \u3002 place(paddle.fluid.Place) - \u5f85\u5206\u6790\u7684\u53c2\u6570\u6240\u5728\u7684\u8bbe\u5907\u4f4d\u7f6e\uff0c\u53ef\u4ee5\u662f CUDAPlace \u6216 CPUPlace \u3002 Place\u6982\u5ff5\u4ecb\u7ecd param_names(list ) - \u5f85\u5206\u6790\u7684\u5377\u79ef\u5c42\u7684\u53c2\u6570\u7684\u540d\u79f0\u5217\u8868\u3002\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u65b9\u5f0f\u67e5\u770b\u6a21\u578b\u4e2d\u6240\u6709\u53c2\u6570\u7684\u540d\u79f0: 1 2 3 for block in program . blocks : for param in block . all_parameters () : print ( \" param: {}; shape: {} \" . format ( param . name , param . 
shape )) eval_func(function) - \u7528\u4e8e\u8bc4\u4f30\u88c1\u526a\u540e\u6a21\u578b\u6548\u679c\u7684\u56de\u8c03\u51fd\u6570\u3002\u8be5\u56de\u8c03\u51fd\u6570\u63a5\u53d7\u88ab\u88c1\u526a\u540e\u7684 program \u4e3a\u53c2\u6570\uff0c\u8fd4\u56de\u4e00\u4e2a\u8868\u793a\u5f53\u524dprogram\u7684\u7cbe\u5ea6\uff0c\u7528\u4ee5\u8ba1\u7b97\u5f53\u524d\u88c1\u526a\u5e26\u6765\u7684\u7cbe\u5ea6\u635f\u5931\u3002 sensitivities_file(str) - \u4fdd\u5b58\u654f\u611f\u5ea6\u4fe1\u606f\u7684\u672c\u5730\u6587\u4ef6\u7cfb\u7edf\u7684\u6587\u4ef6\u3002\u5728\u654f\u611f\u5ea6\u8ba1\u7b97\u8fc7\u7a0b\u4e2d\uff0c\u4f1a\u6301\u7eed\u5c06\u65b0\u8ba1\u7b97\u51fa\u7684\u654f\u611f\u5ea6\u4fe1\u606f\u8ffd\u52a0\u5230\u8be5\u6587\u4ef6\u4e2d\u3002\u91cd\u542f\u4efb\u52a1\u540e\uff0c\u6587\u4ef6\u4e2d\u5df2\u6709\u654f\u611f\u5ea6\u4fe1\u606f\u4e0d\u4f1a\u88ab\u91cd\u590d\u8ba1\u7b97\u3002\u8be5\u6587\u4ef6\u53ef\u4ee5\u7528 pickle \u52a0\u8f7d\u3002 pruned_ratios(list ) - \u8ba1\u7b97\u5377\u79ef\u5c42\u654f\u611f\u5ea6\u4fe1\u606f\u65f6\uff0c\u4f9d\u6b21\u526a\u6389\u7684\u901a\u9053\u6570\u6bd4\u4f8b\u3002\u9ed8\u8ba4\u4e3a[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\u3002 \u8fd4\u56de\uff1a sensitivities(dict) - \u5b58\u653e\u654f\u611f\u5ea6\u4fe1\u606f\u7684dict\uff0c\u5176\u683c\u5f0f\u4e3a\uff1a 1 2 3 4 5 6 7 8 9 { \"weight_0\" : { 0 . 1 : 0 . 22 , 0 . 2 : 0 . 33 } , \"weight_1\" : { 0 . 1 : 0 . 21 , 0 . 2 : 0 . 4 } } \u5176\u4e2d\uff0c weight_0 \u662f\u5377\u79ef\u5c42\u53c2\u6570\u7684\u540d\u79f0\uff0csensitivities['weight_0']\u7684 value \u4e3a\u526a\u88c1\u6bd4\u4f8b\uff0c value \u4e3a\u7cbe\u5ea6\u635f\u5931\u7684\u6bd4\u4f8b\u3002 \u793a\u4f8b\uff1a \u70b9\u51fb AIStudio \u8fd0\u884c\u4ee5\u4e0b\u793a\u4f8b\u4ee3\u7801\u3002 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 import paddle import numpy as np import paddle.fluid as fluid from paddle.fluid.param_attr import ParamAttr from paddleslim.prune import sensitivity import paddle.dataset.mnist as reader def conv_bn_layer ( input , num_filters , filter_size , name , stride = 1 , groups = 1 , act = None ): conv = fluid . layers . conv2d ( input = input , num_filters = num_filters , filter_size = filter_size , stride = stride , padding = ( filter_size - 1 ) // 2 , groups = groups , act = None , param_attr = ParamAttr ( name = name + \"_weights\" ), bias_attr = False , name = name + \"_out\" ) bn_name = name + \"_bn\" return fluid . layers . batch_norm ( input = conv , act = act , name = bn_name + '_output' , param_attr = ParamAttr ( name = bn_name + '_scale' ), bias_attr = ParamAttr ( bn_name + '_offset' ), moving_mean_name = bn_name + '_mean' , moving_variance_name = bn_name + '_variance' , ) main_program = fluid . Program () startup_program = fluid . Program () # X X O X O # conv1-->conv2-->sum1-->conv3-->conv4-->sum2-->conv5-->conv6 # | ^ | ^ # |____________| |____________________| # # X: prune output channels # O: prune input channels image_shape = [ 1 , 28 , 28 ] with fluid . program_guard ( main_program , startup_program ): image = fluid . data ( name = 'image' , shape = [ None ] + image_shape , dtype = 'float32' ) label = fluid . 
data ( name = 'label' , shape = [ None , 1 ], dtype = 'int64' ) conv1 = conv_bn_layer ( image , 8 , 3 , \"conv1\" ) conv2 = conv_bn_layer ( conv1 , 8 , 3 , \"conv2\" ) sum1 = conv1 + conv2 conv3 = conv_bn_layer ( sum1 , 8 , 3 , \"conv3\" ) conv4 = conv_bn_layer ( conv3 , 8 , 3 , \"conv4\" ) sum2 = conv4 + sum1 conv5 = conv_bn_layer ( sum2 , 8 , 3 , \"conv5\" ) conv6 = conv_bn_layer ( conv5 , 8 , 3 , \"conv6\" ) out = fluid . layers . fc ( conv6 , size = 10 , act = \"softmax\" ) # cost = fluid.layers.cross_entropy(input=out, label=label) # avg_cost = fluid.layers.mean(x=cost) acc_top1 = fluid . layers . accuracy ( input = out , label = label , k = 1 ) # acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5) place = fluid . CPUPlace () exe = fluid . Executor ( place ) exe . run ( startup_program ) val_reader = paddle . batch ( reader . test (), batch_size = 128 ) val_feeder = feeder = fluid . DataFeeder ( [ image , label ], place , program = main_program ) def eval_func ( program ): acc_top1_ns = [] for data in val_reader (): acc_top1_n = exe . run ( program , feed = val_feeder . feed ( data ), fetch_list = [ acc_top1 . name ]) acc_top1_ns . append ( np . mean ( acc_top1_n )) return np . mean ( acc_top1_ns ) param_names = [] for param in main_program . global_block () . all_parameters (): if \"weights\" in param . name : param_names . append ( param . name ) sensitivities = sensitivity ( main_program , place , param_names , eval_func , sensitivities_file = \"./sensitive.data\" , pruned_ratios = [ 0.1 , 0.2 , 0.3 ]) print ( sensitivities ) merge_sensitive # paddleslim.prune.merge_sensitive(sensitivities) \u6e90\u4ee3\u7801 \u5408\u5e76\u591a\u4e2a\u654f\u611f\u5ea6\u4fe1\u606f\u3002 \u53c2\u6570\uff1a sensitivities(list | list ) - \u5f85\u5408\u5e76\u7684\u654f\u611f\u5ea6\u4fe1\u606f\uff0c\u53ef\u4ee5\u662f\u5b57\u5178\u7684\u5217\u8868\uff0c\u6216\u8005\u662f\u5b58\u653e\u654f\u611f\u5ea6\u4fe1\u606f\u7684\u6587\u4ef6\u7684\u8def\u5f84\u5217\u8868\u3002 \u8fd4\u56de\uff1a sensitivities(dict) - \u5408\u5e76\u540e\u7684\u654f\u611f\u5ea6\u4fe1\u606f\u3002\u5176\u683c\u5f0f\u4e3a\uff1a 1 2 3 4 5 6 7 8 9 { \"weight_0\" : { 0 . 1 : 0 . 22 , 0 . 2 : 0 . 33 } , \"weight_1\" : { 0 . 1 : 0 . 21 , 0 . 2 : 0 . 4 } } \u5176\u4e2d\uff0c weight_0 \u662f\u5377\u79ef\u5c42\u53c2\u6570\u7684\u540d\u79f0\uff0csensitivities['weight_0']\u7684 value \u4e3a\u526a\u88c1\u6bd4\u4f8b\uff0c value \u4e3a\u7cbe\u5ea6\u635f\u5931\u7684\u6bd4\u4f8b\u3002 \u793a\u4f8b\uff1a load_sensitivities # paddleslim.prune.load_sensitivities(sensitivities_file) \u6e90\u4ee3\u7801 \u4ece\u6587\u4ef6\u4e2d\u52a0\u8f7d\u654f\u611f\u5ea6\u4fe1\u606f\u3002 \u53c2\u6570\uff1a sensitivities_file(str) - \u5b58\u653e\u654f\u611f\u5ea6\u4fe1\u606f\u7684\u672c\u5730\u6587\u4ef6. 
Returns: sensitivities(dict) - the loaded sensitivity information. Example: get_ratios_by_loss # paddleslim.prune.get_ratios_by_loss(sensitivities, loss) Source code Computes a set of pruning ratios from sensitivity information and an accuracy-loss threshold. For a parameter w, the returned ratio is the largest pruning ratio whose accuracy loss stays below loss. Parameters: sensitivities(dict) - sensitivity information. loss - accuracy-loss threshold. Returns: ratios(dict) - a set of pruning ratios; each key is the name of a parameter to be pruned and each value is the pruning ratio for that parameter.","title":"Pruning and sensitivity"},
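The merge_sensitive, load_sensitivities and get_ratios_by_loss entries above leave their example slots empty, so the following is a minimal sketch of how these pruning APIs are typically chained together: load and merge sensitivity files, convert them into per-parameter pruning ratios under an accuracy-loss budget, and apply the ratios with Pruner.prune. The loss threshold of 0.05, the file names ./sensitive_0.data and ./sensitive_1.data, and the build_program() helper are illustrative assumptions rather than part of the original documentation.

import paddle.fluid as fluid
from paddleslim.prune import Pruner, merge_sensitive, load_sensitivities, get_ratios_by_loss

# Assumption: build_program() constructs the network the same way as the
# Pruner.prune example above and returns (main_program, scope, place);
# it is not a PaddleSlim API.
main_program, scope, place = build_program()

# Merge sensitivity files produced by separate runs of paddleslim.prune.sensitivity,
# e.g. runs that covered different pruned_ratios.
sensitivities = merge_sensitive([
    load_sensitivities("./sensitive_0.data"),
    load_sensitivities("./sensitive_1.data"),
])

# For every parameter, pick the largest pruning ratio whose accuracy loss
# stays below 5%.
ratios = get_ratios_by_loss(sensitivities, loss=0.05)

pruner = Pruner(criterion="l1_norm")
pruned_program, _, _ = pruner.prune(
    main_program,
    scope,
    params=list(ratios.keys()),
    ratios=list(ratios.values()),
    place=place,
    lazy=False,
    only_graph=False)

Because ratios is keyed by parameter name, taking params and ratios from the same dict keeps the two lists aligned, as Pruner.prune requires.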
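The lazy and param_backup options of Pruner.prune are documented above without an example, so here is a short sketch, reusing main_program, scope, place and the conv4_weights parameter from the Pruner.prune example above, of zeroing out channels with lazy=True and then restoring them from the returned backup. It assumes the backup maps parameter names to their original numpy values, which the text above does not spell out; the scope.find_var(...).get_tensor().set(...) restore step is plain fluid usage, not a PaddleSlim API.

import numpy as np
from paddleslim.prune import Pruner

pruner = Pruner()

# lazy=True only zeroes the selected output channels, so parameter shapes stay
# unchanged and the pruning can be undone; param_backup=True returns the
# original values.
pruned_program, param_backup, _ = pruner.prune(
    main_program,
    scope,
    params=["conv4_weights"],
    ratios=[0.5],
    place=place,
    lazy=True,
    only_graph=False,
    param_backup=True)

# ... evaluate pruned_program here ...

# Restore the original parameter values from the backup (assumed to be
# {parameter name: numpy array}).
for name, value in param_backup.items():
    scope.find_var(name).get_tensor().set(np.array(value), place)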
{"location":"api/quantization_api/","text":"Quantization configuration # Quantization parameters are configured through a dict:
quant_config_default = {
    'weight_quantize_type': 'abs_max',
    'activation_quantize_type': 'abs_max',
    'weight_bits': 8,
    'activation_bits': 8,
    # ops whose name_scope is in the not_quant_pattern list will not be quantized
    'not_quant_pattern': ['skip_quant'],
    # ops whose type is in quantize_op_types will be quantized
    'quantize_op_types': ['conv2d', 'depthwise_conv2d', 'mul', 'elementwise_add', 'pool2d'],
    # data type after quantization, such as 'uint8', 'int8', etc.; default is 'int8'
    'dtype': 'int8',
    # window size for 'range_abs_max' quantization; default is 10000
    'window_size': 10000,
    # The decay coefficient of moving average; default is 0.9
    'moving_rate': 0.
9 , } \u53c2\u6570\uff1a weight_quantize_type(str) - \u53c2\u6570\u91cf\u5316\u65b9\u5f0f\u3002\u53ef\u9009 'abs_max' , 'channel_wise_abs_max' , 'range_abs_max' , 'moving_average_abs_max' \u3002 \u9ed8\u8ba4 'abs_max' \u3002 activation_quantize_type(str) - \u6fc0\u6d3b\u91cf\u5316\u65b9\u5f0f\uff0c\u53ef\u9009 'abs_max' , 'range_abs_max' , 'moving_average_abs_max' \uff0c\u9ed8\u8ba4 'abs_max' \u3002 weight_bits(int) - \u53c2\u6570\u91cf\u5316bit\u6570\uff0c\u9ed8\u8ba48, \u63a8\u8350\u8bbe\u4e3a8\u3002 activation_bits(int) - \u6fc0\u6d3b\u91cf\u5316bit\u6570\uff0c\u9ed8\u8ba48\uff0c \u63a8\u8350\u8bbe\u4e3a8\u3002 not_quant_pattern(str | list[str]) - \u6240\u6709 name_scope \u5305\u542b 'not_quant_pattern' \u5b57\u7b26\u4e32\u7684 op \uff0c\u90fd\u4e0d\u91cf\u5316, \u8bbe\u7f6e\u65b9\u5f0f\u8bf7\u53c2\u8003 fluid.name_scope \u3002 quantize_op_types(list[str]) - \u9700\u8981\u8fdb\u884c\u91cf\u5316\u7684 op \u7c7b\u578b\uff0c\u76ee\u524d\u652f\u6301 'conv2d', 'depthwise_conv2d', 'mul' \u3002 dtype(int8) - \u91cf\u5316\u540e\u7684\u53c2\u6570\u7c7b\u578b\uff0c\u9ed8\u8ba4 int8 , \u76ee\u524d\u4ec5\u652f\u6301 int8 \u3002 window_size(int) - 'range_abs_max' \u91cf\u5316\u65b9\u5f0f\u7684 window size \uff0c\u9ed8\u8ba410000\u3002 moving_rate(int) - 'moving_average_abs_max' \u91cf\u5316\u65b9\u5f0f\u7684\u8870\u51cf\u7cfb\u6570\uff0c\u9ed8\u8ba4 0.9\u3002 quant_aware # paddleslim.quant.quant_aware(program, place, config, scope=None, for_test=False) [\u6e90\u4ee3\u7801] \u5728 program \u4e2d\u52a0\u5165\u91cf\u5316\u548c\u53cd\u91cf\u5316 op , \u7528\u4e8e\u91cf\u5316\u8bad\u7ec3\u3002 \u53c2\u6570\uff1a program (fluid.Program) - \u4f20\u5165\u8bad\u7ec3\u6216\u6d4b\u8bd5 program \u3002 place(fluid.CPUPlace | fluid.CUDAPlace) - \u8be5\u53c2\u6570\u8868\u793a Executor \u6267\u884c\u6240\u5728\u7684\u8bbe\u5907\u3002 config(dict) - \u91cf\u5316\u914d\u7f6e\u8868\u3002 scope(fluid.Scope, optional) - \u4f20\u5165\u7528\u4e8e\u5b58\u50a8 Variable \u7684 scope \uff0c\u9700\u8981\u4f20\u5165 program \u6240\u4f7f\u7528\u7684 scope \uff0c\u4e00\u822c\u60c5\u51b5\u4e0b\uff0c\u662f fluid.global_scope() \u3002\u8bbe\u7f6e\u4e3a None \u65f6\u5c06\u4f7f\u7528 fluid.global_scope() \uff0c\u9ed8\u8ba4\u503c\u4e3a None \u3002 for_test(bool) - \u5982\u679c program \u53c2\u6570\u662f\u4e00\u4e2a\u6d4b\u8bd5 program \uff0c for_test \u5e94\u8bbe\u4e3a True \uff0c\u5426\u5219\u8bbe\u4e3a False \u3002 \u8fd4\u56de \u542b\u6709\u91cf\u5316\u548c\u53cd\u91cf\u5316 operator \u7684 program \u8fd4\u56de\u7c7b\u578b \u5f53 for_test=False \uff0c\u8fd4\u56de\u7c7b\u578b\u4e3a fluid.CompiledProgram \uff0c \u6ce8\u610f\uff0c\u6b64\u8fd4\u56de\u503c\u4e0d\u80fd\u7528\u4e8e\u4fdd\u5b58\u53c2\u6570 \u3002 \u5f53 for_test=True \uff0c\u8fd4\u56de\u7c7b\u578b\u4e3a fluid.Program \u3002 \u6ce8\u610f\u4e8b\u9879 \u6b64\u63a5\u53e3\u4f1a\u6539\u53d8 program \u7ed3\u6784\uff0c\u5e76\u4e14\u53ef\u80fd\u589e\u52a0\u4e00\u4e9b persistable \u7684\u53d8\u91cf\uff0c\u6240\u4ee5\u52a0\u8f7d\u6a21\u578b\u53c2\u6570\u65f6\u8bf7\u6ce8\u610f\u548c\u76f8\u5e94\u7684 program \u5bf9\u5e94\u3002 \u6b64\u63a5\u53e3\u5e95\u5c42\u7ecf\u5386\u4e86 fluid.Program -> fluid.framework.IrGraph -> fluid.Program \u7684\u8f6c\u53d8\uff0c\u5728 fluid.framework.IrGraph \u4e2d\u6ca1\u6709 Parameter \u7684\u6982\u5ff5\uff0c Variable \u53ea\u6709 persistable \u548c not persistable \u7684\u533a\u522b\uff0c\u6240\u4ee5\u5728\u4fdd\u5b58\u548c\u52a0\u8f7d\u53c2\u6570\u65f6\uff0c\u8bf7\u4f7f\u7528 fluid.io.save_persistables \u548c fluid.io.load_persistables \u63a5\u53e3\u3002 
\u7531\u4e8e\u6b64\u63a5\u53e3\u4f1a\u6839\u636e program \u7684\u7ed3\u6784\u548c\u91cf\u5316\u914d\u7f6e\u6765\u5bf9 program \u6dfb\u52a0op\uff0c\u6240\u4ee5 Paddle \u4e2d\u4e00\u4e9b\u901a\u8fc7 fuse op \u6765\u52a0\u901f\u8bad\u7ec3\u7684\u7b56\u7565\u4e0d\u80fd\u4f7f\u7528\u3002\u5df2\u77e5\u4ee5\u4e0b\u7b56\u7565\u5728\u4f7f\u7528\u91cf\u5316\u65f6\u5fc5\u987b\u8bbe\u4e3a False \uff1a fuse_all_reduce_ops, sync_batch_norm \u3002 \u5982\u679c\u4f20\u5165\u7684 program \u4e2d\u5b58\u5728\u548c\u4efb\u4f55op\u90fd\u6ca1\u6709\u8fde\u63a5\u7684 Variable \uff0c\u5219\u4f1a\u5728\u91cf\u5316\u7684\u8fc7\u7a0b\u4e2d\u88ab\u4f18\u5316\u6389\u3002 convert # paddleslim.quant.convert(program, place, config, scope=None, save_int8=False) [\u6e90\u4ee3\u7801] \u628a\u8bad\u7ec3\u597d\u7684\u91cf\u5316 program \uff0c\u8f6c\u6362\u4e3a\u53ef\u7528\u4e8e\u4fdd\u5b58 inference model \u7684 program \u3002 \u53c2\u6570\uff1a program (fluid.Program) - \u4f20\u5165\u6d4b\u8bd5 program \u3002 place(fluid.CPUPlace | fluid.CUDAPlace) - \u8be5\u53c2\u6570\u8868\u793a Executor \u6267\u884c\u6240\u5728\u7684\u8bbe\u5907\u3002 config(dict) - \u91cf\u5316\u914d\u7f6e\u8868\u3002 scope(fluid.Scope) - \u4f20\u5165\u7528\u4e8e\u5b58\u50a8 Variable \u7684 scope \uff0c\u9700\u8981\u4f20\u5165 program \u6240\u4f7f\u7528\u7684 scope \uff0c\u4e00\u822c\u60c5\u51b5\u4e0b\uff0c\u662f fluid.global_scope() \u3002\u8bbe\u7f6e\u4e3a None \u65f6\u5c06\u4f7f\u7528 fluid.global_scope() \uff0c\u9ed8\u8ba4\u503c\u4e3a None \u3002 save_int8\uff08bool) - \u662f\u5426\u9700\u8981\u8fd4\u56de\u53c2\u6570\u4e3a int8 \u7684 program \u3002\u8be5\u529f\u80fd\u76ee\u524d\u53ea\u80fd\u7528\u4e8e\u786e\u8ba4\u6a21\u578b\u5927\u5c0f\u3002\u9ed8\u8ba4\u503c\u4e3a False \u3002 \u8fd4\u56de program (fluid.Program) - freezed program\uff0c\u53ef\u7528\u4e8e\u4fdd\u5b58inference model\uff0c\u53c2\u6570\u4e3a float32 \u7c7b\u578b\uff0c\u4f46\u5176\u6570\u503c\u8303\u56f4\u53ef\u7528int8\u8868\u793a\u3002 int8_program (fluid.Program) - freezed program\uff0c\u53ef\u7528\u4e8e\u4fdd\u5b58inference model\uff0c\u53c2\u6570\u4e3a int8 \u7c7b\u578b\u3002\u5f53 save_int8 \u4e3a False \u65f6\uff0c\u4e0d\u8fd4\u56de\u8be5\u503c\u3002 \u6ce8\u610f\u4e8b\u9879 \u56e0\u4e3a\u8be5\u63a5\u53e3\u4f1a\u5bf9 op \u548c Variable \u505a\u76f8\u5e94\u7684\u5220\u9664\u548c\u4fee\u6539\uff0c\u6240\u4ee5\u6b64\u63a5\u53e3\u53ea\u80fd\u5728\u8bad\u7ec3\u5b8c\u6210\u4e4b\u540e\u8c03\u7528\u3002\u5982\u679c\u60f3\u8f6c\u5316\u8bad\u7ec3\u7684\u4e2d\u95f4\u6a21\u578b\uff0c\u53ef\u52a0\u8f7d\u76f8\u5e94\u7684\u53c2\u6570\u4e4b\u540e\u518d\u4f7f\u7528\u6b64\u63a5\u53e3\u3002 \u4ee3\u7801\u793a\u4f8b 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 #encoding=utf8 import paddle.fluid as fluid import paddleslim.quant as quant train_program = fluid . Program () with fluid . program_guard ( train_program ): image = fluid . data ( name = 'x' , shape = [ None , 1 , 28 , 28 ]) label = fluid . data ( name = 'label' , shape = [ None , 1 ], dtype = 'int64' ) conv = fluid . layers . conv2d ( image , 32 , 1 ) feat = fluid . layers . fc ( conv , 10 , act = 'softmax' ) cost = fluid . layers . cross_entropy ( input = feat , label = label ) avg_cost = fluid . layers . mean ( x = cost ) use_gpu = True place = fluid . CUDAPlace ( 0 ) if use_gpu else fluid . CPUPlace () exe = fluid . Executor ( place ) exe . run ( fluid . default_startup_program ()) eval_program = train_program . 
clone ( for_test = True ) #\u914d\u7f6e config = { 'weight_quantize_type' : 'abs_max' , 'activation_quantize_type' : 'moving_average_abs_max' } build_strategy = fluid . BuildStrategy () exec_strategy = fluid . ExecutionStrategy () #\u8c03\u7528api quant_train_program = quant . quant_aware ( train_program , place , config , for_test = False ) quant_eval_program = quant . quant_aware ( eval_program , place , config , for_test = True ) #\u5173\u95ed\u7b56\u7565 build_strategy . fuse_all_reduce_ops = False build_strategy . sync_batch_norm = False quant_train_program = quant_train_program . with_data_parallel ( loss_name = avg_cost . name , build_strategy = build_strategy , exec_strategy = exec_strategy ) inference_prog = quant . convert ( quant_eval_program , place , config ) \u66f4\u8be6\u7ec6\u7684\u7528\u6cd5\u8bf7\u53c2\u8003 \u91cf\u5316\u8bad\u7ec3demo \u3002 quant_post # paddleslim.quant.quant_post(executor, model_dir, quantize_model_path,sample_generator, model_filename=None, params_filename=None, batch_size=16,batch_nums=None, scope=None, algo='KL', quantizable_op_type=[\"conv2d\", \"depthwise_conv2d\", \"mul\"]) [\u6e90\u4ee3\u7801] \u5bf9\u4fdd\u5b58\u5728 ${model_dir} \u4e0b\u7684\u6a21\u578b\u8fdb\u884c\u91cf\u5316\uff0c\u4f7f\u7528 sample_generator \u7684\u6570\u636e\u8fdb\u884c\u53c2\u6570\u6821\u6b63\u3002 \u53c2\u6570: executor (fluid.Executor) - \u6267\u884c\u6a21\u578b\u7684executor\uff0c\u53ef\u4ee5\u5728cpu\u6216\u8005gpu\u4e0a\u6267\u884c\u3002 model_dir\uff08str) - \u9700\u8981\u91cf\u5316\u7684\u6a21\u578b\u6240\u5728\u7684\u6587\u4ef6\u5939\u3002 quantize_model_path(str) - \u4fdd\u5b58\u91cf\u5316\u540e\u7684\u6a21\u578b\u7684\u8def\u5f84 sample_generator(python generator) - \u8bfb\u53d6\u6570\u636e\u6837\u672c\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u6837\u672c\u3002 model_filename(str, optional) - \u6a21\u578b\u6587\u4ef6\u540d\uff0c\u5982\u679c\u9700\u8981\u91cf\u5316\u7684\u6a21\u578b\u7684\u53c2\u6570\u5b58\u5728\u4e00\u4e2a\u6587\u4ef6\u4e2d\uff0c\u5219\u9700\u8981\u8bbe\u7f6e model_filename \u4e3a\u6a21\u578b\u6587\u4ef6\u7684\u540d\u79f0\uff0c\u5426\u5219\u8bbe\u7f6e\u4e3a None \u5373\u53ef\u3002\u9ed8\u8ba4\u503c\u662f None \u3002 params_filename(str) - \u53c2\u6570\u6587\u4ef6\u540d\uff0c\u5982\u679c\u9700\u8981\u91cf\u5316\u7684\u6a21\u578b\u7684\u53c2\u6570\u5b58\u5728\u4e00\u4e2a\u6587\u4ef6\u4e2d\uff0c\u5219\u9700\u8981\u8bbe\u7f6e params_filename \u4e3a\u53c2\u6570\u6587\u4ef6\u7684\u540d\u79f0\uff0c\u5426\u5219\u8bbe\u7f6e\u4e3a None \u5373\u53ef\u3002\u9ed8\u8ba4\u503c\u662f None \u3002 batch_size(int) - \u6bcf\u4e2abatch\u7684\u56fe\u7247\u6570\u91cf\u3002\u9ed8\u8ba4\u503c\u4e3a16 \u3002 batch_nums(int, optional) - \u8fed\u4ee3\u6b21\u6570\u3002\u5982\u679c\u8bbe\u7f6e\u4e3a None \uff0c\u5219\u4f1a\u4e00\u76f4\u8fd0\u884c\u5230 sample_generator \u8fed\u4ee3\u7ed3\u675f\uff0c \u5426\u5219\uff0c\u8fed\u4ee3\u6b21\u6570\u4e3a batch_nums , \u4e5f\u5c31\u662f\u8bf4\u53c2\u4e0e\u5bf9 Scale \u8fdb\u884c\u6821\u6b63\u7684\u6837\u672c\u4e2a\u6570\u4e3a 'batch_nums' * 'batch_size' . scope(fluid.Scope, optional) - \u7528\u6765\u83b7\u53d6\u548c\u5199\u5165 Variable , \u5982\u679c\u8bbe\u7f6e\u4e3a None ,\u5219\u4f7f\u7528 fluid.global_scope() . \u9ed8\u8ba4\u503c\u662f None . 
algo(str) - \u91cf\u5316\u65f6\u4f7f\u7528\u7684\u7b97\u6cd5\u540d\u79f0\uff0c\u53ef\u4e3a 'KL' \u6216\u8005 'direct' \u3002\u8be5\u53c2\u6570\u4ec5\u9488\u5bf9\u6fc0\u6d3b\u503c\u7684\u91cf\u5316\uff0c\u56e0\u4e3a\u53c2\u6570\u503c\u7684\u91cf\u5316\u4f7f\u7528\u7684\u65b9\u5f0f\u4e3a 'channel_wise_abs_max' . \u5f53 algo \u8bbe\u7f6e\u4e3a 'direct' \u65f6\uff0c\u4f7f\u7528\u6821\u6b63\u6570\u636e\u7684\u6fc0\u6d3b\u503c\u7684\u7edd\u5bf9\u503c\u7684\u6700\u5927\u503c\u5f53\u4f5c Scale \u503c\uff0c\u5f53\u8bbe\u7f6e\u4e3a 'KL' \u65f6\uff0c\u5219\u4f7f\u7528 KL \u6563\u5ea6\u7684\u65b9\u6cd5\u6765\u8ba1\u7b97 Scale \u503c\u3002\u9ed8\u8ba4\u503c\u4e3a 'KL' \u3002 quantizable_op_type(list[str]) - \u9700\u8981\u91cf\u5316\u7684 op \u7c7b\u578b\u5217\u8868\u3002\u9ed8\u8ba4\u503c\u4e3a [\"conv2d\", \"depthwise_conv2d\", \"mul\"] \u3002 \u8fd4\u56de \u65e0\u3002 \u6ce8\u610f\u4e8b\u9879 \u56e0\u4e3a\u8be5\u63a5\u53e3\u4f1a\u6536\u96c6\u6821\u6b63\u6570\u636e\u7684\u6240\u6709\u7684\u6fc0\u6d3b\u503c\uff0c\u6240\u4ee5\u4f7f\u7528\u7684\u6821\u6b63\u56fe\u7247\u4e0d\u80fd\u592a\u591a\u3002 'KL' \u6563\u5ea6\u7684\u8ba1\u7b97\u4e5f\u6bd4\u8f83\u8017\u65f6\u3002 \u4ee3\u7801\u793a\u4f8b \u6ce8\uff1a \u6b64\u793a\u4f8b\u4e0d\u80fd\u76f4\u63a5\u8fd0\u884c\uff0c\u56e0\u4e3a\u9700\u8981\u52a0\u8f7d ${model_dir} \u4e0b\u7684\u6a21\u578b\uff0c\u6240\u4ee5\u4e0d\u80fd\u76f4\u63a5\u8fd0\u884c\u3002 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 import paddle.fluid as fluid import paddle.dataset.mnist as reader from paddleslim.quant import quant_post val_reader = reader . train () use_gpu = True place = fluid . CUDAPlace ( 0 ) if use_gpu else fluid . CPUPlace () exe = fluid . Executor ( place ) quant_post ( executor = exe , model_dir = './model_path' , quantize_model_path = './save_path' , sample_generator = val_reader , model_filename = '__model__' , params_filename = '__params__' , batch_size = 16 , batch_nums = 10 ) \u66f4\u8be6\u7ec6\u7684\u7528\u6cd5\u8bf7\u53c2\u8003 \u79bb\u7ebf\u91cf\u5316demo \u3002 quant_embedding # paddleslim.quant.quant_embedding(program, place, config, scope=None) [\u6e90\u4ee3\u7801] \u5bf9 Embedding \u53c2\u6570\u8fdb\u884c\u91cf\u5316\u3002 \u53c2\u6570: program(fluid.Program) - \u9700\u8981\u91cf\u5316\u7684program scope(fluid.Scope, optional) - \u7528\u6765\u83b7\u53d6\u548c\u5199\u5165 Variable , \u5982\u679c\u8bbe\u7f6e\u4e3a None ,\u5219\u4f7f\u7528 fluid.global_scope() . place(fluid.CPUPlace | fluid.CUDAPlace) - \u8fd0\u884cprogram\u7684\u8bbe\u5907 config(dict) - \u5b9a\u4e49\u91cf\u5316\u7684\u914d\u7f6e\u3002\u53ef\u4ee5\u914d\u7f6e\u7684\u53c2\u6570\u6709\uff1a 'params_name' (str, required): \u9700\u8981\u8fdb\u884c\u91cf\u5316\u7684\u53c2\u6570\u540d\u79f0\uff0c\u6b64\u53c2\u6570\u5fc5\u987b\u8bbe\u7f6e\u3002 'quantize_type' (str, optional): \u91cf\u5316\u7684\u7c7b\u578b\uff0c\u76ee\u524d\u652f\u6301\u7684\u7c7b\u578b\u662f 'abs_max' , \u5f85\u652f\u6301\u7684\u7c7b\u578b\u6709 'log', 'product_quantization' \u3002 \u9ed8\u8ba4\u503c\u662f 'abs_max' . 'quantize_bits' \uff08int, optional): \u91cf\u5316\u7684 bit \u6570\uff0c\u76ee\u524d\u652f\u6301\u7684 bit \u6570\u4e3a8\u3002\u9ed8\u8ba4\u503c\u662f8. 'dtype' (str, optional): \u91cf\u5316\u4e4b\u540e\u7684\u6570\u636e\u7c7b\u578b\uff0c \u76ee\u524d\u652f\u6301\u7684\u662f 'int8' . \u9ed8\u8ba4\u503c\u662f int8 \u3002 'threshold' (float, optional): \u91cf\u5316\u4e4b\u524d\u5c06\u6839\u636e\u6b64\u9608\u503c\u5bf9\u9700\u8981\u91cf\u5316\u7684\u53c2\u6570\u503c\u8fdb\u884c clip . 
If it is not set, the clip step is skipped and the parameters are quantized directly. Returns: the quantized program. Return type: fluid.Program Code example:
import paddle.fluid as fluid
import paddleslim.quant as quant

train_program = fluid.Program()
with fluid.program_guard(train_program):
    input_word = fluid.data(name=\"input_word\", shape=[None, 1], dtype='int64')
    input_emb = fluid.embedding(
        input=input_word,
        is_sparse=False,
        size=[100, 128],
        param_attr=fluid.ParamAttr(name='emb',
                                   initializer=fluid.initializer.Uniform(-0.005, 0.005)))

infer_program = train_program.clone(for_test=True)

use_gpu = True
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

config = {'params_name': 'emb', 'quantize_type': 'abs_max'}
quant_program = quant.quant_embedding(infer_program, place, config)
See the Embedding quantization demo for more detailed usage.","title":"Quantization"},
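The notes for quant_aware above require checkpoints of a quantization-aware program to be saved with fluid.io.save_persistables, but none of the examples show the saving step. The sketch below is one plausible continuation of the convert code example above (it reuses exe, image, feat, quant_eval_program and inference_prog from that example, and the directory names are illustrative); it is not taken from the original documentation.

import paddle.fluid as fluid

# quant_aware() may add persistable variables, so checkpoints must be written
# with the persistables API and reloaded against the matching program.
fluid.io.save_persistables(exe, "./quant_aware_ckpt", main_program=quant_eval_program)

# After quant.convert(), the frozen program can be exported as an inference model.
fluid.io.save_inference_model(
    dirname="./quant_infer_model",
    feeded_var_names=[image.name],
    target_vars=[feat],
    executor=exe,
    main_program=inference_prog)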
{"location":"api/search_space/","text":"Search spaces provided by paddleslim.nas: # Search spaces built from a full model architecture: 1.1 MobileNetV2Space 1.2 MobileNetV1Space 1.3 ResNetSpace Search spaces built from the blocks of the corresponding models: 2.1 MobileNetV1BlockSpace 2.2 MobileNetV2BlockSpace 2.3 ResNetBlockSpace 2.4 InceptionABlockSpace 2.5 InceptionCBlockSpace Configuration of a search space: # input_size(int|None): the size of the input feature map. output_size(int|None): the size of the output feature map. block_num(int|None): the number of blocks in the search space. block_mask(list|None): marks whether each block is a reduction block or a normal block; it is a list of 0s and 1s, where 0 means the block is a normal block and 1 means it is a reduction block. If block_mask is set, it takes precedence and the input_size, output_size and block_num settings are ignored. Note: 1. A reduction block halves the feature map size, while a normal block keeps the feature map size unchanged. 2.
input_size \u548c output_size \u7528\u6765\u8ba1\u7b97\u6574\u4e2a\u6a21\u578b\u7ed3\u6784\u4e2dreduction block\u6570\u91cf\u3002 \u641c\u7d22\u7a7a\u95f4\u793a\u4f8b\uff1a # \u4f7f\u7528paddleslim\u4e2d\u63d0\u4f9b\u7528\u539f\u672c\u7684\u6a21\u578b\u7ed3\u6784\u6765\u6784\u9020\u641c\u7d22\u7a7a\u95f4\u7684\u8bdd\uff0c\u4ec5\u9700\u8981\u6307\u5b9a\u641c\u7d22\u7a7a\u95f4\u540d\u5b57\u5373\u53ef\u3002\u4f8b\u5982\uff1a\u5982\u679c\u4f7f\u7528\u539f\u672c\u7684MobileNetV2\u7684\u641c\u7d22\u7a7a\u95f4\u8fdb\u884c\u641c\u7d22\u7684\u8bdd\uff0c\u4f20\u5165SANAS\u4e2d\u7684config\u76f4\u63a5\u6307\u5b9a\u4e3a[('MobileNetV2Space')]\u3002 \u4f7f\u7528paddleslim\u4e2d\u63d0\u4f9b\u7684block\u641c\u7d22\u7a7a\u95f4\u6784\u9020\u641c\u7d22\u7a7a\u95f4\uff1a 2.1 \u4f7f\u7528 input_size , output_size \u548c block_num \u6765\u6784\u9020\u641c\u7d22\u7a7a\u95f4\u3002\u4f8b\u5982\uff1a\u4f20\u5165SANAS\u7684config\u53ef\u4ee5\u6307\u5b9a\u4e3a[('MobileNetV2BlockSpace', {'input_size': 224, 'output_size': 32, 'block_num': 10})]\u3002 2.2 \u4f7f\u7528 block_mask \u6784\u9020\u641c\u7d22\u7a7a\u95f4\u3002\u4f8b\u5982\uff1a\u4f20\u5165SANAS\u7684config\u53ef\u4ee5\u6307\u5b9a\u4e3a[('MobileNetV2BlockSpace', {'block_mask': [0, 1, 1, 1, 1, 0, 1, 0]})]\u3002 \u81ea\u5b9a\u4e49\u641c\u7d22\u7a7a\u95f4(search space) # \u81ea\u5b9a\u4e49\u641c\u7d22\u7a7a\u95f4\u7c7b\u9700\u8981\u7ee7\u627f\u641c\u7d22\u7a7a\u95f4\u57fa\u7c7b\u5e76\u91cd\u5199\u4ee5\u4e0b\u51e0\u90e8\u5206\uff1a 1. \u521d\u59cb\u5316\u7684tokens( init_tokens \u51fd\u6570)\uff0c\u53ef\u4ee5\u8bbe\u7f6e\u4e3a\u81ea\u5df1\u60f3\u8981\u7684tokens\u5217\u8868, tokens\u5217\u8868\u4e2d\u7684\u6bcf\u4e2a\u6570\u5b57\u6307\u7684\u662f\u5f53\u524d\u6570\u5b57\u5728\u76f8\u5e94\u7684\u641c\u7d22\u5217\u8868\u4e2d\u7684\u7d22\u5f15\u3002\u4f8b\u5982\u672c\u793a\u4f8b\u4e2d\u82e5tokens=[0, 3, 5]\uff0c\u5219\u4ee3\u8868\u5f53\u524d\u6a21\u578b\u7ed3\u6784\u641c\u7d22\u5230\u7684\u901a\u9053\u6570\u4e3a[8, 40, 128]\u3002 2. token\u4e2d\u6bcf\u4e2a\u6570\u5b57\u7684\u641c\u7d22\u5217\u8868\u957f\u5ea6( range_table \u51fd\u6570)\uff0ctokens\u4e2d\u6bcf\u4e2atoken\u7684\u7d22\u5f15\u8303\u56f4\u3002 3. 
\u6839\u636etoken\u4ea7\u751f\u6a21\u578b\u7ed3\u6784( token2arch \u51fd\u6570)\uff0c\u6839\u636e\u641c\u7d22\u5230\u7684tokens\u5217\u8868\u4ea7\u751f\u6a21\u578b\u7ed3\u6784\u3002 \u4ee5\u65b0\u589ereset block\u4e3a\u4f8b\u8bf4\u660e\u5982\u4f55\u6784\u9020\u81ea\u5df1\u7684search space\u3002\u81ea\u5b9a\u4e49\u7684search space\u4e0d\u80fd\u548c\u5df2\u6709\u7684search space\u540c\u540d\u3002 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 ### \u5f15\u5165\u641c\u7d22\u7a7a\u95f4\u57fa\u7c7b\u51fd\u6570\u548csearch space\u7684\u6ce8\u518c\u7c7b\u51fd\u6570 from .search_space_base import SearchSpaceBase from .search_space_registry import SEARCHSPACE import numpy as np ### \u9700\u8981\u8c03\u7528\u6ce8\u518c\u51fd\u6570\u628a\u81ea\u5b9a\u4e49\u641c\u7d22\u7a7a\u95f4\u6ce8\u518c\u5230space space\u4e2d @SEARCHSPACE.register ### \u5b9a\u4e49\u4e00\u4e2a\u7ee7\u627fSearchSpaceBase\u57fa\u7c7b\u7684\u641c\u7d22\u7a7a\u95f4\u7684\u7c7b\u51fd\u6570 class ResNetBlockSpace2 ( SearchSpaceBase ): def __init__ ( self , input_size , output_size , block_num , block_mask ): ### \u5b9a\u4e49\u4e00\u4e9b\u5b9e\u9645\u60f3\u8981\u641c\u7d22\u7684\u5185\u5bb9\uff0c\u4f8b\u5982\uff1a\u901a\u9053\u6570\u3001\u6bcf\u4e2a\u5377\u79ef\u7684\u91cd\u590d\u6b21\u6570\u3001\u5377\u79ef\u6838\u5927\u5c0f\u7b49\u7b49 ### self.filter_num \u4ee3\u8868\u901a\u9053\u6570\u7684\u641c\u7d22\u5217\u8868 self . filter_num = np . array ([ 8 , 16 , 32 , 40 , 64 , 128 , 256 , 512 ]) ### \u5b9a\u4e49\u521d\u59cb\u5316token\uff0c\u521d\u59cb\u5316token\u7684\u957f\u5ea6\u6839\u636e\u4f20\u5165\u7684block_num\u6216\u8005block_mask\u7684\u957f\u5ea6\u6765\u5f97\u5230\u7684 def init_tokens ( self ): return [ 0 ] * 3 * len ( self . block_mask ) ### \u5b9a\u4e49 def range_table ( self ): return [ len ( self . filter_num )] * 3 * len ( self . block_mask ) def token2arch ( self , tokens = None ): if tokens == None : tokens = self . init_tokens () self . bottleneck_params_list = [] for i in range ( len ( self . block_mask )): self . bottleneck_params_list . append ( self . filter_num [ tokens [ i * 3 + 0 ]], self . filter_num [ tokens [ i * 3 + 1 ]], self . filter_num [ tokens [ i * 3 + 2 ]], 2 if self . block_mask [ i ] == 1 else 1 ) def net_arch ( input ): for i , layer_setting in enumerate ( self . bottleneck_params_list ): channel_num , stride = layer_setting [: - 1 ], layer_setting [ - 1 ] input = self . _resnet_block ( input , channel_num , stride , name = 'resnet_layer{}' . format ( i + 1 )) return input return net_arch ### \u6784\u9020\u5177\u4f53block\u7684\u64cd\u4f5c def _resnet_block ( self , input , channel_num , stride , name = None ): shortcut_conv = self . _shortcut ( input , channel_num [ 2 ], stride , name = name ) input = self . _conv_bn_layer ( input = input , num_filters = channel_num [ 0 ], filter_size = 1 , act = 'relu' , name = name + '_conv0' ) input = self . _conv_bn_layer ( input = input , num_filters = channel_num [ 1 ], filter_size = 3 , stride = stride , act = 'relu' , name = name + '_conv1' ) input = self . _conv_bn_layer ( input = input , num_filters = channel_num [ 2 ], filter_size = 1 , name = name + '_conv2' ) return fluid . layers . elementwise_add ( x = shortcut_conv , y = input , axis = 0 , name = name + '_elementwise_add' ) def _shortcut ( self , input , channel_num , stride , name = None ): channel_in = input . 
shape [ 1 ] if channel_in != channel_num or stride != 1 : return self . conv_bn_layer ( input , num_filters = channel_num , filter_size = 1 , stride = stride , name = name + '_shortcut' ) else : return input def _conv_bn_layer ( self , input , num_filters , filter_size , stride = 1 , padding = 'SAME' , act = None , name = None ): conv = fluid . layers . conv2d ( input , num_filters , filter_size , stride , name = name + '_conv' ) bn = fluid . layers . batch_norm ( conv , act = act , name = name + '_bn' ) return bn","title":"\u641c\u7d22\u7a7a\u95f4"},{"location":"api/search_space/#paddleslimnas","text":"\u6839\u636e\u539f\u672c\u6a21\u578b\u7ed3\u6784\u6784\u9020\u641c\u7d22\u7a7a\u95f4\uff1a 1.1 MobileNetV2Space 1.2 MobileNetV1Space 1.3 ResNetSpace \u6839\u636e\u76f8\u5e94\u6a21\u578b\u7684block\u6784\u9020\u641c\u7d22\u7a7a\u95f4 2.1 MobileNetV1BlockSpace 2.2 MobileNetV2BlockSpace 2.3 ResNetBlockSpace 2.4 InceptionABlockSpace 2.5 InceptionCBlockSpace","title":"paddleslim.nas \u63d0\u4f9b\u7684\u641c\u7d22\u7a7a\u95f4\uff1a"},{"location":"api/search_space/#_1","text":"input_size(int|None) \uff1a input_size \u8868\u793a\u8f93\u5165feature map\u7684\u5927\u5c0f\u3002 output_size(int|None) \uff1a output_size \u8868\u793a\u8f93\u51fafeature map\u7684\u5927\u5c0f\u3002 block_num(int|None) \uff1a block_num \u8868\u793a\u641c\u7d22\u7a7a\u95f4\u4e2dblock\u7684\u6570\u91cf\u3002 block_mask(list|None) \uff1a block_mask \u8868\u793a\u5f53\u524d\u7684block\u662f\u4e00\u4e2areduction block\u8fd8\u662f\u4e00\u4e2anormal block\uff0c\u662f\u4e00\u7ec4\u75310\u30011\u7ec4\u6210\u7684\u5217\u8868\uff0c0\u8868\u793a\u5f53\u524dblock\u662fnormal block\uff0c1\u8868\u793a\u5f53\u524dblock\u662freduction block\u3002\u5982\u679c\u8bbe\u7f6e\u4e86 block_mask \uff0c\u5219\u4e3b\u8981\u4ee5 block_mask \u4e3a\u4e3b\u8981\u914d\u7f6e\uff0c input_size \uff0c output_size \u548c block_num \u4e09\u79cd\u914d\u7f6e\u662f\u65e0\u6548\u7684\u3002 Note: 1. reduction block\u8868\u793a\u7ecf\u8fc7\u8fd9\u4e2ablock\u4e4b\u540e\u7684feature map\u5927\u5c0f\u4e0b\u964d\u4e3a\u4e4b\u524d\u7684\u4e00\u534a\uff0cnormal block\u8868\u793a\u7ecf\u8fc7\u8fd9\u4e2ablock\u4e4b\u540efeature map\u5927\u5c0f\u4e0d\u53d8\u3002 2. 
input_size \u548c output_size \u7528\u6765\u8ba1\u7b97\u6574\u4e2a\u6a21\u578b\u7ed3\u6784\u4e2dreduction block\u6570\u91cf\u3002","title":"\u641c\u7d22\u7a7a\u95f4\u7684\u914d\u7f6e\u4ecb\u7ecd\uff1a"},{"location":"api/search_space/#_2","text":"\u4f7f\u7528paddleslim\u4e2d\u63d0\u4f9b\u7528\u539f\u672c\u7684\u6a21\u578b\u7ed3\u6784\u6765\u6784\u9020\u641c\u7d22\u7a7a\u95f4\u7684\u8bdd\uff0c\u4ec5\u9700\u8981\u6307\u5b9a\u641c\u7d22\u7a7a\u95f4\u540d\u5b57\u5373\u53ef\u3002\u4f8b\u5982\uff1a\u5982\u679c\u4f7f\u7528\u539f\u672c\u7684MobileNetV2\u7684\u641c\u7d22\u7a7a\u95f4\u8fdb\u884c\u641c\u7d22\u7684\u8bdd\uff0c\u4f20\u5165SANAS\u4e2d\u7684config\u76f4\u63a5\u6307\u5b9a\u4e3a[('MobileNetV2Space')]\u3002 \u4f7f\u7528paddleslim\u4e2d\u63d0\u4f9b\u7684block\u641c\u7d22\u7a7a\u95f4\u6784\u9020\u641c\u7d22\u7a7a\u95f4\uff1a 2.1 \u4f7f\u7528 input_size , output_size \u548c block_num \u6765\u6784\u9020\u641c\u7d22\u7a7a\u95f4\u3002\u4f8b\u5982\uff1a\u4f20\u5165SANAS\u7684config\u53ef\u4ee5\u6307\u5b9a\u4e3a[('MobileNetV2BlockSpace', {'input_size': 224, 'output_size': 32, 'block_num': 10})]\u3002 2.2 \u4f7f\u7528 block_mask \u6784\u9020\u641c\u7d22\u7a7a\u95f4\u3002\u4f8b\u5982\uff1a\u4f20\u5165SANAS\u7684config\u53ef\u4ee5\u6307\u5b9a\u4e3a[('MobileNetV2BlockSpace', {'block_mask': [0, 1, 1, 1, 1, 0, 1, 0]})]\u3002","title":"\u641c\u7d22\u7a7a\u95f4\u793a\u4f8b\uff1a"},{"location":"api/search_space/#search-space","text":"\u81ea\u5b9a\u4e49\u641c\u7d22\u7a7a\u95f4\u7c7b\u9700\u8981\u7ee7\u627f\u641c\u7d22\u7a7a\u95f4\u57fa\u7c7b\u5e76\u91cd\u5199\u4ee5\u4e0b\u51e0\u90e8\u5206\uff1a 1. \u521d\u59cb\u5316\u7684tokens( init_tokens \u51fd\u6570)\uff0c\u53ef\u4ee5\u8bbe\u7f6e\u4e3a\u81ea\u5df1\u60f3\u8981\u7684tokens\u5217\u8868, tokens\u5217\u8868\u4e2d\u7684\u6bcf\u4e2a\u6570\u5b57\u6307\u7684\u662f\u5f53\u524d\u6570\u5b57\u5728\u76f8\u5e94\u7684\u641c\u7d22\u5217\u8868\u4e2d\u7684\u7d22\u5f15\u3002\u4f8b\u5982\u672c\u793a\u4f8b\u4e2d\u82e5tokens=[0, 3, 5]\uff0c\u5219\u4ee3\u8868\u5f53\u524d\u6a21\u578b\u7ed3\u6784\u641c\u7d22\u5230\u7684\u901a\u9053\u6570\u4e3a[8, 40, 128]\u3002 2. token\u4e2d\u6bcf\u4e2a\u6570\u5b57\u7684\u641c\u7d22\u5217\u8868\u957f\u5ea6( range_table \u51fd\u6570)\uff0ctokens\u4e2d\u6bcf\u4e2atoken\u7684\u7d22\u5f15\u8303\u56f4\u3002 3. 
\u6839\u636etoken\u4ea7\u751f\u6a21\u578b\u7ed3\u6784( token2arch \u51fd\u6570)\uff0c\u6839\u636e\u641c\u7d22\u5230\u7684tokens\u5217\u8868\u4ea7\u751f\u6a21\u578b\u7ed3\u6784\u3002 \u4ee5\u65b0\u589ereset block\u4e3a\u4f8b\u8bf4\u660e\u5982\u4f55\u6784\u9020\u81ea\u5df1\u7684search space\u3002\u81ea\u5b9a\u4e49\u7684search space\u4e0d\u80fd\u548c\u5df2\u6709\u7684search space\u540c\u540d\u3002 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 ### \u5f15\u5165\u641c\u7d22\u7a7a\u95f4\u57fa\u7c7b\u51fd\u6570\u548csearch space\u7684\u6ce8\u518c\u7c7b\u51fd\u6570 from .search_space_base import SearchSpaceBase from .search_space_registry import SEARCHSPACE import numpy as np ### \u9700\u8981\u8c03\u7528\u6ce8\u518c\u51fd\u6570\u628a\u81ea\u5b9a\u4e49\u641c\u7d22\u7a7a\u95f4\u6ce8\u518c\u5230space space\u4e2d @SEARCHSPACE.register ### \u5b9a\u4e49\u4e00\u4e2a\u7ee7\u627fSearchSpaceBase\u57fa\u7c7b\u7684\u641c\u7d22\u7a7a\u95f4\u7684\u7c7b\u51fd\u6570 class ResNetBlockSpace2 ( SearchSpaceBase ): def __init__ ( self , input_size , output_size , block_num , block_mask ): ### \u5b9a\u4e49\u4e00\u4e9b\u5b9e\u9645\u60f3\u8981\u641c\u7d22\u7684\u5185\u5bb9\uff0c\u4f8b\u5982\uff1a\u901a\u9053\u6570\u3001\u6bcf\u4e2a\u5377\u79ef\u7684\u91cd\u590d\u6b21\u6570\u3001\u5377\u79ef\u6838\u5927\u5c0f\u7b49\u7b49 ### self.filter_num \u4ee3\u8868\u901a\u9053\u6570\u7684\u641c\u7d22\u5217\u8868 self . filter_num = np . array ([ 8 , 16 , 32 , 40 , 64 , 128 , 256 , 512 ]) ### \u5b9a\u4e49\u521d\u59cb\u5316token\uff0c\u521d\u59cb\u5316token\u7684\u957f\u5ea6\u6839\u636e\u4f20\u5165\u7684block_num\u6216\u8005block_mask\u7684\u957f\u5ea6\u6765\u5f97\u5230\u7684 def init_tokens ( self ): return [ 0 ] * 3 * len ( self . block_mask ) ### \u5b9a\u4e49 def range_table ( self ): return [ len ( self . filter_num )] * 3 * len ( self . block_mask ) def token2arch ( self , tokens = None ): if tokens == None : tokens = self . init_tokens () self . bottleneck_params_list = [] for i in range ( len ( self . block_mask )): self . bottleneck_params_list . append ( self . filter_num [ tokens [ i * 3 + 0 ]], self . filter_num [ tokens [ i * 3 + 1 ]], self . filter_num [ tokens [ i * 3 + 2 ]], 2 if self . block_mask [ i ] == 1 else 1 ) def net_arch ( input ): for i , layer_setting in enumerate ( self . bottleneck_params_list ): channel_num , stride = layer_setting [: - 1 ], layer_setting [ - 1 ] input = self . _resnet_block ( input , channel_num , stride , name = 'resnet_layer{}' . format ( i + 1 )) return input return net_arch ### \u6784\u9020\u5177\u4f53block\u7684\u64cd\u4f5c def _resnet_block ( self , input , channel_num , stride , name = None ): shortcut_conv = self . _shortcut ( input , channel_num [ 2 ], stride , name = name ) input = self . _conv_bn_layer ( input = input , num_filters = channel_num [ 0 ], filter_size = 1 , act = 'relu' , name = name + '_conv0' ) input = self . _conv_bn_layer ( input = input , num_filters = channel_num [ 1 ], filter_size = 3 , stride = stride , act = 'relu' , name = name + '_conv1' ) input = self . _conv_bn_layer ( input = input , num_filters = channel_num [ 2 ], filter_size = 1 , name = name + '_conv2' ) return fluid . layers . elementwise_add ( x = shortcut_conv , y = input , axis = 0 , name = name + '_elementwise_add' ) def _shortcut ( self , input , channel_num , stride , name = None ): channel_in = input . 
shape [ 1 ] if channel_in != channel_num or stride != 1 : return self . conv_bn_layer ( input , num_filters = channel_num , filter_size = 1 , stride = stride , name = name + '_shortcut' ) else : return input def _conv_bn_layer ( self , input , num_filters , filter_size , stride = 1 , padding = 'SAME' , act = None , name = None ): conv = fluid . layers . conv2d ( input , num_filters , filter_size , stride , name = name + '_conv' ) bn = fluid . layers . batch_norm ( conv , act = act , name = name + '_bn' ) return bn","title":"\u81ea\u5b9a\u4e49\u641c\u7d22\u7a7a\u95f4(search space)"},{"location":"api/single_distiller_api/","text":"merge # paddleslim.dist.merge(teacher_program, student_program, data_name_map, place, scope=fluid.global_scope(), name_prefix='teacher_') [\u6e90\u4ee3\u7801] merge\u5c06\u4e24\u4e2apaddle program\uff08teacher_program, student_program\uff09\u878d\u5408\u4e3a\u4e00\u4e2aprogram\uff0c\u5e76\u5c06\u878d\u5408\u5f97\u5230\u7684program\u8fd4\u56de\u3002\u5728\u878d\u5408\u7684program\u4e2d\uff0c\u53ef\u4ee5\u4e3a\u5176\u4e2d\u5408\u9002\u7684teacher\u7279\u5f81\u56fe\u548cstudent\u7279\u5f81\u56fe\u6dfb\u52a0\u84b8\u998f\u635f\u5931\u51fd\u6570\uff0c\u4ece\u800c\u8fbe\u5230\u7528teacher\u6a21\u578b\u7684\u6697\u77e5\u8bc6\uff08Dark Knowledge\uff09\u6307\u5bfcstudent\u6a21\u578b\u5b66\u4e60\u7684\u76ee\u7684\u3002 \u53c2\u6570\uff1a teacher_program (Program)-\u5b9a\u4e49\u4e86teacher\u6a21\u578b\u7684 paddle program student_program (Program)-\u5b9a\u4e49\u4e86student\u6a21\u578b\u7684 paddle program data_name_map (dict)-teacher\u8f93\u5165\u63a5\u53e3\u540d\u4e0estudent\u8f93\u5165\u63a5\u53e3\u540d\u7684\u6620\u5c04\uff0c\u5176\u4e2ddict\u7684 key \u4e3ateacher\u7684\u8f93\u5165\u540d\uff0c value \u4e3astudent\u7684\u8f93\u5165\u540d place (fluid.CPUPlace()|fluid.CUDAPlace(N))-\u8be5\u53c2\u6570\u8868\u793a\u7a0b\u5e8f\u8fd0\u884c\u5728\u4f55\u79cd\u8bbe\u5907\u4e0a\uff0c\u8fd9\u91cc\u7684N\u4e3aGPU\u5bf9\u5e94\u7684ID scope (Scope)-\u8be5\u53c2\u6570\u8868\u793a\u7a0b\u5e8f\u4f7f\u7528\u7684\u53d8\u91cf\u4f5c\u7528\u57df\uff0c\u5982\u679c\u4e0d\u6307\u5b9a\u5c06\u4f7f\u7528\u9ed8\u8ba4\u7684\u5168\u5c40\u4f5c\u7528\u57df\u3002\u9ed8\u8ba4\u503c\uff1a fluid.global_scope() name_prefix (str)-merge\u64cd\u4f5c\u5c06\u7edf\u4e00\u4e3ateacher\u7684 Variables \u6dfb\u52a0\u7684\u540d\u79f0\u524d\u7f00name_prefix\u3002\u9ed8\u8ba4\u503c\uff1a'teacher_' \u8fd4\u56de\uff1a \u7531student_program\u548cteacher_program merge\u5f97\u5230\u7684program Note data_name_map \u662f teacher_var name\u5230student_var name\u7684\u6620\u5c04 \uff0c\u5982\u679c\u5199\u53cd\u53ef\u80fd\u65e0\u6cd5\u6b63\u786e\u8fdb\u884cmerge \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = dist . 
merge ( teacher_program , student_program , data_name_map , place ) fsp_loss # paddleslim.dist.fsp_loss(teacher_var1_name, teacher_var2_name, student_var1_name, student_var2_name, program=fluid.default_main_program()) [\u6e90\u4ee3\u7801] fsp_loss\u4e3aprogram\u5185\u7684teacher var\u548cstudent var\u6dfb\u52a0fsp loss\uff0c\u51fa\u81ea\u8bba\u6587 <> \u53c2\u6570\uff1a teacher_var1_name (str): teacher_var1\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, x_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64 teacher_var2_name (str): teacher_var2\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, y_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64\u3002\u53ea\u6709y_channel\u53ef\u4ee5\u4e0eteacher_var1\u7684x_channel\u4e0d\u540c\uff0c\u5176\u4ed6\u7ef4\u5ea6\u5fc5\u987b\u4e0eteacher_var1\u76f8\u540c student_var1_name (str): student_var1\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u9700\u4e0eteacher_var1\u5c3a\u5bf8\u4fdd\u6301\u4e00\u81f4\uff0c\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, x_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64 student_var2_name (str): student_var2\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u9700\u4e0eteacher_var2\u5c3a\u5bf8\u4fdd\u6301\u4e00\u81f4\uff0c\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, y_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64\u3002\u53ea\u6709y_channel\u53ef\u4ee5\u4e0estudent_var1\u7684x_channel\u4e0d\u540c\uff0c\u5176\u4ed6\u7ef4\u5ea6\u5fc5\u987b\u4e0estudent_var1\u76f8\u540c program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() \u8fd4\u56de\uff1a \u7531teacher_var1, teacher_var2, student_var1, student_var2\u7ec4\u5408\u5f97\u5230\u7684fsp_loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) with fluid . program_guard ( main_program ): distillation_loss = dist . fsp_loss ( 'teacher_t1.tmp_1' , 'teacher_t2.tmp_1' , 's1.tmp_1' , 's2.tmp_1' , main_program ) l2_loss # paddleslim.dist.l2_loss(teacher_var_name, student_var_name, program=fluid.default_main_program()) [\u6e90\u4ee3\u7801] l2_loss\u4e3aprogram\u5185\u7684teacher var\u548cstudent var\u6dfb\u52a0l2 loss \u53c2\u6570\uff1a teacher_var_name (str): teacher_var\u7684\u540d\u79f0. student_var_name (str): student_var\u7684\u540d\u79f0. 
program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() \u8fd4\u56de\uff1a \u7531teacher_var, student_var\u7ec4\u5408\u5f97\u5230\u7684l2_loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) with fluid . program_guard ( main_program ): distillation_loss = dist . l2_loss ( 'teacher_t2.tmp_1' , 's2.tmp_1' , main_program ) soft_label_loss # paddleslim.dist.soft_label_loss(teacher_var_name, student_var_name, program=fluid.default_main_program(), teacher_temperature=1., student_temperature=1.) [\u6e90\u4ee3\u7801] soft_label_loss\u4e3aprogram\u5185\u7684teacher var\u548cstudent var\u6dfb\u52a0soft label loss\uff0c\u51fa\u81ea\u8bba\u6587 <> \u53c2\u6570\uff1a teacher_var_name (str): teacher_var\u7684\u540d\u79f0. student_var_name (str): student_var\u7684\u540d\u79f0. program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() teacher_temperature (float): \u5bf9teacher_var\u8fdb\u884csoft\u64cd\u4f5c\u7684\u6e29\u5ea6\u503c\uff0c\u6e29\u5ea6\u503c\u8d8a\u5927\u5f97\u5230\u7684\u7279\u5f81\u56fe\u8d8a\u5e73\u6ed1 student_temperature (float): \u5bf9student_var\u8fdb\u884csoft\u64cd\u4f5c\u7684\u6e29\u5ea6\u503c\uff0c\u6e29\u5ea6\u503c\u8d8a\u5927\u5f97\u5230\u7684\u7279\u5f81\u56fe\u8d8a\u5e73\u6ed1 \u8fd4\u56de\uff1a \u7531teacher_var, student_var\u7ec4\u5408\u5f97\u5230\u7684soft_label_loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) with fluid . program_guard ( main_program ): distillation_loss = dist . soft_label_loss ( 'teacher_t2.tmp_1' , 's2.tmp_1' , main_program , 1. , 1. 
) loss # paddleslim.dist.loss(loss_func, program=fluid.default_main_program(), **kwargs) [\u6e90\u4ee3\u7801] loss\u51fd\u6570\u652f\u6301\u5bf9\u4efb\u610f\u591a\u5bf9teacher_var\u548cstudent_var\u4f7f\u7528\u81ea\u5b9a\u4e49\u635f\u5931\u51fd\u6570 \u53c2\u6570\uff1a loss_func (python function): \u81ea\u5b9a\u4e49\u7684\u635f\u5931\u51fd\u6570\uff0c\u8f93\u5165\u4e3ateacher var\u548cstudent var\uff0c\u8f93\u51fa\u4e3a\u81ea\u5b9a\u4e49\u7684loss program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() **kwargs : loss_func\u8f93\u5165\u540d\u4e0e\u5bf9\u5e94variable\u540d\u79f0 \u8fd4\u56de \uff1a\u81ea\u5b9a\u4e49\u7684\u635f\u5931\u51fd\u6570loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) def adaptation_loss ( t_var , s_var ): teacher_channel = t_var . shape [ 1 ] s_hint = fluid . layers . conv2d ( s_var , teacher_channel , 1 ) hint_loss = fluid . layers . reduce_mean ( fluid . layers . square ( s_hint - t_var )) return hint_loss with fluid . program_guard ( main_program ): distillation_loss = dist . 
loss ( adaptation_loss , main_program , t_var = 'teacher_t2.tmp_1' , s_var = 's2.tmp_1' ) Notes Adding the distillation loss introduces new variables; take care that the newly introduced variables do not collide with the names of the student variables. Two usages are suggested here (either one is enough): use the same name scope as student_program, so that variables without an explicit name (e.g. tmp_0, tmp_1...) are not defined under the same name repeatedly and cause naming conflicts; or specify a name-scope prefix when adding the distillation loss, see the Paddle documentation of fluid.name_scope for the concrete usage","title":"Knowledge Distillation"},{"location":"api/single_distiller_api/#merge","text":"paddleslim.dist.merge(teacher_program, student_program, data_name_map, place, scope=fluid.global_scope(), name_prefix='teacher_') [source code] merge fuses two paddle programs (teacher_program and student_program) into a single program and returns it. In the fused program, distillation loss functions can be added between suitable teacher feature maps and student feature maps, so that the dark knowledge (Dark Knowledge) of the teacher model guides the learning of the student model. Parameters: teacher_program (Program) - the paddle program that defines the teacher model student_program (Program) - the paddle program that defines the student model data_name_map (dict) - the mapping from teacher input names to student input names; the dict key is the teacher's input name and the value is the student's input name place (fluid.CPUPlace()|fluid.CUDAPlace(N)) - the device the program runs on, where N is the corresponding GPU id scope (Scope) - the variable scope used by the program; if not specified, the default global scope is used. Default: fluid.global_scope() name_prefix (str) - the name prefix name_prefix that the merge operation uniformly adds to the teacher's Variables. Default: 'teacher_' Returns: the program obtained by merging student_program and teacher_program Note data_name_map maps teacher_var names to student_var names ; if it is written the other way round, merge may not work correctly Example: import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid .
layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = dist . merge ( teacher_program , student_program , data_name_map , place )","title":"merge"},{"location":"api/single_distiller_api/#fsp_loss","text":"paddleslim.dist.fsp_loss(teacher_var1_name, teacher_var2_name, student_var1_name, student_var2_name, program=fluid.default_main_program()) [\u6e90\u4ee3\u7801] fsp_loss\u4e3aprogram\u5185\u7684teacher var\u548cstudent var\u6dfb\u52a0fsp loss\uff0c\u51fa\u81ea\u8bba\u6587 <> \u53c2\u6570\uff1a teacher_var1_name (str): teacher_var1\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, x_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64 teacher_var2_name (str): teacher_var2\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, y_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64\u3002\u53ea\u6709y_channel\u53ef\u4ee5\u4e0eteacher_var1\u7684x_channel\u4e0d\u540c\uff0c\u5176\u4ed6\u7ef4\u5ea6\u5fc5\u987b\u4e0eteacher_var1\u76f8\u540c student_var1_name (str): student_var1\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u9700\u4e0eteacher_var1\u5c3a\u5bf8\u4fdd\u6301\u4e00\u81f4\uff0c\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, x_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64 student_var2_name (str): student_var2\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u9700\u4e0eteacher_var2\u5c3a\u5bf8\u4fdd\u6301\u4e00\u81f4\uff0c\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, y_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64\u3002\u53ea\u6709y_channel\u53ef\u4ee5\u4e0estudent_var1\u7684x_channel\u4e0d\u540c\uff0c\u5176\u4ed6\u7ef4\u5ea6\u5fc5\u987b\u4e0estudent_var1\u76f8\u540c program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() \u8fd4\u56de\uff1a \u7531teacher_var1, teacher_var2, student_var1, student_var2\u7ec4\u5408\u5f97\u5230\u7684fsp_loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) with fluid . program_guard ( main_program ): distillation_loss = dist . 
fsp_loss ( 'teacher_t1.tmp_1' , 'teacher_t2.tmp_1' , 's1.tmp_1' , 's2.tmp_1' , main_program )","title":"fsp_loss"},{"location":"api/single_distiller_api/#l2_loss","text":"paddleslim.dist.l2_loss(teacher_var_name, student_var_name, program=fluid.default_main_program()) [\u6e90\u4ee3\u7801] l2_loss\u4e3aprogram\u5185\u7684teacher var\u548cstudent var\u6dfb\u52a0l2 loss \u53c2\u6570\uff1a teacher_var_name (str): teacher_var\u7684\u540d\u79f0. student_var_name (str): student_var\u7684\u540d\u79f0. program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() \u8fd4\u56de\uff1a \u7531teacher_var, student_var\u7ec4\u5408\u5f97\u5230\u7684l2_loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) with fluid . program_guard ( main_program ): distillation_loss = dist . l2_loss ( 'teacher_t2.tmp_1' , 's2.tmp_1' , main_program )","title":"l2_loss"},{"location":"api/single_distiller_api/#soft_label_loss","text":"paddleslim.dist.soft_label_loss(teacher_var_name, student_var_name, program=fluid.default_main_program(), teacher_temperature=1., student_temperature=1.) [\u6e90\u4ee3\u7801] soft_label_loss\u4e3aprogram\u5185\u7684teacher var\u548cstudent var\u6dfb\u52a0soft label loss\uff0c\u51fa\u81ea\u8bba\u6587 <> \u53c2\u6570\uff1a teacher_var_name (str): teacher_var\u7684\u540d\u79f0. student_var_name (str): student_var\u7684\u540d\u79f0. program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() teacher_temperature (float): \u5bf9teacher_var\u8fdb\u884csoft\u64cd\u4f5c\u7684\u6e29\u5ea6\u503c\uff0c\u6e29\u5ea6\u503c\u8d8a\u5927\u5f97\u5230\u7684\u7279\u5f81\u56fe\u8d8a\u5e73\u6ed1 student_temperature (float): \u5bf9student_var\u8fdb\u884csoft\u64cd\u4f5c\u7684\u6e29\u5ea6\u503c\uff0c\u6e29\u5ea6\u503c\u8d8a\u5927\u5f97\u5230\u7684\u7279\u5f81\u56fe\u8d8a\u5e73\u6ed1 \u8fd4\u56de\uff1a \u7531teacher_var, student_var\u7ec4\u5408\u5f97\u5230\u7684soft_label_loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . 
conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) with fluid . program_guard ( main_program ): distillation_loss = dist . soft_label_loss ( 'teacher_t2.tmp_1' , 's2.tmp_1' , main_program , 1. , 1. )","title":"soft_label_loss"},{"location":"api/single_distiller_api/#loss","text":"paddleslim.dist.loss(loss_func, program=fluid.default_main_program(), **kwargs) [\u6e90\u4ee3\u7801] loss\u51fd\u6570\u652f\u6301\u5bf9\u4efb\u610f\u591a\u5bf9teacher_var\u548cstudent_var\u4f7f\u7528\u81ea\u5b9a\u4e49\u635f\u5931\u51fd\u6570 \u53c2\u6570\uff1a loss_func (python function): \u81ea\u5b9a\u4e49\u7684\u635f\u5931\u51fd\u6570\uff0c\u8f93\u5165\u4e3ateacher var\u548cstudent var\uff0c\u8f93\u51fa\u4e3a\u81ea\u5b9a\u4e49\u7684loss program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() **kwargs : loss_func\u8f93\u5165\u540d\u4e0e\u5bf9\u5e94variable\u540d\u79f0 \u8fd4\u56de \uff1a\u81ea\u5b9a\u4e49\u7684\u635f\u5931\u51fd\u6570loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) def adaptation_loss ( t_var , s_var ): teacher_channel = t_var . shape [ 1 ] s_hint = fluid . layers . conv2d ( s_var , teacher_channel , 1 ) hint_loss = fluid . layers . reduce_mean ( fluid . layers . square ( s_hint - t_var )) return hint_loss with fluid . program_guard ( main_program ): distillation_loss = dist . 
loss ( adaptation_loss , main_program , t_var = 'teacher_t2.tmp_1' , s_var = 's2.tmp_1' ) Notes Adding the distillation loss introduces new variables; take care that the newly introduced variables do not collide with the names of the student variables. Two usages are suggested here (either one is enough): use the same name scope as student_program, so that variables without an explicit name (e.g. tmp_0, tmp_1...) are not defined under the same name repeatedly and cause naming conflicts; or specify a name-scope prefix when adding the distillation loss, see the Paddle documentation of fluid.name_scope for the concrete usage","title":"loss"},{"location":"tutorials/demo_guide/","text":"Distillation # By default the distillation demo uses ResNet50 as the teacher network and MobileNet as the student network; the teacher and student can also be replaced with any model supported in the models directory. The demo adds an l2_loss distillation loss on one pair of feature maps of the teacher and student models; fsp_loss, soft_label_loss, or a custom loss function can be chosen instead as needed. By default training runs 120 epochs of distillation on the cifar10 dataset with the piecewise_decay learning-rate schedule and the momentum optimizer (see the optimizer sketch after this entry); users can simply switch to the ImageNet dataset, the cosine_decay learning-rate schedule and other training configurations through the args arguments. Quantization # Quantization-aware training demo # Post-training quantization demo # Embedding quantization demo # NAS # NAS example #","title":"Demo guide"},
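A hypothetical create_optimizer helper in the spirit of the training configuration described above might look like the following. This is only a sketch: the boundary epochs, learning-rate values and weight decay are illustrative assumptions, not the demo's actual defaults (those live in the demo's args).

import paddle.fluid as fluid

def create_optimizer(total_images=50000, batch_size=256):
    # piecewise_decay counts in batches; decay at assumed epoch boundaries 30/60/90
    steps_per_epoch = total_images // batch_size
    boundaries = [steps_per_epoch * e for e in (30, 60, 90)]
    values = [0.1, 0.01, 0.001, 0.0001]
    lr = fluid.layers.piecewise_decay(boundaries=boundaries, values=values)
    # momentum optimizer, matching the schedule/optimizer named in the demo description
    return fluid.optimizer.Momentum(
        learning_rate=lr,
        momentum=0.9,
        regularization=fluid.regularizer.L2Decay(1e-4))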
{"location":"tutorials/demo_guide/#_1","text":"By default the distillation demo uses ResNet50 as the teacher network and MobileNet as the student network; the teacher and student can also be replaced with any model supported in the models directory. The demo adds an l2_loss distillation loss on one pair of feature maps of the teacher and student models; fsp_loss, soft_label_loss, or a custom loss function can be chosen instead as needed. By default training runs 120 epochs of distillation on the cifar10 dataset with the piecewise_decay learning-rate schedule and the momentum optimizer; users can simply switch to the ImageNet dataset, the cosine_decay learning-rate schedule and other training configurations through the args arguments.","title":"Distillation"},{"location":"tutorials/demo_guide/#_2","text":"","title":"Quantization"},{"location":"tutorials/demo_guide/#demo","text":"","title":"Quantization-aware training demo"},{"location":"tutorials/demo_guide/#demo_1","text":"","title":"Post-training quantization demo"},{"location":"tutorials/demo_guide/#embeddingdemo","text":"","title":"Embedding quantization demo"},{"location":"tutorials/demo_guide/#nas","text":"","title":"NAS"},{"location":"tutorials/demo_guide/#nas_1","text":"","title":"NAS example"},{"location":"tutorials/distillation_demo/","text":"This example shows how to use the PaddleSlim distillation interfaces to run distillation training on a model. Interface introduction # Please refer to the distillation API documentation. PaddleSlim distillation training workflow # In general, the more parameters a model has and the more complex its structure is, the better its performance, but also the higher its computation and resource cost. Knowledge distillation is a method that compresses the useful information (Dark Knowledge) learned by a large model into a smaller and faster model, so that the small model can match the results of the large one. In this example the large model with higher accuracy is called the teacher, and the smaller model that is slightly less accurate but faster is called the student. 1. Define the student_program # student_program = fluid . Program () student_startup = fluid . Program () with fluid . program_guard ( student_program , student_startup ): image = fluid . data ( name = 'image' , shape = [ None ] + [ 3 , 224 , 224 ], dtype = 'float32' ) label = fluid . data ( name = 'label' , shape = [ None , 1 ], dtype = 'int64' ) # student model definition model = MobileNet () out = model . net ( input = image , class_dim = 1000 ) cost = fluid . layers . cross_entropy ( input = out , label = label ) avg_cost = fluid . layers . mean ( x = cost ) 2. Define the teacher_program # After teacher_program is defined, the trained pretrained_model can be loaded at the same time. Inside teacher_program, with fluid.unique_name.guard(): has to be added so that the teacher's variable names are not affected by student_program and the pretrained parameters can be loaded correctly. teacher_program = fluid . Program () teacher_startup = fluid . Program () with fluid . program_guard ( teacher_program , teacher_startup ): with fluid . unique_name . guard (): image = fluid . data ( name = 'data' , shape = [ None ] + [ 3 , 224 , 224 ], dtype = 'float32' ) # teacher model definition teacher_model = ResNet () predict = teacher_model . net ( image , class_dim = 1000 ) exe . run ( teacher_startup ) def if_exist ( var ): return os . path . exists ( os . path . join ( \"./pretrained\" , var . name )) fluid . io . load_vars ( exe , \"./pretrained\" , main_program = teacher_program , predicate = if_exist ) 3. Select feature maps # With student_program and teacher_program defined, we need to pick several pairs of corresponding feature maps from the two programs, to which the knowledge-distillation loss functions will be added later. # get all student variables student_vars = [] for v in student_program . list_vars (): try : student_vars . append (( v . name , v . shape )) except : pass print ( \"=\" * 50 + \"student_model_vars\" + \"=\" * 50 ) print ( student_vars ) # get all teacher variables teacher_vars = [] for v in teacher_program . list_vars (): try : teacher_vars . append (( v . name , v . shape )) except : pass print ( \"=\" * 50 + \"teacher_model_vars\" + \"=\" * 50 ) print ( teacher_vars ) 4. Merge the programs (merge) # PaddlePaddle describes the computation graph with a Program. To compute the student and the teacher Programs at the same time, the two have to be merged into one Program. The merge step performs quite a few operations; for the details please refer to the merge API documentation (a small inspection sketch follows the code below). data_name_map = { 'data' : 'image' } student_program = merge ( teacher_program , student_program , data_name_map , place )
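After merge, every teacher variable in the merged program carries the name prefix ('teacher_' by default), which is why the prefix appears in the loss call of the next step. A minimal way to inspect this, assuming the merged program is stored back into student_program as in this tutorial and the default name_prefix is used:

# teacher variables were renamed with the default prefix 'teacher_' by merge()
teacher_var_names = sorted(v.name for v in student_program.list_vars()
                           if v.name.startswith('teacher_'))
print(len(teacher_var_names), 'teacher variables present after merge')
print(teacher_var_names[:10])  # inspect a few of the renamed teacher variables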
5. Add the distillation loss # Adding the distillation loss may introduce additional variables (Variable); to avoid name collisions, with fluid.name_scope(\"distill\"): can be used to put the newly introduced variables under their own name scope. Also note that the merge step adds a unified name prefix to the variables of teacher_program, \"teacher_\" by default, so this prefix also has to be added to the teacher variable name when calling l2_loss here. with fluid . program_guard ( student_program , student_startup ): with fluid . name_scope ( \"distill\" ): distill_loss = l2_loss ( 'teacher_bn5c_branch2b.output.1.tmp_3' , 'depthwise_conv2d_11.tmp_0' , student_program ) distill_weight = 1 loss = avg_cost + distill_loss * distill_weight opt = create_optimizer () opt . minimize ( loss ) exe . run ( student_startup ) At this point we have the student_program used for distillation training; from here on it can be trained and evaluated just like an ordinary program (a minimal training-loop sketch follows).","title":"Knowledge Distillation"},
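A minimal sketch of such a training loop. It assumes a batched train_reader over (image, label) samples and a place object created alongside exe earlier, none of which are shown in the tutorial snippets; the epoch count mirrors the demo's stated default of 120:

# feed the merged student_program like any ordinary program
feeder = fluid.DataFeeder(feed_list=[image, label], place=place, program=student_program)
for epoch in range(120):
    for batch in train_reader():  # train_reader is assumed to be defined elsewhere
        loss_v, distill_v = exe.run(student_program,
                                    feed=feeder.feed(batch),
                                    fetch_list=[loss.name, distill_loss.name])
    print('epoch {}: loss={}, distill_loss={}'.format(epoch, float(loss_v), float(distill_v)))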
{"location":"tutorials/distillation_demo/#_1","text":"Please refer to the distillation API documentation.","title":"Interface introduction"},{"location":"tutorials/distillation_demo/#paddleslim","text":"In general, the more parameters a model has and the more complex its structure is, the better its performance, but also the higher its computation and resource cost. Knowledge distillation is a method that compresses the useful information (Dark Knowledge) learned by a large model into a smaller and faster model, so that the small model can match the results of the large one. In this example the large model with higher accuracy is called the teacher, and the smaller model that is slightly less accurate but faster is called the student.","title":"PaddleSlim distillation training workflow"},{"location":"tutorials/distillation_demo/#1-student_program","text":"student_program = fluid . Program () student_startup = fluid . Program () with fluid . program_guard ( student_program , student_startup ): image = fluid . data ( name = 'image' , shape = [ None ] + [ 3 , 224 , 224 ], dtype = 'float32' ) label = fluid . data ( name = 'label' , shape = [ None , 1 ], dtype = 'int64' ) # student model definition model = MobileNet () out = model . net ( input = image , class_dim = 1000 ) cost = fluid . layers . cross_entropy ( input = out , label = label ) avg_cost = fluid . layers . mean ( x = cost )","title":"1. Define the student_program"},{"location":"tutorials/distillation_demo/#2-teacher_program","text":"After teacher_program is defined, the trained pretrained_model can be loaded at the same time. Inside teacher_program, with fluid.unique_name.guard(): has to be added so that the teacher's variable names are not affected by student_program and the pretrained parameters can be loaded correctly. teacher_program = fluid . Program () teacher_startup = fluid . Program () with fluid . program_guard ( teacher_program , teacher_startup ): with fluid . unique_name . guard (): image = fluid . data ( name = 'data' , shape = [ None ] + [ 3 , 224 , 224 ], dtype = 'float32' ) # teacher model definition teacher_model = ResNet () predict = teacher_model . net ( image , class_dim = 1000 ) exe . run ( teacher_startup ) def if_exist ( var ): return os . path . exists ( os . path . join ( \"./pretrained\" , var . name )) fluid . io . load_vars ( exe , \"./pretrained\" , main_program = teacher_program , predicate = if_exist )","title":"2.
\u5b9a\u4e49teacher_program"},{"location":"tutorials/distillation_demo/#3","text":"\u5b9a\u4e49\u597d student_program \u548c teacher_program \u540e\uff0c\u6211\u4eec\u9700\u8981\u4ece\u4e2d\u4e24\u4e24\u5bf9\u5e94\u5730\u6311\u9009\u51fa\u82e5\u5e72\u4e2a\u7279\u5f81\u56fe\uff0c\u7559\u5f85\u540e\u7eed\u4e3a\u5176\u6dfb\u52a0\u77e5\u8bc6\u84b8\u998f\u635f\u5931\u51fd\u6570\u3002 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 # get all student variables student_vars = [] for v in student_program . list_vars (): try : student_vars . append (( v . name , v . shape )) except : pass print ( \"=\" * 50 + \"student_model_vars\" + \"=\" * 50 ) print ( student_vars ) # get all teacher variables teacher_vars = [] for v in teacher_program . list_vars (): try : teacher_vars . append (( v . name , v . shape )) except : pass print ( \"=\" * 50 + \"teacher_model_vars\" + \"=\" * 50 ) print ( teacher_vars )","title":"3.\u9009\u62e9\u7279\u5f81\u56fe"},{"location":"tutorials/distillation_demo/#4-programmerge","text":"PaddlePaddle\u4f7f\u7528Program\u6765\u63cf\u8ff0\u8ba1\u7b97\u56fe\uff0c\u4e3a\u4e86\u540c\u65f6\u8ba1\u7b97student\u548cteacher\u4e24\u4e2aProgram\uff0c\u8fd9\u91cc\u9700\u8981\u5c06\u5176\u4e24\u8005\u5408\u5e76\uff08merge\uff09\u4e3a\u4e00\u4e2aProgram\u3002 merge\u8fc7\u7a0b\u64cd\u4f5c\u8f83\u591a\uff0c\u5177\u4f53\u7ec6\u8282\u8bf7\u53c2\u8003 merge API\u6587\u6863 \u3002 1 2 data_name_map = { 'data' : 'image' } student_program = merge ( teacher_program , student_program , data_name_map , place )","title":"4. \u5408\u5e76Program\uff08merge\uff09"},{"location":"tutorials/distillation_demo/#5loss","text":"\u5728\u6dfb\u52a0\u84b8\u998floss\u7684\u8fc7\u7a0b\u4e2d\uff0c\u53ef\u80fd\u8fd8\u4f1a\u5f15\u5165\u90e8\u5206\u53d8\u91cf\uff08Variable\uff09\uff0c\u4e3a\u4e86\u907f\u514d\u547d\u540d\u91cd\u590d\u8fd9\u91cc\u53ef\u4ee5\u4f7f\u7528 with fluid.name_scope(\"distill\"): \u4e3a\u65b0\u5f15\u5165\u7684\u53d8\u91cf\u52a0\u4e00\u4e2a\u547d\u540d\u4f5c\u7528\u57df\u3002 \u53e6\u5916\u9700\u8981\u6ce8\u610f\u7684\u662f\uff0cmerge\u8fc7\u7a0b\u4e3a teacher_program \u7684\u53d8\u91cf\u7edf\u4e00\u52a0\u4e86\u540d\u79f0\u524d\u7f00\uff0c\u9ed8\u8ba4\u662f \"teacher_\" , \u8fd9\u91cc\u5728\u6dfb\u52a0 l2_loss \u65f6\u4e5f\u8981\u4e3ateacher\u7684\u53d8\u91cf\u52a0\u4e0a\u8fd9\u4e2a\u524d\u7f00\u3002 1 2 3 4 5 6 7 8 9 with fluid . program_guard ( student_program , student_startup ): with fluid . name_scope ( \"distill\" ): distill_loss = l2_loss ( 'teacher_bn5c_branch2b.output.1.tmp_3' , 'depthwise_conv2d_11.tmp_0' , student_program ) distill_weight = 1 loss = avg_cost + distill_loss * distill_weight opt = create_optimizer () opt . minimize ( loss ) exe . 
run ( student_startup ) \u81f3\u6b64\uff0c\u6211\u4eec\u5c31\u5f97\u5230\u4e86\u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684 student_program \uff0c\u540e\u9762\u5c31\u53ef\u4ee5\u4f7f\u7528\u4e00\u4e2a\u666e\u901aprogram\u4e00\u6837\u5bf9\u5176\u5f00\u59cb\u8bad\u7ec3\u548c\u8bc4\u4f30\u3002","title":"5.\u6dfb\u52a0\u84b8\u998floss"},{"location":"tutorials/nas_demo/","text":"\u7f51\u7edc\u7ed3\u6784\u641c\u7d22\u793a\u4f8b # \u672c\u793a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u7f51\u7edc\u7ed3\u6784\u641c\u7d22\u63a5\u53e3\uff0c\u641c\u7d22\u5230\u4e00\u4e2a\u66f4\u5c0f\u6216\u8005\u7cbe\u5ea6\u66f4\u9ad8\u7684\u6a21\u578b\uff0c\u8be5\u6587\u6863\u4ec5\u4ecb\u7ecdpaddleslim\u4e2dSANAS\u7684\u4f7f\u7528\u53ca\u5982\u4f55\u5229\u7528SANAS\u5f97\u5230\u6a21\u578b\u7ed3\u6784\uff0c\u5b8c\u6574\u793a\u4f8b\u4ee3\u7801\u8bf7\u53c2\u8003sa_nas_mobilenetv2.py\u6216\u8005block_sa_nas_mobilenetv2.py\u3002 \u63a5\u53e3\u4ecb\u7ecd # \u8bf7\u53c2\u8003\u3002 1. \u914d\u7f6e\u641c\u7d22\u7a7a\u95f4 # \u8be6\u7ec6\u7684\u641c\u7d22\u7a7a\u95f4\u914d\u7f6e\u53ef\u4ee5\u53c2\u8003 \u795e\u7ecf\u7f51\u7edc\u641c\u7d22API\u6587\u6863 \u3002 1 config = [( 'MobileNetV2Space' )] 2. \u5229\u7528\u641c\u7d22\u7a7a\u95f4\u521d\u59cb\u5316SANAS\u5b9e\u4f8b # 1 2 3 4 5 6 7 8 9 from paddleslim.nas import SANAS sa_nas = SANAS ( config , server_addr = ( \"\" , 8881 ), init_temperature = 10.24 , reduce_rate = 0.85 , search_steps = 300 , is_server = True ) 3. \u6839\u636e\u5b9e\u4f8b\u5316\u7684NAS\u5f97\u5230\u5f53\u524d\u7684\u7f51\u7edc\u7ed3\u6784 # 1 archs = sa_nas . next_archs () 4. \u6839\u636e\u5f97\u5230\u7684\u7f51\u7edc\u7ed3\u6784\u548c\u8f93\u5165\u6784\u9020\u8bad\u7ec3\u548c\u6d4b\u8bd5program # 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid train_program = fluid . Program () test_program = fluid . Program () startup_program = fluid . Program () with fluid . program_guard ( train_program , startup_program ): data = fluid . data ( name = 'data' , shape = [ None , 3 , 32 , 32 ], dtype = 'float32' ) label = fluid . data ( name = 'label' , shape = [ None , 1 ], dtype = 'int64' ) for arch in archs : data = arch ( data ) output = fluid . layers . fc ( data , 10 ) softmax_out = fluid . layers . softmax ( input = output , use_cudnn = False ) cost = fluid . layers . cross_entropy ( input = softmax_out , label = label ) avg_cost = fluid . layers . mean ( cost ) acc_top1 = fluid . layers . accuracy ( input = softmax_out , label = label , k = 1 ) test_program = train_program . clone ( for_test = True ) sgd = fluid . optimizer . SGD ( learning_rate = 1e-3 ) sgd . minimize ( avg_cost ) 5. \u6839\u636e\u6784\u9020\u7684\u8bad\u7ec3program\u6dfb\u52a0\u9650\u5236\u6761\u4ef6 # 1 2 3 4 from paddleslim.analysis import flops if flops ( train_program ) > 321208544 : continue 6. \u56de\u4f20score # 1 sa_nas . 
reward ( score )","title":"SA\u641c\u7d22"},{"location":"tutorials/nas_demo/#_1","text":"\u672c\u793a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u7f51\u7edc\u7ed3\u6784\u641c\u7d22\u63a5\u53e3\uff0c\u641c\u7d22\u5230\u4e00\u4e2a\u66f4\u5c0f\u6216\u8005\u7cbe\u5ea6\u66f4\u9ad8\u7684\u6a21\u578b\uff0c\u8be5\u6587\u6863\u4ec5\u4ecb\u7ecdpaddleslim\u4e2dSANAS\u7684\u4f7f\u7528\u53ca\u5982\u4f55\u5229\u7528SANAS\u5f97\u5230\u6a21\u578b\u7ed3\u6784\uff0c\u5b8c\u6574\u793a\u4f8b\u4ee3\u7801\u8bf7\u53c2\u8003sa_nas_mobilenetv2.py\u6216\u8005block_sa_nas_mobilenetv2.py\u3002","title":"\u7f51\u7edc\u7ed3\u6784\u641c\u7d22\u793a\u4f8b"},{"location":"tutorials/nas_demo/#_2","text":"\u8bf7\u53c2\u8003\u3002","title":"\u63a5\u53e3\u4ecb\u7ecd"},{"location":"tutorials/nas_demo/#1","text":"\u8be6\u7ec6\u7684\u641c\u7d22\u7a7a\u95f4\u914d\u7f6e\u53ef\u4ee5\u53c2\u8003 \u795e\u7ecf\u7f51\u7edc\u641c\u7d22API\u6587\u6863 \u3002 1 config = [( 'MobileNetV2Space' )]","title":"1. \u914d\u7f6e\u641c\u7d22\u7a7a\u95f4"},{"location":"tutorials/nas_demo/#2-sanas","text":"1 2 3 4 5 6 7 8 9 from paddleslim.nas import SANAS sa_nas = SANAS ( config , server_addr = ( \"\" , 8881 ), init_temperature = 10.24 , reduce_rate = 0.85 , search_steps = 300 , is_server = True )","title":"2. \u5229\u7528\u641c\u7d22\u7a7a\u95f4\u521d\u59cb\u5316SANAS\u5b9e\u4f8b"},{"location":"tutorials/nas_demo/#3-nas","text":"1 archs = sa_nas . next_archs ()","title":"3. \u6839\u636e\u5b9e\u4f8b\u5316\u7684NAS\u5f97\u5230\u5f53\u524d\u7684\u7f51\u7edc\u7ed3\u6784"},{"location":"tutorials/nas_demo/#4-program","text":"1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid train_program = fluid . Program () test_program = fluid . Program () startup_program = fluid . Program () with fluid . program_guard ( train_program , startup_program ): data = fluid . data ( name = 'data' , shape = [ None , 3 , 32 , 32 ], dtype = 'float32' ) label = fluid . data ( name = 'label' , shape = [ None , 1 ], dtype = 'int64' ) for arch in archs : data = arch ( data ) output = fluid . layers . fc ( data , 10 ) softmax_out = fluid . layers . softmax ( input = output , use_cudnn = False ) cost = fluid . layers . cross_entropy ( input = softmax_out , label = label ) avg_cost = fluid . layers . mean ( cost ) acc_top1 = fluid . layers . accuracy ( input = softmax_out , label = label , k = 1 ) test_program = train_program . clone ( for_test = True ) sgd = fluid . optimizer . SGD ( learning_rate = 1e-3 ) sgd . minimize ( avg_cost )","title":"4. \u6839\u636e\u5f97\u5230\u7684\u7f51\u7edc\u7ed3\u6784\u548c\u8f93\u5165\u6784\u9020\u8bad\u7ec3\u548c\u6d4b\u8bd5program"},{"location":"tutorials/nas_demo/#5-program","text":"1 2 3 4 from paddleslim.analysis import flops if flops ( train_program ) > 321208544 : continue","title":"5. \u6839\u636e\u6784\u9020\u7684\u8bad\u7ec3program\u6dfb\u52a0\u9650\u5236\u6761\u4ef6"},{"location":"tutorials/nas_demo/#6-score","text":"1 sa_nas . reward ( score )","title":"6. \u56de\u4f20score"},{"location":"tutorials/quant_aware_demo/","text":"\u5728\u7ebf\u91cf\u5316\u793a\u4f8b # \u672c\u793a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u5728\u7ebf\u91cf\u5316\u63a5\u53e3\uff0c\u6765\u5bf9\u8bad\u7ec3\u597d\u7684\u5206\u7c7b\u6a21\u578b\u8fdb\u884c\u91cf\u5316, \u53ef\u4ee5\u51cf\u5c11\u6a21\u578b\u7684\u5b58\u50a8\u7a7a\u95f4\u548c\u663e\u5b58\u5360\u7528\u3002 \u63a5\u53e3\u4ecb\u7ecd # \u8bf7\u53c2\u8003 \u91cf\u5316API\u6587\u6863 \u3002 \u5206\u7c7b\u6a21\u578b\u7684\u79bb\u7ebf\u91cf\u5316\u6d41\u7a0b # 1. 
\u914d\u7f6e\u91cf\u5316\u53c2\u6570 # 1 2 3 4 5 6 7 8 9 10 11 12 quant_config = { 'weight_quantize_type' : 'abs_max' , 'activation_quantize_type' : 'moving_average_abs_max' , 'weight_bits' : 8 , 'activation_bits' : 8 , 'not_quant_pattern' : [ 'skip_quant' ], 'quantize_op_types' : [ 'conv2d' , 'depthwise_conv2d' , 'mul' ], 'dtype' : 'int8' , 'window_size' : 10000 , 'moving_rate' : 0 . 9 , 'quant_weight_only' : False } 2. \u5bf9\u8bad\u7ec3\u548c\u6d4b\u8bd5program\u63d2\u5165\u53ef\u8bad\u7ec3\u91cf\u5316op # 1 2 3 val_program = quant_aware ( val_program , place , quant_config , scope = None , for_test = True ) compiled_train_prog = quant_aware ( train_prog , place , quant_config , scope = None , for_test = False ) 3.\u5173\u6389\u6307\u5b9abuild\u7b56\u7565 # 1 2 3 4 5 6 7 8 build_strategy = fluid . BuildStrategy () build_strategy . fuse_all_reduce_ops = False build_strategy . sync_batch_norm = False exec_strategy = fluid . ExecutionStrategy () compiled_train_prog = compiled_train_prog . with_data_parallel ( loss_name = avg_cost . name , build_strategy = build_strategy , exec_strategy = exec_strategy ) 4. freeze program # 1 2 3 4 5 float_program , int8_program = convert ( val_program , place , quant_config , scope = None , save_int8 = True ) 5.\u4fdd\u5b58\u9884\u6d4b\u6a21\u578b # 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 fluid . io . save_inference_model ( dirname = float_path , feeded_var_names = [ image . name ], target_vars = [ out ], executor = exe , main_program = float_program , model_filename = float_path + ' /model ' , params_filename = float_path + ' /params ' ) fluid . io . save_inference_model ( dirname = int8_path , feeded_var_names = [ image . name ], target_vars = [ out ], executor = exe , main_program = int8_program , model_filename = int8_path + ' /model ' , params_filename = int8_path + ' /params ' )","title":"\u91cf\u5316\u8bad\u7ec3"},{"location":"tutorials/quant_aware_demo/#_1","text":"\u672c\u793a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u5728\u7ebf\u91cf\u5316\u63a5\u53e3\uff0c\u6765\u5bf9\u8bad\u7ec3\u597d\u7684\u5206\u7c7b\u6a21\u578b\u8fdb\u884c\u91cf\u5316, \u53ef\u4ee5\u51cf\u5c11\u6a21\u578b\u7684\u5b58\u50a8\u7a7a\u95f4\u548c\u663e\u5b58\u5360\u7528\u3002","title":"\u5728\u7ebf\u91cf\u5316\u793a\u4f8b"},{"location":"tutorials/quant_aware_demo/#_2","text":"\u8bf7\u53c2\u8003 \u91cf\u5316API\u6587\u6863 \u3002","title":"\u63a5\u53e3\u4ecb\u7ecd"},{"location":"tutorials/quant_aware_demo/#_3","text":"","title":"\u5206\u7c7b\u6a21\u578b\u7684\u79bb\u7ebf\u91cf\u5316\u6d41\u7a0b"},{"location":"tutorials/quant_aware_demo/#1","text":"1 2 3 4 5 6 7 8 9 10 11 12 quant_config = { 'weight_quantize_type' : 'abs_max' , 'activation_quantize_type' : 'moving_average_abs_max' , 'weight_bits' : 8 , 'activation_bits' : 8 , 'not_quant_pattern' : [ 'skip_quant' ], 'quantize_op_types' : [ 'conv2d' , 'depthwise_conv2d' , 'mul' ], 'dtype' : 'int8' , 'window_size' : 10000 , 'moving_rate' : 0 . 9 , 'quant_weight_only' : False }","title":"1. \u914d\u7f6e\u91cf\u5316\u53c2\u6570"},{"location":"tutorials/quant_aware_demo/#2-programop","text":"1 2 3 val_program = quant_aware ( val_program , place , quant_config , scope = None , for_test = True ) compiled_train_prog = quant_aware ( train_prog , place , quant_config , scope = None , for_test = False )","title":"2. \u5bf9\u8bad\u7ec3\u548c\u6d4b\u8bd5program\u63d2\u5165\u53ef\u8bad\u7ec3\u91cf\u5316op"},{"location":"tutorials/quant_aware_demo/#3build","text":"1 2 3 4 5 6 7 8 build_strategy = fluid . BuildStrategy () build_strategy . 
fuse_all_reduce_ops = False build_strategy . sync_batch_norm = False exec_strategy = fluid . ExecutionStrategy () compiled_train_prog = compiled_train_prog . with_data_parallel ( loss_name = avg_cost . name , build_strategy = build_strategy , exec_strategy = exec_strategy )","title":"3.\u5173\u6389\u6307\u5b9abuild\u7b56\u7565"},{"location":"tutorials/quant_aware_demo/#4-freeze-program","text":"1 2 3 4 5 float_program , int8_program = convert ( val_program , place , quant_config , scope = None , save_int8 = True )","title":"4. freeze program"},{"location":"tutorials/quant_aware_demo/#5","text":"1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 fluid . io . save_inference_model ( dirname = float_path , feeded_var_names = [ image . name ], target_vars = [ out ], executor = exe , main_program = float_program , model_filename = float_path + ' /model ' , params_filename = float_path + ' /params ' ) fluid . io . save_inference_model ( dirname = int8_path , feeded_var_names = [ image . name ], target_vars = [ out ], executor = exe , main_program = int8_program , model_filename = int8_path + ' /model ' , params_filename = int8_path + ' /params ' )","title":"5.\u4fdd\u5b58\u9884\u6d4b\u6a21\u578b"},{"location":"tutorials/quant_embedding_demo/","text":"Embedding\u91cf\u5316\u793a\u4f8b # \u672c\u793a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528Embedding\u91cf\u5316\u7684\u63a5\u53e3 paddleslim.quant.quant_embedding \u3002 quant_embedding \u63a5\u53e3\u5c06\u7f51\u7edc\u4e2d\u7684Embedding\u53c2\u6570\u4ece float32 \u7c7b\u578b\u91cf\u5316\u5230 8-bit \u6574\u6570\u7c7b\u578b\uff0c\u5728\u51e0\u4e4e\u4e0d\u635f\u5931\u6a21\u578b\u7cbe\u5ea6\u7684\u60c5\u51b5\u4e0b\u51cf\u5c11\u6a21\u578b\u7684\u5b58\u50a8\u7a7a\u95f4\u548c\u663e\u5b58\u5360\u7528\u3002 \u63a5\u53e3\u4ecb\u7ecd\u8bf7\u53c2\u8003 \u91cf\u5316API\u6587\u6863 \u3002 \u8be5\u63a5\u53e3\u5bf9program\u7684\u4fee\u6539\uff1a \u91cf\u5316\u524d: \u56fe1\uff1a\u91cf\u5316\u524d\u7684\u6a21\u578b\u7ed3\u6784 \u91cf\u5316\u540e\uff1a \u56fe2: \u91cf\u5316\u540e\u7684\u6a21\u578b\u7ed3\u6784 \u4ee5\u4e0b\u5c06\u4ee5 \u57fa\u4e8eskip-gram\u7684word2vector\u6a21\u578b \u4e3a\u4f8b\u6765\u8bf4\u660e\u5982\u4f55\u4f7f\u7528 quant_embedding \u63a5\u53e3\u3002\u9996\u5148\u4ecb\u7ecd \u57fa\u4e8eskip-gram\u7684word2vector\u6a21\u578b \u7684\u6b63\u5e38\u8bad\u7ec3\u548c\u6d4b\u8bd5\u6d41\u7a0b\u3002 \u57fa\u4e8eskip-gram\u7684word2vector\u6a21\u578b # \u4ee5\u4e0b\u662f\u672c\u4f8b\u7684\u7b80\u8981\u76ee\u5f55\u7ed3\u6784\u53ca\u8bf4\u660e\uff1a 1 2 3 4 5 6 7 8 9 10 . 
\u251c\u2500\u2500 cluster_train.py # \u5206\u5e03\u5f0f\u8bad\u7ec3\u51fd\u6570 \u251c\u2500\u2500 cluster_train.sh # \u672c\u5730\u6a21\u62df\u591a\u673a\u811a\u672c \u251c\u2500\u2500 train.py # \u8bad\u7ec3\u51fd\u6570 \u251c\u2500\u2500 infer.py # \u9884\u6d4b\u811a\u672c \u251c\u2500\u2500 net.py # \u7f51\u7edc\u7ed3\u6784 \u251c\u2500\u2500 preprocess.py # \u9884\u5904\u7406\u811a\u672c\uff0c\u5305\u62ec\u6784\u5efa\u8bcd\u5178\u548c\u9884\u5904\u7406\u6587\u672c \u251c\u2500\u2500 reader.py # \u8bad\u7ec3\u9636\u6bb5\u7684\u6587\u672c\u8bfb\u5199 \u251c\u2500\u2500 train.py # \u8bad\u7ec3\u51fd\u6570 \u2514\u2500\u2500 utils.py # \u901a\u7528\u51fd\u6570 \u4ecb\u7ecd # \u672c\u4f8b\u5b9e\u73b0\u4e86skip-gram\u6a21\u5f0f\u7684word2vector\u6a21\u578b\u3002 \u540c\u65f6\u63a8\u8350\u7528\u6237\u53c2\u8003 IPython Notebook demo \u6570\u636e\u4e0b\u8f7d # \u5168\u91cf\u6570\u636e\u96c6\u4f7f\u7528\u7684\u662f\u6765\u81ea1 Billion Word Language Model Benchmark\u7684( http://www.statmt.org/lm-benchmark ) \u7684\u6570\u636e\u96c6. 1 2 3 4 mkdir data wget http://www.statmt.org/lm-benchmark/1-billion-word-language-modeling-benchmark-r13output.tar.gz tar xzvf 1 -billion-word-language-modeling-benchmark-r13output.tar.gz mv 1 -billion-word-language-modeling-benchmark-r13output/training-monolingual.tokenized.shuffled/ data/ \u5907\u7528\u6570\u636e\u5730\u5740\u4e0b\u8f7d\u547d\u4ee4\u5982\u4e0b 1 2 3 4 mkdir data wget https://paddlerec.bj.bcebos.com/word2vec/1-billion-word-language-modeling-benchmark-r13output.tar tar xvf 1 -billion-word-language-modeling-benchmark-r13output.tar mv 1 -billion-word-language-modeling-benchmark-r13output/training-monolingual.tokenized.shuffled/ data/ \u4e3a\u4e86\u65b9\u4fbf\u5feb\u901f\u9a8c\u8bc1\uff0c\u6211\u4eec\u4e5f\u63d0\u4f9b\u4e86\u7ecf\u5178\u7684text8\u6837\u4f8b\u6570\u636e\u96c6\uff0c\u5305\u542b1700w\u4e2a\u8bcd\u3002 \u4e0b\u8f7d\u547d\u4ee4\u5982\u4e0b 1 2 3 4 mkdir data wget https://paddlerec.bj.bcebos.com/word2vec/text.tar tar xvf text.tar mv text data/ \u6570\u636e\u9884\u5904\u7406 # \u4ee5\u6837\u4f8b\u6570\u636e\u96c6\u4e3a\u4f8b\u8fdb\u884c\u9884\u5904\u7406\u3002\u5168\u91cf\u6570\u636e\u96c6\u6ce8\u610f\u89e3\u538b\u540e\u4ee5training-monolingual.tokenized.shuffled \u76ee\u5f55\u4e3a\u9884\u5904\u7406\u76ee\u5f55\uff0c\u548c\u6837\u4f8b\u6570\u636e\u96c6\u7684text\u76ee\u5f55\u5e76\u5217\u3002 \u8bcd\u5178\u683c\u5f0f: \u8bcd<\u7a7a\u683c>\u8bcd\u9891\u3002\u6ce8\u610f\u4f4e\u9891\u8bcd\u7528'UNK'\u8868\u793a \u53ef\u4ee5\u6309\u683c\u5f0f\u81ea\u5efa\u8bcd\u5178\uff0c\u5982\u679c\u81ea\u5efa\u8bcd\u5178\u8df3\u8fc7\u7b2c\u4e00\u6b65\u3002 1 2 3 4 5 6 7 8 9 10 the 1061396 of 593677 and 416629 one 411764 in 372201 a 325873 < UNK > 324608 to 316376 zero 264975 nine 250430 \u7b2c\u4e00\u6b65\u6839\u636e\u82f1\u6587\u8bed\u6599\u751f\u6210\u8bcd\u5178\uff0c\u4e2d\u6587\u8bed\u6599\u53ef\u4ee5\u901a\u8fc7\u4fee\u6539text_strip\u65b9\u6cd5\u81ea\u5b9a\u4e49\u5904\u7406\u65b9\u6cd5\u3002 1 python preprocess.py --build_dict --build_dict_corpus_dir data/text/ --dict_path data/test_build_dict \u7b2c\u4e8c\u6b65\u6839\u636e\u8bcd\u5178\u5c06\u6587\u672c\u8f6c\u6210id, \u540c\u65f6\u8fdb\u884cdownsample\uff0c\u6309\u7167\u6982\u7387\u8fc7\u6ee4\u5e38\u89c1\u8bcd, \u540c\u65f6\u751f\u6210word\u548cid\u6620\u5c04\u7684\u6587\u4ef6\uff0c\u6587\u4ef6\u540d\u4e3a\u8bcd\u5178+\" word_to_id \"\u3002 1 python preprocess.py --filter_corpus --dict_path data/test_build_dict --input_corpus_dir data/text --output_corpus_dir data/convert_text8 --min_count 5 
--downsample 0 .001 \u8bad\u7ec3 # \u5177\u4f53\u7684\u53c2\u6570\u914d\u7f6e\u53ef\u8fd0\u884c 1 python train.py -h \u5355\u673a\u591a\u7ebf\u7a0b\u8bad\u7ec3 1 OPENBLAS_NUM_THREADS = 1 CPU_NUM = 5 python train.py --train_data_dir data/convert_text8 --dict_path data/test_build_dict --num_passes 10 --batch_size 100 --model_output_dir v1_cpu5_b100_lr1dir --base_lr 1 .0 --print_batch 1000 --with_speed --is_sparse \u672c\u5730\u5355\u673a\u6a21\u62df\u591a\u673a\u8bad\u7ec3 1 sh cluster_train.sh \u672c\u793a\u4f8b\u4e2d\u6309\u7167\u5355\u673a\u591a\u7ebf\u7a0b\u8bad\u7ec3\u7684\u547d\u4ee4\u8fdb\u884c\u8bad\u7ec3\uff0c\u8bad\u7ec3\u5b8c\u6bd5\u540e\uff0c\u53ef\u770b\u5230\u5728\u5f53\u524d\u6587\u4ef6\u5939\u4e0b\u4fdd\u5b58\u6a21\u578b\u7684\u8def\u5f84\u4e3a: v1_cpu5_b100_lr1dir , \u8fd0\u884c ls v1_cpu5_b100_lr1dir \u53ef\u770b\u5230\u8be5\u6587\u4ef6\u5939\u4e0b\u4fdd\u5b58\u4e86\u8bad\u7ec3\u768410\u4e2aepoch\u7684\u6a21\u578b\u6587\u4ef6\u3002 1 pass - 0 pass - 1 pass - 2 pass - 3 pass - 4 pass - 5 pass - 6 pass - 7 pass - 8 pass - 9 \u9884\u6d4b # \u6d4b\u8bd5\u96c6\u4e0b\u8f7d\u547d\u4ee4\u5982\u4e0b 1 2 3 4 #\u5168\u91cf\u6570\u636e\u96c6\u6d4b\u8bd5\u96c6 wget https://paddlerec.bj.bcebos.com/word2vec/test_dir.tar #\u6837\u672c\u6570\u636e\u96c6\u6d4b\u8bd5\u96c6 wget https://paddlerec.bj.bcebos.com/word2vec/test_mid_dir.tar \u9884\u6d4b\u547d\u4ee4\uff0c\u6ce8\u610f\u8bcd\u5178\u540d\u79f0\u9700\u8981\u52a0\u540e\u7f00\" word_to_id \", \u6b64\u6587\u4ef6\u662f\u9884\u5904\u7406\u9636\u6bb5\u751f\u6210\u7684\u3002 1 python infer.py --infer_epoch --test_dir data/test_mid_dir --dict_path data/test_build_dict_word_to_id_ --batch_size 20000 --model_dir v1_cpu5_b100_lr1dir/ --start_index 0 --last_index 9 \u8fd0\u884c\u8be5\u9884\u6d4b\u547d\u4ee4, \u53ef\u770b\u5230\u5982\u4e0b\u8f93\u51fa 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 ( 'start index: ' , 0 , ' last_index:' , 9 ) ( 'vocab_size:' , 63642 ) step : 1 249 epoch : 0 acc : 0 . 014 step : 1 590 epoch : 1 acc : 0 . 033 step : 1 982 epoch : 2 acc : 0 . 055 step : 1 1338 epoch : 3 acc : 0 . 075 step : 1 1653 epoch : 4 acc : 0 . 093 step : 1 1914 epoch : 5 acc : 0 . 107 step : 1 2204 epoch : 6 acc : 0 . 124 step : 1 2416 epoch : 7 acc : 0 . 136 step : 1 2606 epoch : 8 acc : 0 . 146 step : 1 2722 epoch : 9 acc : 0 . 153 \u91cf\u5316 \u57fa\u4e8eskip-gram\u7684word2vector\u6a21\u578b # \u91cf\u5316\u914d\u7f6e\u4e3a: 1 2 3 4 config = { 'params_name' : 'emb' , 'quantize_type' : 'abs_max' } \u8fd0\u884c\u547d\u4ee4\u4e3a\uff1a 1 python infer.py --infer_epoch --test_dir data/test_mid_dir --dict_path data/test_build_dict_word_to_id_ --batch_size 20000 --model_dir v1_cpu5_b100_lr1dir/ --start_index 0 --last_index 9 --emb_quant True \u8fd0\u884c\u8f93\u51fa\u4e3a: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 ( 'start index: ' , 0 , ' last_index:' , 9 ) ( 'vocab_size:' , 63642 ) quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 253 epoch : 0 acc : 0 . 014 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 586 epoch : 1 acc : 0 . 033 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 970 epoch : 2 acc : 0 . 
054 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 1364 epoch : 3 acc : 0 . 077 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 1642 epoch : 4 acc : 0 . 092 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 1936 epoch : 5 acc : 0 . 109 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 2216 epoch : 6 acc : 0 . 124 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 2419 epoch : 7 acc : 0 . 136 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 2603 epoch : 8 acc : 0 . 146 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 2719 epoch : 9 acc : 0 . 153 \u91cf\u5316\u540e\u7684\u6a21\u578b\u4fdd\u5b58\u5728 ./output_quant \u4e2d\uff0c\u53ef\u770b\u5230\u91cf\u5316\u540e\u7684\u53c2\u6570 'emb.int8' \u7684\u5927\u5c0f\u4e3a3.9M, \u5728 ./v1_cpu5_b100_lr1dir \u4e2d\u53ef\u770b\u5230\u91cf\u5316\u524d\u7684\u53c2\u6570 'emb' \u7684\u5927\u5c0f\u4e3a16M\u3002","title":"Embedding\u91cf\u5316"},{"location":"tutorials/quant_embedding_demo/#embedding","text":"\u672c\u793a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528Embedding\u91cf\u5316\u7684\u63a5\u53e3 paddleslim.quant.quant_embedding \u3002 quant_embedding \u63a5\u53e3\u5c06\u7f51\u7edc\u4e2d\u7684Embedding\u53c2\u6570\u4ece float32 \u7c7b\u578b\u91cf\u5316\u5230 8-bit \u6574\u6570\u7c7b\u578b\uff0c\u5728\u51e0\u4e4e\u4e0d\u635f\u5931\u6a21\u578b\u7cbe\u5ea6\u7684\u60c5\u51b5\u4e0b\u51cf\u5c11\u6a21\u578b\u7684\u5b58\u50a8\u7a7a\u95f4\u548c\u663e\u5b58\u5360\u7528\u3002 \u63a5\u53e3\u4ecb\u7ecd\u8bf7\u53c2\u8003 \u91cf\u5316API\u6587\u6863 \u3002 \u8be5\u63a5\u53e3\u5bf9program\u7684\u4fee\u6539\uff1a \u91cf\u5316\u524d: \u56fe1\uff1a\u91cf\u5316\u524d\u7684\u6a21\u578b\u7ed3\u6784 \u91cf\u5316\u540e\uff1a \u56fe2: \u91cf\u5316\u540e\u7684\u6a21\u578b\u7ed3\u6784 \u4ee5\u4e0b\u5c06\u4ee5 \u57fa\u4e8eskip-gram\u7684word2vector\u6a21\u578b \u4e3a\u4f8b\u6765\u8bf4\u660e\u5982\u4f55\u4f7f\u7528 quant_embedding \u63a5\u53e3\u3002\u9996\u5148\u4ecb\u7ecd \u57fa\u4e8eskip-gram\u7684word2vector\u6a21\u578b \u7684\u6b63\u5e38\u8bad\u7ec3\u548c\u6d4b\u8bd5\u6d41\u7a0b\u3002","title":"Embedding\u91cf\u5316\u793a\u4f8b"},{"location":"tutorials/quant_embedding_demo/#skip-gramword2vector","text":"\u4ee5\u4e0b\u662f\u672c\u4f8b\u7684\u7b80\u8981\u76ee\u5f55\u7ed3\u6784\u53ca\u8bf4\u660e\uff1a 1 2 3 4 5 6 7 8 9 10 . 
\u251c\u2500\u2500 cluster_train.py # \u5206\u5e03\u5f0f\u8bad\u7ec3\u51fd\u6570 \u251c\u2500\u2500 cluster_train.sh # \u672c\u5730\u6a21\u62df\u591a\u673a\u811a\u672c \u251c\u2500\u2500 train.py # \u8bad\u7ec3\u51fd\u6570 \u251c\u2500\u2500 infer.py # \u9884\u6d4b\u811a\u672c \u251c\u2500\u2500 net.py # \u7f51\u7edc\u7ed3\u6784 \u251c\u2500\u2500 preprocess.py # \u9884\u5904\u7406\u811a\u672c\uff0c\u5305\u62ec\u6784\u5efa\u8bcd\u5178\u548c\u9884\u5904\u7406\u6587\u672c \u251c\u2500\u2500 reader.py # \u8bad\u7ec3\u9636\u6bb5\u7684\u6587\u672c\u8bfb\u5199 \u251c\u2500\u2500 train.py # \u8bad\u7ec3\u51fd\u6570 \u2514\u2500\u2500 utils.py # \u901a\u7528\u51fd\u6570","title":"\u57fa\u4e8eskip-gram\u7684word2vector\u6a21\u578b"},{"location":"tutorials/quant_embedding_demo/#_1","text":"\u672c\u4f8b\u5b9e\u73b0\u4e86skip-gram\u6a21\u5f0f\u7684word2vector\u6a21\u578b\u3002 \u540c\u65f6\u63a8\u8350\u7528\u6237\u53c2\u8003 IPython Notebook demo","title":"\u4ecb\u7ecd"},{"location":"tutorials/quant_embedding_demo/#_2","text":"\u5168\u91cf\u6570\u636e\u96c6\u4f7f\u7528\u7684\u662f\u6765\u81ea1 Billion Word Language Model Benchmark\u7684( http://www.statmt.org/lm-benchmark ) \u7684\u6570\u636e\u96c6. 1 2 3 4 mkdir data wget http://www.statmt.org/lm-benchmark/1-billion-word-language-modeling-benchmark-r13output.tar.gz tar xzvf 1 -billion-word-language-modeling-benchmark-r13output.tar.gz mv 1 -billion-word-language-modeling-benchmark-r13output/training-monolingual.tokenized.shuffled/ data/ \u5907\u7528\u6570\u636e\u5730\u5740\u4e0b\u8f7d\u547d\u4ee4\u5982\u4e0b 1 2 3 4 mkdir data wget https://paddlerec.bj.bcebos.com/word2vec/1-billion-word-language-modeling-benchmark-r13output.tar tar xvf 1 -billion-word-language-modeling-benchmark-r13output.tar mv 1 -billion-word-language-modeling-benchmark-r13output/training-monolingual.tokenized.shuffled/ data/ \u4e3a\u4e86\u65b9\u4fbf\u5feb\u901f\u9a8c\u8bc1\uff0c\u6211\u4eec\u4e5f\u63d0\u4f9b\u4e86\u7ecf\u5178\u7684text8\u6837\u4f8b\u6570\u636e\u96c6\uff0c\u5305\u542b1700w\u4e2a\u8bcd\u3002 \u4e0b\u8f7d\u547d\u4ee4\u5982\u4e0b 1 2 3 4 mkdir data wget https://paddlerec.bj.bcebos.com/word2vec/text.tar tar xvf text.tar mv text data/","title":"\u6570\u636e\u4e0b\u8f7d"},{"location":"tutorials/quant_embedding_demo/#_3","text":"\u4ee5\u6837\u4f8b\u6570\u636e\u96c6\u4e3a\u4f8b\u8fdb\u884c\u9884\u5904\u7406\u3002\u5168\u91cf\u6570\u636e\u96c6\u6ce8\u610f\u89e3\u538b\u540e\u4ee5training-monolingual.tokenized.shuffled \u76ee\u5f55\u4e3a\u9884\u5904\u7406\u76ee\u5f55\uff0c\u548c\u6837\u4f8b\u6570\u636e\u96c6\u7684text\u76ee\u5f55\u5e76\u5217\u3002 \u8bcd\u5178\u683c\u5f0f: \u8bcd<\u7a7a\u683c>\u8bcd\u9891\u3002\u6ce8\u610f\u4f4e\u9891\u8bcd\u7528'UNK'\u8868\u793a \u53ef\u4ee5\u6309\u683c\u5f0f\u81ea\u5efa\u8bcd\u5178\uff0c\u5982\u679c\u81ea\u5efa\u8bcd\u5178\u8df3\u8fc7\u7b2c\u4e00\u6b65\u3002 1 2 3 4 5 6 7 8 9 10 the 1061396 of 593677 and 416629 one 411764 in 372201 a 325873 < UNK > 324608 to 316376 zero 264975 nine 250430 \u7b2c\u4e00\u6b65\u6839\u636e\u82f1\u6587\u8bed\u6599\u751f\u6210\u8bcd\u5178\uff0c\u4e2d\u6587\u8bed\u6599\u53ef\u4ee5\u901a\u8fc7\u4fee\u6539text_strip\u65b9\u6cd5\u81ea\u5b9a\u4e49\u5904\u7406\u65b9\u6cd5\u3002 1 python preprocess.py --build_dict --build_dict_corpus_dir data/text/ --dict_path data/test_build_dict \u7b2c\u4e8c\u6b65\u6839\u636e\u8bcd\u5178\u5c06\u6587\u672c\u8f6c\u6210id, \u540c\u65f6\u8fdb\u884cdownsample\uff0c\u6309\u7167\u6982\u7387\u8fc7\u6ee4\u5e38\u89c1\u8bcd, 
\u540c\u65f6\u751f\u6210word\u548cid\u6620\u5c04\u7684\u6587\u4ef6\uff0c\u6587\u4ef6\u540d\u4e3a\u8bcd\u5178+\" word_to_id \"\u3002 1 python preprocess.py --filter_corpus --dict_path data/test_build_dict --input_corpus_dir data/text --output_corpus_dir data/convert_text8 --min_count 5 --downsample 0 .001","title":"\u6570\u636e\u9884\u5904\u7406"},{"location":"tutorials/quant_embedding_demo/#_4","text":"\u5177\u4f53\u7684\u53c2\u6570\u914d\u7f6e\u53ef\u8fd0\u884c 1 python train.py -h \u5355\u673a\u591a\u7ebf\u7a0b\u8bad\u7ec3 1 OPENBLAS_NUM_THREADS = 1 CPU_NUM = 5 python train.py --train_data_dir data/convert_text8 --dict_path data/test_build_dict --num_passes 10 --batch_size 100 --model_output_dir v1_cpu5_b100_lr1dir --base_lr 1 .0 --print_batch 1000 --with_speed --is_sparse \u672c\u5730\u5355\u673a\u6a21\u62df\u591a\u673a\u8bad\u7ec3 1 sh cluster_train.sh \u672c\u793a\u4f8b\u4e2d\u6309\u7167\u5355\u673a\u591a\u7ebf\u7a0b\u8bad\u7ec3\u7684\u547d\u4ee4\u8fdb\u884c\u8bad\u7ec3\uff0c\u8bad\u7ec3\u5b8c\u6bd5\u540e\uff0c\u53ef\u770b\u5230\u5728\u5f53\u524d\u6587\u4ef6\u5939\u4e0b\u4fdd\u5b58\u6a21\u578b\u7684\u8def\u5f84\u4e3a: v1_cpu5_b100_lr1dir , \u8fd0\u884c ls v1_cpu5_b100_lr1dir \u53ef\u770b\u5230\u8be5\u6587\u4ef6\u5939\u4e0b\u4fdd\u5b58\u4e86\u8bad\u7ec3\u768410\u4e2aepoch\u7684\u6a21\u578b\u6587\u4ef6\u3002 1 pass - 0 pass - 1 pass - 2 pass - 3 pass - 4 pass - 5 pass - 6 pass - 7 pass - 8 pass - 9","title":"\u8bad\u7ec3"},{"location":"tutorials/quant_embedding_demo/#_5","text":"\u6d4b\u8bd5\u96c6\u4e0b\u8f7d\u547d\u4ee4\u5982\u4e0b 1 2 3 4 #\u5168\u91cf\u6570\u636e\u96c6\u6d4b\u8bd5\u96c6 wget https://paddlerec.bj.bcebos.com/word2vec/test_dir.tar #\u6837\u672c\u6570\u636e\u96c6\u6d4b\u8bd5\u96c6 wget https://paddlerec.bj.bcebos.com/word2vec/test_mid_dir.tar \u9884\u6d4b\u547d\u4ee4\uff0c\u6ce8\u610f\u8bcd\u5178\u540d\u79f0\u9700\u8981\u52a0\u540e\u7f00\" word_to_id \", \u6b64\u6587\u4ef6\u662f\u9884\u5904\u7406\u9636\u6bb5\u751f\u6210\u7684\u3002 1 python infer.py --infer_epoch --test_dir data/test_mid_dir --dict_path data/test_build_dict_word_to_id_ --batch_size 20000 --model_dir v1_cpu5_b100_lr1dir/ --start_index 0 --last_index 9 \u8fd0\u884c\u8be5\u9884\u6d4b\u547d\u4ee4, \u53ef\u770b\u5230\u5982\u4e0b\u8f93\u51fa 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 ( 'start index: ' , 0 , ' last_index:' , 9 ) ( 'vocab_size:' , 63642 ) step : 1 249 epoch : 0 acc : 0 . 014 step : 1 590 epoch : 1 acc : 0 . 033 step : 1 982 epoch : 2 acc : 0 . 055 step : 1 1338 epoch : 3 acc : 0 . 075 step : 1 1653 epoch : 4 acc : 0 . 093 step : 1 1914 epoch : 5 acc : 0 . 107 step : 1 2204 epoch : 6 acc : 0 . 124 step : 1 2416 epoch : 7 acc : 0 . 136 step : 1 2606 epoch : 8 acc : 0 . 146 step : 1 2722 epoch : 9 acc : 0 . 
153","title":"\u9884\u6d4b"},{"location":"tutorials/quant_embedding_demo/#skip-gramword2vector_1","text":"\u91cf\u5316\u914d\u7f6e\u4e3a: 1 2 3 4 config = { 'params_name' : 'emb' , 'quantize_type' : 'abs_max' } \u8fd0\u884c\u547d\u4ee4\u4e3a\uff1a 1 python infer.py --infer_epoch --test_dir data/test_mid_dir --dict_path data/test_build_dict_word_to_id_ --batch_size 20000 --model_dir v1_cpu5_b100_lr1dir/ --start_index 0 --last_index 9 --emb_quant True \u8fd0\u884c\u8f93\u51fa\u4e3a: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 ( 'start index: ' , 0 , ' last_index:' , 9 ) ( 'vocab_size:' , 63642 ) quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 253 epoch : 0 acc : 0 . 014 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 586 epoch : 1 acc : 0 . 033 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 970 epoch : 2 acc : 0 . 054 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 1364 epoch : 3 acc : 0 . 077 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 1642 epoch : 4 acc : 0 . 092 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 1936 epoch : 5 acc : 0 . 109 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 2216 epoch : 6 acc : 0 . 124 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 2419 epoch : 7 acc : 0 . 136 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 2603 epoch : 8 acc : 0 . 146 quant_embedding config { 'quantize_type' : 'abs_max' , 'params_name' : 'emb' , 'quantize_bits' : 8 , 'dtype' : 'int8' } step : 1 2719 epoch : 9 acc : 0 . 
153 \u91cf\u5316\u540e\u7684\u6a21\u578b\u4fdd\u5b58\u5728 ./output_quant \u4e2d\uff0c\u53ef\u770b\u5230\u91cf\u5316\u540e\u7684\u53c2\u6570 'emb.int8' \u7684\u5927\u5c0f\u4e3a3.9M, \u5728 ./v1_cpu5_b100_lr1dir \u4e2d\u53ef\u770b\u5230\u91cf\u5316\u524d\u7684\u53c2\u6570 'emb' \u7684\u5927\u5c0f\u4e3a16M\u3002","title":"\u91cf\u5316\u57fa\u4e8eskip-gram\u7684word2vector\u6a21\u578b"},{"location":"tutorials/quant_post_demo/","text":"\u79bb\u7ebf\u91cf\u5316\u793a\u4f8b # \u672c\u793a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u79bb\u7ebf\u91cf\u5316\u63a5\u53e3 paddleslim.quant.quant_post \u6765\u5bf9\u8bad\u7ec3\u597d\u7684\u5206\u7c7b\u6a21\u578b\u8fdb\u884c\u79bb\u7ebf\u91cf\u5316, \u8be5\u63a5\u53e3\u65e0\u9700\u5bf9\u6a21\u578b\u8fdb\u884c\u8bad\u7ec3\u5c31\u53ef\u5f97\u5230\u91cf\u5316\u6a21\u578b\uff0c\u51cf\u5c11\u6a21\u578b\u7684\u5b58\u50a8\u7a7a\u95f4\u548c\u663e\u5b58\u5360\u7528\u3002 \u63a5\u53e3\u4ecb\u7ecd # \u8bf7\u53c2\u8003 \u91cf\u5316API\u6587\u6863 \u3002 \u5206\u7c7b\u6a21\u578b\u7684\u79bb\u7ebf\u91cf\u5316\u6d41\u7a0b # \u51c6\u5907\u6570\u636e # \u5728\u5f53\u524d\u6587\u4ef6\u5939\u4e0b\u521b\u5efa data \u6587\u4ef6\u5939\uff0c\u5c06 imagenet \u6570\u636e\u96c6\u89e3\u538b\u5728 data \u6587\u4ef6\u5939\u4e0b\uff0c\u89e3\u538b\u540e data \u6587\u4ef6\u5939\u4e0b\u5e94\u5305\u542b\u4ee5\u4e0b\u6587\u4ef6\uff1a - 'train' \u6587\u4ef6\u5939\uff0c\u8bad\u7ec3\u56fe\u7247 - 'train_list.txt' \u6587\u4ef6 - 'val' \u6587\u4ef6\u5939\uff0c\u9a8c\u8bc1\u56fe\u7247 - 'val_list.txt' \u6587\u4ef6 \u51c6\u5907\u9700\u8981\u91cf\u5316\u7684\u6a21\u578b # \u56e0\u4e3a\u79bb\u7ebf\u91cf\u5316\u63a5\u53e3\u53ea\u652f\u6301\u52a0\u8f7d\u901a\u8fc7 fluid.io.save_inference_model \u63a5\u53e3\u4fdd\u5b58\u7684\u6a21\u578b\uff0c\u56e0\u6b64\u5982\u679c\u60a8\u7684\u6a21\u578b\u662f\u901a\u8fc7\u5176\u4ed6\u63a5\u53e3\u4fdd\u5b58\u7684\uff0c\u90a3\u9700\u8981\u5148\u5c06\u6a21\u578b\u8fdb\u884c\u8f6c\u5316\u3002\u672c\u793a\u4f8b\u5c06\u4ee5\u5206\u7c7b\u6a21\u578b\u4e3a\u4f8b\u8fdb\u884c\u8bf4\u660e\u3002 \u9996\u5148\u5728 imagenet\u5206\u7c7b\u6a21\u578b \u4e2d\u4e0b\u8f7d\u8bad\u7ec3\u597d\u7684 mobilenetv1 \u6a21\u578b\u3002 \u5728\u5f53\u524d\u6587\u4ef6\u5939\u4e0b\u521b\u5efa 'pretrain' \u6587\u4ef6\u5939\uff0c\u5c06 mobilenetv1 \u6a21\u578b\u5728\u8be5\u6587\u4ef6\u5939\u4e0b\u89e3\u538b\uff0c\u89e3\u538b\u540e\u7684\u76ee\u5f55\u4e3a pretrain/MobileNetV1_pretrained \u5bfc\u51fa\u6a21\u578b # \u901a\u8fc7\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u53ef\u5c06\u6a21\u578b\u8f6c\u5316\u4e3a\u79bb\u7ebf\u91cf\u5316\u63a5\u53e3\u53ef\u7528\u7684\u6a21\u578b\uff1a 1 python export_model . py --model \"MobileNet\" --pretrained_model ./pretrain/MobileNetV1_pretrained --data imagenet \u8f6c\u5316\u4e4b\u540e\u7684\u6a21\u578b\u5b58\u50a8\u5728 inference_model/MobileNet/ \u6587\u4ef6\u5939\u4e0b\uff0c\u53ef\u770b\u5230\u8be5\u6587\u4ef6\u5939\u4e0b\u6709 'model' , 'weights' \u4e24\u4e2a\u6587\u4ef6\u3002 \u79bb\u7ebf\u91cf\u5316 # \u63a5\u4e0b\u6765\u5bf9\u5bfc\u51fa\u7684\u6a21\u578b\u6587\u4ef6\u8fdb\u884c\u79bb\u7ebf\u91cf\u5316\uff0c\u79bb\u7ebf\u91cf\u5316\u7684\u811a\u672c\u4e3a quant_post.py \uff0c\u811a\u672c\u4e2d\u4f7f\u7528\u63a5\u53e3 paddleslim.quant.quant_post \u5bf9\u6a21\u578b\u8fdb\u884c\u79bb\u7ebf\u91cf\u5316\u3002\u8fd0\u884c\u547d\u4ee4\u4e3a\uff1a 1 python quant_post . 
py --model_path ./inference_model/MobileNet --save_path ./quant_model_train/MobileNet --model_filename model --params_filename weights model_path : \u9700\u8981\u91cf\u5316\u7684\u6a21\u578b\u5750\u5728\u7684\u6587\u4ef6\u5939 save_path : \u91cf\u5316\u540e\u7684\u6a21\u578b\u4fdd\u5b58\u7684\u8def\u5f84 model_filename : \u5982\u679c\u9700\u8981\u91cf\u5316\u7684\u6a21\u578b\u7684\u53c2\u6570\u6587\u4ef6\u4fdd\u5b58\u5728\u4e00\u4e2a\u6587\u4ef6\u4e2d\uff0c\u5219\u8bbe\u7f6e\u4e3a\u8be5\u6a21\u578b\u7684\u6a21\u578b\u6587\u4ef6\u540d\u79f0\uff0c\u5982\u679c\u53c2\u6570\u6587\u4ef6\u4fdd\u5b58\u5728\u591a\u4e2a\u6587\u4ef6\u4e2d\uff0c\u5219\u4e0d\u9700\u8981\u8bbe\u7f6e\u3002 params_filename : \u5982\u679c\u9700\u8981\u91cf\u5316\u7684\u6a21\u578b\u7684\u53c2\u6570\u6587\u4ef6\u4fdd\u5b58\u5728\u4e00\u4e2a\u6587\u4ef6\u4e2d\uff0c\u5219\u8bbe\u7f6e\u4e3a\u8be5\u6a21\u578b\u7684\u53c2\u6570\u6587\u4ef6\u540d\u79f0\uff0c\u5982\u679c\u53c2\u6570\u6587\u4ef6\u4fdd\u5b58\u5728\u591a\u4e2a\u6587\u4ef6\u4e2d\uff0c\u5219\u4e0d\u9700\u8981\u8bbe\u7f6e\u3002 \u8fd0\u884c\u4ee5\u4e0a\u547d\u4ee4\u540e\uff0c\u53ef\u5728 ${save_path} \u4e0b\u770b\u5230\u91cf\u5316\u540e\u7684\u6a21\u578b\u6587\u4ef6\u548c\u53c2\u6570\u6587\u4ef6\u3002 \u4f7f\u7528\u7684\u91cf\u5316\u7b97\u6cd5\u4e3a 'KL' , \u4f7f\u7528\u8bad\u7ec3\u96c6\u4e2d\u7684160\u5f20\u56fe\u7247\u8fdb\u884c\u91cf\u5316\u53c2\u6570\u7684\u6821\u6b63\u3002 \u6d4b\u8bd5\u7cbe\u5ea6 # \u4f7f\u7528 eval.py \u811a\u672c\u5bf9\u91cf\u5316\u524d\u540e\u7684\u6a21\u578b\u8fdb\u884c\u6d4b\u8bd5\uff0c\u5f97\u5230\u6a21\u578b\u7684\u5206\u7c7b\u7cbe\u5ea6\u8fdb\u884c\u5bf9\u6bd4\u3002 \u9996\u5148\u6d4b\u8bd5\u91cf\u5316\u524d\u7684\u6a21\u578b\u7684\u7cbe\u5ea6\uff0c\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a 1 python eval . py --model_path ./inference_model/MobileNet --model_name model --params_name weights \u7cbe\u5ea6\u8f93\u51fa\u4e3a: 1 top1_acc / top5_acc = [ 0 . 70913923 0 . 89548034 ] \u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u6d4b\u8bd5\u79bb\u7ebf\u91cf\u5316\u540e\u7684\u6a21\u578b\u7684\u7cbe\u5ea6\uff1a 1 python eval . py --model_path ./quant_model_train/MobileNet \u7cbe\u5ea6\u8f93\u51fa\u4e3a 1 top1_acc / top5_acc = [ 0 . 70141864 0 . 
89086477 ] \u4ece\u4ee5\u4e0a\u7cbe\u5ea6\u5bf9\u6bd4\u53ef\u4ee5\u770b\u51fa\uff0c\u5bf9 mobilenet \u5728 imagenet \u4e0a\u7684\u5206\u7c7b\u6a21\u578b\u8fdb\u884c\u79bb\u7ebf\u91cf\u5316\u540e top1 \u7cbe\u5ea6\u635f\u5931\u4e3a 0.77% \uff0c top5 \u7cbe\u5ea6\u635f\u5931\u4e3a 0.46% .","title":"\u79bb\u7ebf\u91cf\u5316"},{"location":"tutorials/quant_post_demo/#_1","text":"\u672c\u793a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u79bb\u7ebf\u91cf\u5316\u63a5\u53e3 paddleslim.quant.quant_post \u6765\u5bf9\u8bad\u7ec3\u597d\u7684\u5206\u7c7b\u6a21\u578b\u8fdb\u884c\u79bb\u7ebf\u91cf\u5316, \u8be5\u63a5\u53e3\u65e0\u9700\u5bf9\u6a21\u578b\u8fdb\u884c\u8bad\u7ec3\u5c31\u53ef\u5f97\u5230\u91cf\u5316\u6a21\u578b\uff0c\u51cf\u5c11\u6a21\u578b\u7684\u5b58\u50a8\u7a7a\u95f4\u548c\u663e\u5b58\u5360\u7528\u3002","title":"\u79bb\u7ebf\u91cf\u5316\u793a\u4f8b"},{"location":"tutorials/quant_post_demo/#_2","text":"\u8bf7\u53c2\u8003 \u91cf\u5316API\u6587\u6863 \u3002","title":"\u63a5\u53e3\u4ecb\u7ecd"},{"location":"tutorials/quant_post_demo/#_3","text":"","title":"\u5206\u7c7b\u6a21\u578b\u7684\u79bb\u7ebf\u91cf\u5316\u6d41\u7a0b"},{"location":"tutorials/quant_post_demo/#_4","text":"\u5728\u5f53\u524d\u6587\u4ef6\u5939\u4e0b\u521b\u5efa data \u6587\u4ef6\u5939\uff0c\u5c06 imagenet \u6570\u636e\u96c6\u89e3\u538b\u5728 data \u6587\u4ef6\u5939\u4e0b\uff0c\u89e3\u538b\u540e data \u6587\u4ef6\u5939\u4e0b\u5e94\u5305\u542b\u4ee5\u4e0b\u6587\u4ef6\uff1a - 'train' \u6587\u4ef6\u5939\uff0c\u8bad\u7ec3\u56fe\u7247 - 'train_list.txt' \u6587\u4ef6 - 'val' \u6587\u4ef6\u5939\uff0c\u9a8c\u8bc1\u56fe\u7247 - 'val_list.txt' \u6587\u4ef6","title":"\u51c6\u5907\u6570\u636e"},{"location":"tutorials/quant_post_demo/#_5","text":"\u56e0\u4e3a\u79bb\u7ebf\u91cf\u5316\u63a5\u53e3\u53ea\u652f\u6301\u52a0\u8f7d\u901a\u8fc7 fluid.io.save_inference_model \u63a5\u53e3\u4fdd\u5b58\u7684\u6a21\u578b\uff0c\u56e0\u6b64\u5982\u679c\u60a8\u7684\u6a21\u578b\u662f\u901a\u8fc7\u5176\u4ed6\u63a5\u53e3\u4fdd\u5b58\u7684\uff0c\u90a3\u9700\u8981\u5148\u5c06\u6a21\u578b\u8fdb\u884c\u8f6c\u5316\u3002\u672c\u793a\u4f8b\u5c06\u4ee5\u5206\u7c7b\u6a21\u578b\u4e3a\u4f8b\u8fdb\u884c\u8bf4\u660e\u3002 \u9996\u5148\u5728 imagenet\u5206\u7c7b\u6a21\u578b \u4e2d\u4e0b\u8f7d\u8bad\u7ec3\u597d\u7684 mobilenetv1 \u6a21\u578b\u3002 \u5728\u5f53\u524d\u6587\u4ef6\u5939\u4e0b\u521b\u5efa 'pretrain' \u6587\u4ef6\u5939\uff0c\u5c06 mobilenetv1 \u6a21\u578b\u5728\u8be5\u6587\u4ef6\u5939\u4e0b\u89e3\u538b\uff0c\u89e3\u538b\u540e\u7684\u76ee\u5f55\u4e3a pretrain/MobileNetV1_pretrained","title":"\u51c6\u5907\u9700\u8981\u91cf\u5316\u7684\u6a21\u578b"},{"location":"tutorials/quant_post_demo/#_6","text":"\u901a\u8fc7\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u53ef\u5c06\u6a21\u578b\u8f6c\u5316\u4e3a\u79bb\u7ebf\u91cf\u5316\u63a5\u53e3\u53ef\u7528\u7684\u6a21\u578b\uff1a 1 python export_model . 
py --model \"MobileNet\" --pretrained_model ./pretrain/MobileNetV1_pretrained --data imagenet \u8f6c\u5316\u4e4b\u540e\u7684\u6a21\u578b\u5b58\u50a8\u5728 inference_model/MobileNet/ \u6587\u4ef6\u5939\u4e0b\uff0c\u53ef\u770b\u5230\u8be5\u6587\u4ef6\u5939\u4e0b\u6709 'model' , 'weights' \u4e24\u4e2a\u6587\u4ef6\u3002","title":"\u5bfc\u51fa\u6a21\u578b"},{"location":"tutorials/quant_post_demo/#_7","text":"\u63a5\u4e0b\u6765\u5bf9\u5bfc\u51fa\u7684\u6a21\u578b\u6587\u4ef6\u8fdb\u884c\u79bb\u7ebf\u91cf\u5316\uff0c\u79bb\u7ebf\u91cf\u5316\u7684\u811a\u672c\u4e3a quant_post.py \uff0c\u811a\u672c\u4e2d\u4f7f\u7528\u63a5\u53e3 paddleslim.quant.quant_post \u5bf9\u6a21\u578b\u8fdb\u884c\u79bb\u7ebf\u91cf\u5316\u3002\u8fd0\u884c\u547d\u4ee4\u4e3a\uff1a 1 python quant_post . py --model_path ./inference_model/MobileNet --save_path ./quant_model_train/MobileNet --model_filename model --params_filename weights model_path : \u9700\u8981\u91cf\u5316\u7684\u6a21\u578b\u5750\u5728\u7684\u6587\u4ef6\u5939 save_path : \u91cf\u5316\u540e\u7684\u6a21\u578b\u4fdd\u5b58\u7684\u8def\u5f84 model_filename : \u5982\u679c\u9700\u8981\u91cf\u5316\u7684\u6a21\u578b\u7684\u53c2\u6570\u6587\u4ef6\u4fdd\u5b58\u5728\u4e00\u4e2a\u6587\u4ef6\u4e2d\uff0c\u5219\u8bbe\u7f6e\u4e3a\u8be5\u6a21\u578b\u7684\u6a21\u578b\u6587\u4ef6\u540d\u79f0\uff0c\u5982\u679c\u53c2\u6570\u6587\u4ef6\u4fdd\u5b58\u5728\u591a\u4e2a\u6587\u4ef6\u4e2d\uff0c\u5219\u4e0d\u9700\u8981\u8bbe\u7f6e\u3002 params_filename : \u5982\u679c\u9700\u8981\u91cf\u5316\u7684\u6a21\u578b\u7684\u53c2\u6570\u6587\u4ef6\u4fdd\u5b58\u5728\u4e00\u4e2a\u6587\u4ef6\u4e2d\uff0c\u5219\u8bbe\u7f6e\u4e3a\u8be5\u6a21\u578b\u7684\u53c2\u6570\u6587\u4ef6\u540d\u79f0\uff0c\u5982\u679c\u53c2\u6570\u6587\u4ef6\u4fdd\u5b58\u5728\u591a\u4e2a\u6587\u4ef6\u4e2d\uff0c\u5219\u4e0d\u9700\u8981\u8bbe\u7f6e\u3002 \u8fd0\u884c\u4ee5\u4e0a\u547d\u4ee4\u540e\uff0c\u53ef\u5728 ${save_path} \u4e0b\u770b\u5230\u91cf\u5316\u540e\u7684\u6a21\u578b\u6587\u4ef6\u548c\u53c2\u6570\u6587\u4ef6\u3002 \u4f7f\u7528\u7684\u91cf\u5316\u7b97\u6cd5\u4e3a 'KL' , \u4f7f\u7528\u8bad\u7ec3\u96c6\u4e2d\u7684160\u5f20\u56fe\u7247\u8fdb\u884c\u91cf\u5316\u53c2\u6570\u7684\u6821\u6b63\u3002","title":"\u79bb\u7ebf\u91cf\u5316"},{"location":"tutorials/quant_post_demo/#_8","text":"\u4f7f\u7528 eval.py \u811a\u672c\u5bf9\u91cf\u5316\u524d\u540e\u7684\u6a21\u578b\u8fdb\u884c\u6d4b\u8bd5\uff0c\u5f97\u5230\u6a21\u578b\u7684\u5206\u7c7b\u7cbe\u5ea6\u8fdb\u884c\u5bf9\u6bd4\u3002 \u9996\u5148\u6d4b\u8bd5\u91cf\u5316\u524d\u7684\u6a21\u578b\u7684\u7cbe\u5ea6\uff0c\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a 1 python eval . py --model_path ./inference_model/MobileNet --model_name model --params_name weights \u7cbe\u5ea6\u8f93\u51fa\u4e3a: 1 top1_acc / top5_acc = [ 0 . 70913923 0 . 89548034 ] \u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u6d4b\u8bd5\u79bb\u7ebf\u91cf\u5316\u540e\u7684\u6a21\u578b\u7684\u7cbe\u5ea6\uff1a 1 python eval . py --model_path ./quant_model_train/MobileNet \u7cbe\u5ea6\u8f93\u51fa\u4e3a 1 top1_acc / top5_acc = [ 0 . 70141864 0 . 
89086477 ] \u4ece\u4ee5\u4e0a\u7cbe\u5ea6\u5bf9\u6bd4\u53ef\u4ee5\u770b\u51fa\uff0c\u5bf9 mobilenet \u5728 imagenet \u4e0a\u7684\u5206\u7c7b\u6a21\u578b\u8fdb\u884c\u79bb\u7ebf\u91cf\u5316\u540e top1 \u7cbe\u5ea6\u635f\u5931\u4e3a 0.77% \uff0c top5 \u7cbe\u5ea6\u635f\u5931\u4e3a 0.46% .","title":"\u6d4b\u8bd5\u7cbe\u5ea6"},{"location":"tutorials/sensitivity_demo/","text":"\u8be5\u793a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u5206\u6790\u5377\u79ef\u7f51\u7edc\u4e2d\u5404\u5377\u79ef\u5c42\u7684\u654f\u611f\u5ea6\uff0c\u4ee5\u53ca\u5982\u4f55\u6839\u636e\u8ba1\u7b97\u51fa\u7684\u654f\u611f\u5ea6\u9009\u62e9\u4e00\u7ec4\u5408\u9002\u7684\u526a\u88c1\u7387\u3002 \u8be5\u793a\u4f8b\u9ed8\u8ba4\u4f1a\u81ea\u52a8\u4e0b\u8f7d\u5e76\u4f7f\u7528MNIST\u6570\u636e\u3002\u652f\u6301\u4ee5\u4e0b\u6a21\u578b\uff1a MobileNetV1 MobileNetV2 ResNet50 1. \u63a5\u53e3\u4ecb\u7ecd # \u8be5\u793a\u4f8b\u6d89\u53ca\u4ee5\u4e0b\u63a5\u53e3\uff1a paddleslim.prune.sensitivity paddleslim.prune.merge_sensitive paddleslim.prune.get_ratios_by_loss 2. \u8fd0\u884c\u793a\u4f8b # \u5728\u8def\u5f84 PaddleSlim/demo/sensitive \u4e0b\u6267\u884c\u4ee5\u4e0b\u4ee3\u7801\u8fd0\u884c\u793a\u4f8b\uff1a 1 2 export CUDA_VISIBLE_DEVICES = 0 python train . py --model \"MobileNetV1\" \u901a\u8fc7 python train.py --help \u67e5\u770b\u66f4\u591a\u9009\u9879\u3002 3. \u91cd\u8981\u6b65\u9aa4\u8bf4\u660e # 3.1 \u8ba1\u7b97\u654f\u611f\u5ea6 # \u8ba1\u7b97\u654f\u611f\u5ea6\u4e4b\u524d\uff0c\u7528\u6237\u9700\u8981\u642d\u5efa\u597d\u7528\u4e8e\u6d4b\u8bd5\u7684\u7f51\u7edc\uff0c\u4ee5\u53ca\u5b9e\u73b0\u8bc4\u4f30\u6a21\u578b\u7cbe\u5ea6\u7684\u56de\u8c03\u51fd\u6570\u3002 \u8c03\u7528 paddleslim.prune.sensitivity \u63a5\u53e3\u8ba1\u7b97\u654f\u611f\u5ea6\u3002\u654f\u611f\u5ea6\u4fe1\u606f\u4f1a\u8ffd\u52a0\u5230 sensitivities_file \u9009\u9879\u6240\u6307\u5b9a\u7684\u6587\u4ef6\u4e2d\uff0c\u5982\u679c\u9700\u8981\u91cd\u65b0\u8ba1\u7b97\u654f\u611f\u5ea6\uff0c\u9700\u8981\u5148\u5220\u9664 sensitivities_file \u6587\u4ef6\u3002 \u5982\u679c\u6a21\u578b\u8bc4\u4f30\u901f\u5ea6\u8f83\u6162\uff0c\u53ef\u4ee5\u901a\u8fc7\u591a\u8fdb\u7a0b\u7684\u65b9\u5f0f\u52a0\u901f\u654f\u611f\u5ea6\u8ba1\u7b97\u8fc7\u7a0b\u3002\u6bd4\u5982\u5728\u8fdb\u7a0b1\u4e2d\u8bbe\u7f6e pruned_ratios=[0.1, 0.2, 0.3, 0.4] \uff0c\u5e76\u5c06\u654f\u611f\u5ea6\u4fe1\u606f\u5b58\u653e\u5728\u6587\u4ef6 sensitivities_0.data \u4e2d\uff0c\u7136\u540e\u5728\u8fdb\u7a0b2\u4e2d\u8bbe\u7f6e pruned_ratios=[0.5, 0.6, 0.7] \uff0c\u5e76\u5c06\u654f\u611f\u5ea6\u4fe1\u606f\u5b58\u50a8\u5728\u6587\u4ef6 sensitivities_1.data \u4e2d\u3002\u8fd9\u6837\u6bcf\u4e2a\u8fdb\u7a0b\u53ea\u4f1a\u8ba1\u7b97\u6307\u5b9a\u526a\u5207\u7387\u4e0b\u7684\u654f\u611f\u5ea6\u4fe1\u606f\u3002\u591a\u8fdb\u7a0b\u53ef\u4ee5\u8fd0\u884c\u5728\u5355\u673a\u591a\u5361\uff0c\u6216\u591a\u673a\u591a\u5361\u3002 \u4ee3\u7801\u5982\u4e0b\uff1a 1 2 3 4 5 6 7 8 # \u8fdb\u7a0b 1 sensitivity ( val_program , place , params , test , sensitivities_file = \"sensitivities_0.data\" , pruned_ratios = [ 0 . 1 , 0 . 2 , 0 . 3 , 0 . 4 ]) 1 2 3 4 5 6 7 8 # \u8fdb\u7a0b 2 sensitivity ( val_program , place , params , test , sensitivities_file = \"sensitivities_1.data\" , pruned_ratios = [ 0 . 5 , 0 . 6 , 0 . 
7 ]) 3.2 \u5408\u5e76\u654f\u611f\u5ea6 # \u5982\u679c\u7528\u6237\u901a\u8fc7\u4e0a\u4e00\u8282\u591a\u8fdb\u7a0b\u7684\u65b9\u5f0f\u751f\u6210\u4e86\u591a\u4e2a\u5b58\u50a8\u654f\u611f\u5ea6\u4fe1\u606f\u7684\u6587\u4ef6\uff0c\u53ef\u4ee5\u901a\u8fc7 paddleslim.prune.merge_sensitive \u5c06\u5176\u5408\u5e76\uff0c\u5408\u5e76\u540e\u7684\u654f\u611f\u5ea6\u4fe1\u606f\u5b58\u50a8\u5728\u4e00\u4e2a dict \u4e2d\u3002\u4ee3\u7801\u5982\u4e0b\uff1a 1 sens = merge_sensitive ([ \"./sensitivities_0.data\" , \"./sensitivities_1.data\" ]) 3.3 \u8ba1\u7b97\u526a\u88c1\u7387 # \u8c03\u7528 paddleslim.prune.get_ratios_by_loss \u63a5\u53e3\u8ba1\u7b97\u4e00\u7ec4\u526a\u88c1\u7387\u3002 1 ratios = get_ratios_by_loss ( sens , 0 . 01 ) \u5176\u4e2d\uff0c 0.01 \u4e3a\u4e00\u4e2a\u9608\u503c\uff0c\u5bf9\u4e8e\u4efb\u610f\u5377\u79ef\u5c42\uff0c\u5176\u526a\u88c1\u7387\u4e3a\u4f7f\u7cbe\u5ea6\u635f\u5931\u4f4e\u4e8e\u9608\u503c 0.01 \u7684\u6700\u5927\u526a\u88c1\u7387\u3002 \u7528\u6237\u5728\u8ba1\u7b97\u51fa\u4e00\u7ec4\u526a\u88c1\u7387\u4e4b\u540e\u53ef\u4ee5\u901a\u8fc7\u63a5\u53e3 paddleslim.prune.Pruner \u526a\u88c1\u7f51\u7edc\uff0c\u5e76\u7528\u63a5\u53e3 paddleslim.analysis.flops \u8ba1\u7b97 FLOPs \u3002\u5982\u679c FLOPs \u4e0d\u6ee1\u8db3\u8981\u6c42\uff0c\u8c03\u6574\u9608\u503c\u91cd\u65b0\u8ba1\u7b97\u51fa\u4e00\u7ec4\u526a\u88c1\u7387\u3002","title":"Sensitivity demo"},{"location":"tutorials/sensitivity_demo/#1","text":"\u8be5\u793a\u4f8b\u6d89\u53ca\u4ee5\u4e0b\u63a5\u53e3\uff1a paddleslim.prune.sensitivity paddleslim.prune.merge_sensitive paddleslim.prune.get_ratios_by_loss","title":"1. \u63a5\u53e3\u4ecb\u7ecd"},{"location":"tutorials/sensitivity_demo/#2","text":"\u5728\u8def\u5f84 PaddleSlim/demo/sensitive \u4e0b\u6267\u884c\u4ee5\u4e0b\u4ee3\u7801\u8fd0\u884c\u793a\u4f8b\uff1a 1 2 export CUDA_VISIBLE_DEVICES = 0 python train . py --model \"MobileNetV1\" \u901a\u8fc7 python train.py --help \u67e5\u770b\u66f4\u591a\u9009\u9879\u3002","title":"2. \u8fd0\u884c\u793a\u4f8b"},{"location":"tutorials/sensitivity_demo/#3","text":"","title":"3. 
\u91cd\u8981\u6b65\u9aa4\u8bf4\u660e"},{"location":"tutorials/sensitivity_demo/#31","text":"\u8ba1\u7b97\u654f\u611f\u5ea6\u4e4b\u524d\uff0c\u7528\u6237\u9700\u8981\u642d\u5efa\u597d\u7528\u4e8e\u6d4b\u8bd5\u7684\u7f51\u7edc\uff0c\u4ee5\u53ca\u5b9e\u73b0\u8bc4\u4f30\u6a21\u578b\u7cbe\u5ea6\u7684\u56de\u8c03\u51fd\u6570\u3002 \u8c03\u7528 paddleslim.prune.sensitivity \u63a5\u53e3\u8ba1\u7b97\u654f\u611f\u5ea6\u3002\u654f\u611f\u5ea6\u4fe1\u606f\u4f1a\u8ffd\u52a0\u5230 sensitivities_file \u9009\u9879\u6240\u6307\u5b9a\u7684\u6587\u4ef6\u4e2d\uff0c\u5982\u679c\u9700\u8981\u91cd\u65b0\u8ba1\u7b97\u654f\u611f\u5ea6\uff0c\u9700\u8981\u5148\u5220\u9664 sensitivities_file \u6587\u4ef6\u3002 \u5982\u679c\u6a21\u578b\u8bc4\u4f30\u901f\u5ea6\u8f83\u6162\uff0c\u53ef\u4ee5\u901a\u8fc7\u591a\u8fdb\u7a0b\u7684\u65b9\u5f0f\u52a0\u901f\u654f\u611f\u5ea6\u8ba1\u7b97\u8fc7\u7a0b\u3002\u6bd4\u5982\u5728\u8fdb\u7a0b1\u4e2d\u8bbe\u7f6e pruned_ratios=[0.1, 0.2, 0.3, 0.4] \uff0c\u5e76\u5c06\u654f\u611f\u5ea6\u4fe1\u606f\u5b58\u653e\u5728\u6587\u4ef6 sensitivities_0.data \u4e2d\uff0c\u7136\u540e\u5728\u8fdb\u7a0b2\u4e2d\u8bbe\u7f6e pruned_ratios=[0.5, 0.6, 0.7] \uff0c\u5e76\u5c06\u654f\u611f\u5ea6\u4fe1\u606f\u5b58\u50a8\u5728\u6587\u4ef6 sensitivities_1.data \u4e2d\u3002\u8fd9\u6837\u6bcf\u4e2a\u8fdb\u7a0b\u53ea\u4f1a\u8ba1\u7b97\u6307\u5b9a\u526a\u5207\u7387\u4e0b\u7684\u654f\u611f\u5ea6\u4fe1\u606f\u3002\u591a\u8fdb\u7a0b\u53ef\u4ee5\u8fd0\u884c\u5728\u5355\u673a\u591a\u5361\uff0c\u6216\u591a\u673a\u591a\u5361\u3002 \u4ee3\u7801\u5982\u4e0b\uff1a 1 2 3 4 5 6 7 8 # \u8fdb\u7a0b 1 sensitivity ( val_program , place , params , test , sensitivities_file = \"sensitivities_0.data\" , pruned_ratios = [ 0 . 1 , 0 . 2 , 0 . 3 , 0 . 4 ]) 1 2 3 4 5 6 7 8 # \u8fdb\u7a0b 2 sensitivity ( val_program , place , params , test , sensitivities_file = \"sensitivities_1.data\" , pruned_ratios = [ 0 . 5 , 0 . 6 , 0 . 7 ])","title":"3.1 \u8ba1\u7b97\u654f\u611f\u5ea6"},{"location":"tutorials/sensitivity_demo/#32","text":"\u5982\u679c\u7528\u6237\u901a\u8fc7\u4e0a\u4e00\u8282\u591a\u8fdb\u7a0b\u7684\u65b9\u5f0f\u751f\u6210\u4e86\u591a\u4e2a\u5b58\u50a8\u654f\u611f\u5ea6\u4fe1\u606f\u7684\u6587\u4ef6\uff0c\u53ef\u4ee5\u901a\u8fc7 paddleslim.prune.merge_sensitive \u5c06\u5176\u5408\u5e76\uff0c\u5408\u5e76\u540e\u7684\u654f\u611f\u5ea6\u4fe1\u606f\u5b58\u50a8\u5728\u4e00\u4e2a dict \u4e2d\u3002\u4ee3\u7801\u5982\u4e0b\uff1a 1 sens = merge_sensitive ([ \"./sensitivities_0.data\" , \"./sensitivities_1.data\" ])","title":"3.2 \u5408\u5e76\u654f\u611f\u5ea6"},{"location":"tutorials/sensitivity_demo/#33","text":"\u8c03\u7528 paddleslim.prune.get_ratios_by_loss \u63a5\u53e3\u8ba1\u7b97\u4e00\u7ec4\u526a\u88c1\u7387\u3002 1 ratios = get_ratios_by_loss ( sens , 0 . 
01 ) \u5176\u4e2d\uff0c 0.01 \u4e3a\u4e00\u4e2a\u9608\u503c\uff0c\u5bf9\u4e8e\u4efb\u610f\u5377\u79ef\u5c42\uff0c\u5176\u526a\u88c1\u7387\u4e3a\u4f7f\u7cbe\u5ea6\u635f\u5931\u4f4e\u4e8e\u9608\u503c 0.01 \u7684\u6700\u5927\u526a\u88c1\u7387\u3002 \u7528\u6237\u5728\u8ba1\u7b97\u51fa\u4e00\u7ec4\u526a\u88c1\u7387\u4e4b\u540e\u53ef\u4ee5\u901a\u8fc7\u63a5\u53e3 paddleslim.prune.Pruner \u526a\u88c1\u7f51\u7edc\uff0c\u5e76\u7528\u63a5\u53e3 paddleslim.analysis.flops \u8ba1\u7b97 FLOPs \u3002\u5982\u679c FLOPs \u4e0d\u6ee1\u8db3\u8981\u6c42\uff0c\u8c03\u6574\u9608\u503c\u91cd\u65b0\u8ba1\u7b97\u51fa\u4e00\u7ec4\u526a\u88c1\u7387\u3002","title":"3.3 \u8ba1\u7b97\u526a\u88c1\u7387"}]} \ No newline at end of file +{"config":{"lang":["en"],"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"PaddleSlim # PaddleSlim\u662fPaddlePaddle\u6846\u67b6\u7684\u4e00\u4e2a\u5b50\u6a21\u5757\uff0c\u4e3b\u8981\u7528\u4e8e\u538b\u7f29\u56fe\u50cf\u9886\u57df\u6a21\u578b\u3002\u5728PaddleSlim\u4e2d\uff0c\u4e0d\u4ec5\u5b9e\u73b0\u4e86\u76ee\u524d\u4e3b\u6d41\u7684\u7f51\u7edc\u526a\u679d\u3001\u91cf\u5316\u3001\u84b8\u998f\u4e09\u79cd\u538b\u7f29\u7b56\u7565\uff0c\u8fd8\u5b9e\u73b0\u4e86\u8d85\u53c2\u6570\u641c\u7d22\u548c\u5c0f\u6a21\u578b\u7f51\u7edc\u7ed3\u6784\u641c\u7d22\u529f\u80fd\u3002\u5728\u540e\u7eed\u7248\u672c\u4e2d\uff0c\u4f1a\u6dfb\u52a0\u66f4\u591a\u7684\u538b\u7f29\u7b56\u7565\uff0c\u4ee5\u53ca\u5b8c\u5584\u5bf9NLP\u9886\u57df\u6a21\u578b\u7684\u652f\u6301\u3002 \u529f\u80fd # \u6a21\u578b\u526a\u88c1 \u652f\u6301\u901a\u9053\u5747\u5300\u6a21\u578b\u526a\u88c1\uff08uniform pruning) \u57fa\u4e8e\u654f\u611f\u5ea6\u7684\u6a21\u578b\u526a\u88c1 \u57fa\u4e8e\u8fdb\u5316\u7b97\u6cd5\u7684\u81ea\u52a8\u6a21\u578b\u526a\u88c1\u4e09\u79cd\u65b9\u5f0f \u91cf\u5316\u8bad\u7ec3 \u5728\u7ebf\u91cf\u5316\u8bad\u7ec3\uff08training aware\uff09 \u79bb\u7ebf\u91cf\u5316\uff08post training\uff09 \u652f\u6301\u5bf9\u6743\u91cd\u5168\u5c40\u91cf\u5316\u548cChannel-Wise\u91cf\u5316 \u84b8\u998f \u8f7b\u91cf\u795e\u7ecf\u7f51\u7edc\u7ed3\u6784\u81ea\u52a8\u641c\u7d22\uff08Light-NAS\uff09 \u652f\u6301\u57fa\u4e8e\u8fdb\u5316\u7b97\u6cd5\u7684\u8f7b\u91cf\u795e\u7ecf\u7f51\u7edc\u7ed3\u6784\u81ea\u52a8\u641c\u7d22\uff08Light-NAS\uff09 \u652f\u6301 FLOPS / \u786c\u4ef6\u5ef6\u65f6\u7ea6\u675f \u652f\u6301\u591a\u5e73\u53f0\u6a21\u578b\u5ef6\u65f6\u8bc4\u4f30 \u5b89\u88c5 # \u5b89\u88c5PaddleSlim\u524d\uff0c\u8bf7\u786e\u8ba4\u5df2\u6b63\u786e\u5b89\u88c5Paddle1.6\u7248\u672c\u6216\u66f4\u65b0\u7248\u672c\u3002Paddle\u5b89\u88c5\u8bf7\u53c2\u8003\uff1a Paddle\u5b89\u88c5\u6559\u7a0b \u3002 \u5b89\u88c5develop\u7248\u672c 1 2 3 git clone https : // github . com / PaddlePaddle / PaddleSlim . git cd PaddleSlim python setup . py install \u5b89\u88c5\u5b98\u65b9\u53d1\u5e03\u7684\u6700\u65b0\u7248\u672c 1 pip install paddleslim - i https : // pypi . 
org / simple \u5b89\u88c5\u5386\u53f2\u7248\u672c \u8bf7\u70b9\u51fb pypi.org \u67e5\u770b\u53ef\u5b89\u88c5\u5386\u53f2\u7248\u672c\u3002 \u4f7f\u7528 # API\u6587\u6863 \uff1aAPI\u4f7f\u7528\u4ecb\u7ecd\uff0c\u5305\u62ec \u84b8\u998f \u3001 \u526a\u88c1 \u3001 \u91cf\u5316 \u548c \u6a21\u578b\u7ed3\u6784\u641c\u7d22 \u3002 \u793a\u4f8b \uff1a\u57fa\u4e8emnist\u548ccifar10\u7b49\u7b80\u5355\u5206\u7c7b\u4efb\u52a1\u7684\u6a21\u578b\u538b\u7f29\u793a\u4f8b\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u8be5\u90e8\u5206\u5feb\u901f\u4f53\u9a8c\u548c\u4e86\u89e3PaddleSlim\u7684\u529f\u80fd\u3002 \u5b9e\u8df5\u6559\u7a0b \uff1a\u7ecf\u5178\u6a21\u578b\u7684\u5206\u6790\u548c\u538b\u7f29\u5b9e\u9a8c\u6559\u7a0b\u3002 \u6a21\u578b\u5e93 \uff1a\u7ecf\u8fc7\u538b\u7f29\u7684\u5206\u7c7b\u3001\u68c0\u6d4b\u3001\u8bed\u4e49\u5206\u5272\u6a21\u578b\uff0c\u5305\u62ec\u6743\u91cd\u6587\u4ef6\u3001\u7f51\u7edc\u7ed3\u6784\u6587\u4ef6\u548c\u6027\u80fd\u6570\u636e\u3002 Paddle\u68c0\u6d4b\u5e93 \uff1a\u4ecb\u7ecd\u5982\u4f55\u5728\u68c0\u6d4b\u5e93\u4e2d\u4f7f\u7528PaddleSlim\u3002 Paddle\u5206\u5272\u5e93 \uff1a\u4ecb\u7ecd\u5982\u4f55\u5728\u5206\u5272\u5e93\u4e2d\u4f7f\u7528PaddleSlim\u3002 PaddleLite \uff1a\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u9884\u6d4b\u5e93PaddleLite\u90e8\u7f72PaddleSlim\u4ea7\u51fa\u7684\u6a21\u578b\u3002 \u8d21\u732e\u4e0e\u53cd\u9988 #","title":"Home"},{"location":"#paddleslim","text":"PaddleSlim\u662fPaddlePaddle\u6846\u67b6\u7684\u4e00\u4e2a\u5b50\u6a21\u5757\uff0c\u4e3b\u8981\u7528\u4e8e\u538b\u7f29\u56fe\u50cf\u9886\u57df\u6a21\u578b\u3002\u5728PaddleSlim\u4e2d\uff0c\u4e0d\u4ec5\u5b9e\u73b0\u4e86\u76ee\u524d\u4e3b\u6d41\u7684\u7f51\u7edc\u526a\u679d\u3001\u91cf\u5316\u3001\u84b8\u998f\u4e09\u79cd\u538b\u7f29\u7b56\u7565\uff0c\u8fd8\u5b9e\u73b0\u4e86\u8d85\u53c2\u6570\u641c\u7d22\u548c\u5c0f\u6a21\u578b\u7f51\u7edc\u7ed3\u6784\u641c\u7d22\u529f\u80fd\u3002\u5728\u540e\u7eed\u7248\u672c\u4e2d\uff0c\u4f1a\u6dfb\u52a0\u66f4\u591a\u7684\u538b\u7f29\u7b56\u7565\uff0c\u4ee5\u53ca\u5b8c\u5584\u5bf9NLP\u9886\u57df\u6a21\u578b\u7684\u652f\u6301\u3002","title":"PaddleSlim"},{"location":"#_1","text":"\u6a21\u578b\u526a\u88c1 \u652f\u6301\u901a\u9053\u5747\u5300\u6a21\u578b\u526a\u88c1\uff08uniform pruning) \u57fa\u4e8e\u654f\u611f\u5ea6\u7684\u6a21\u578b\u526a\u88c1 \u57fa\u4e8e\u8fdb\u5316\u7b97\u6cd5\u7684\u81ea\u52a8\u6a21\u578b\u526a\u88c1\u4e09\u79cd\u65b9\u5f0f \u91cf\u5316\u8bad\u7ec3 \u5728\u7ebf\u91cf\u5316\u8bad\u7ec3\uff08training aware\uff09 \u79bb\u7ebf\u91cf\u5316\uff08post training\uff09 \u652f\u6301\u5bf9\u6743\u91cd\u5168\u5c40\u91cf\u5316\u548cChannel-Wise\u91cf\u5316 \u84b8\u998f \u8f7b\u91cf\u795e\u7ecf\u7f51\u7edc\u7ed3\u6784\u81ea\u52a8\u641c\u7d22\uff08Light-NAS\uff09 \u652f\u6301\u57fa\u4e8e\u8fdb\u5316\u7b97\u6cd5\u7684\u8f7b\u91cf\u795e\u7ecf\u7f51\u7edc\u7ed3\u6784\u81ea\u52a8\u641c\u7d22\uff08Light-NAS\uff09 \u652f\u6301 FLOPS / \u786c\u4ef6\u5ef6\u65f6\u7ea6\u675f \u652f\u6301\u591a\u5e73\u53f0\u6a21\u578b\u5ef6\u65f6\u8bc4\u4f30","title":"\u529f\u80fd"},{"location":"#_2","text":"\u5b89\u88c5PaddleSlim\u524d\uff0c\u8bf7\u786e\u8ba4\u5df2\u6b63\u786e\u5b89\u88c5Paddle1.6\u7248\u672c\u6216\u66f4\u65b0\u7248\u672c\u3002Paddle\u5b89\u88c5\u8bf7\u53c2\u8003\uff1a Paddle\u5b89\u88c5\u6559\u7a0b \u3002 \u5b89\u88c5develop\u7248\u672c 1 2 3 git clone https : // github . com / PaddlePaddle / PaddleSlim . git cd PaddleSlim python setup . py install \u5b89\u88c5\u5b98\u65b9\u53d1\u5e03\u7684\u6700\u65b0\u7248\u672c 1 pip install paddleslim - i https : // pypi . 
org / simple \u5b89\u88c5\u5386\u53f2\u7248\u672c \u8bf7\u70b9\u51fb pypi.org \u67e5\u770b\u53ef\u5b89\u88c5\u5386\u53f2\u7248\u672c\u3002","title":"\u5b89\u88c5"},{"location":"#_3","text":"API\u6587\u6863 \uff1aAPI\u4f7f\u7528\u4ecb\u7ecd\uff0c\u5305\u62ec \u84b8\u998f \u3001 \u526a\u88c1 \u3001 \u91cf\u5316 \u548c \u6a21\u578b\u7ed3\u6784\u641c\u7d22 \u3002 \u793a\u4f8b \uff1a\u57fa\u4e8emnist\u548ccifar10\u7b49\u7b80\u5355\u5206\u7c7b\u4efb\u52a1\u7684\u6a21\u578b\u538b\u7f29\u793a\u4f8b\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u8be5\u90e8\u5206\u5feb\u901f\u4f53\u9a8c\u548c\u4e86\u89e3PaddleSlim\u7684\u529f\u80fd\u3002 \u5b9e\u8df5\u6559\u7a0b \uff1a\u7ecf\u5178\u6a21\u578b\u7684\u5206\u6790\u548c\u538b\u7f29\u5b9e\u9a8c\u6559\u7a0b\u3002 \u6a21\u578b\u5e93 \uff1a\u7ecf\u8fc7\u538b\u7f29\u7684\u5206\u7c7b\u3001\u68c0\u6d4b\u3001\u8bed\u4e49\u5206\u5272\u6a21\u578b\uff0c\u5305\u62ec\u6743\u91cd\u6587\u4ef6\u3001\u7f51\u7edc\u7ed3\u6784\u6587\u4ef6\u548c\u6027\u80fd\u6570\u636e\u3002 Paddle\u68c0\u6d4b\u5e93 \uff1a\u4ecb\u7ecd\u5982\u4f55\u5728\u68c0\u6d4b\u5e93\u4e2d\u4f7f\u7528PaddleSlim\u3002 Paddle\u5206\u5272\u5e93 \uff1a\u4ecb\u7ecd\u5982\u4f55\u5728\u5206\u5272\u5e93\u4e2d\u4f7f\u7528PaddleSlim\u3002 PaddleLite \uff1a\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u9884\u6d4b\u5e93PaddleLite\u90e8\u7f72PaddleSlim\u4ea7\u51fa\u7684\u6a21\u578b\u3002","title":"\u4f7f\u7528"},{"location":"#_4","text":"","title":"\u8d21\u732e\u4e0e\u53cd\u9988"},{"location":"model_zoo/","text":"1. \u91cf\u5316 # 1.1 \u56fe\u8c61\u5206\u7c7b # \u6570\u636e\u96c6\uff1aImageNet1000\u7c7b \u8bc4\u4ef7\u6307\u6807\uff1aTop-1/Top-5\u51c6\u786e\u7387 Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 MobileNetV1 FP32 70.99%/89.68% xx%/xx% xx%/xx% MobileNetV2 FP32 72.15%/90.65% ResNet50 FP32 76.50%/93.00% \u91cf\u5316\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 MobileNetV1 17M xxM xxM MobileNetV2 xxM ResNet50 99M 1.2 \u76ee\u6807\u68c0\u6d4b # \u6570\u636e\u96c6\uff1aCOCO 2017 Model \u8f93\u5165\u5c3a\u5bf8 Image/GPU FP32 BoxAP \u79bb\u7ebf\u91cf\u5316 BoxAP \u91cf\u5316\u8bad\u7ec3 BoxAP MobileNet-V1-YOLOv3 608 8 29.3 xx xx MobileNet-V1-YOLOv3 416 MobileNet-V1-YOLOv3 320 R50-dcn-YOLOv3 608 41.4 R50-dcn-YOLOv3 416 R50-dcn-YOLOv3 320 \u91cf\u5316\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 MobileNet-V1-YOLOv3 xxM xxM xxM R50-dcn-YOLOv3 xxM # \u6570\u636e\u96c6\uff1aWIDER-FACE \u8bc4\u4ef7\u6307\u6807\uff1aEasy/Medium/Hard mAP Model \u8f93\u5165\u5c3a\u5bf8 Image/GPU FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 BlazeFace 640 8 0.915/0.892/0.797 xx/xx/xx xx/xx/xx BlazeFace-Lite 640 0.909/0.885/0.781 BlazeFace-NAS 640 0.837/0.807/0.658 \u91cf\u5316\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 BlazeFace xxM xxM xxM BlazeFace-Lite xxM BlazeFace-NAS xxM # 1.3 \u56fe\u50cf\u5206\u5272 # \u6570\u636e\u96c6\uff1aCityscapes Model FP32 mIoU \u79bb\u7ebf\u91cf\u5316 mIoU \u91cf\u5316\u8bad\u7ec3 mIoU DeepLabv3+/MobileNetv1 63.26 xx xx DeepLabv3+/MobileNetv2 69.81 \u91cf\u5316\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 DeepLabv3+/MobileNetv1 xxM xxM xxM DeepLabv3+/MobileNetv2 xxM # 2. 
\u526a\u679d # 2.1 \u56fe\u50cf\u5206\u7c7b # \u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model Top-1/Top-5 MobileNetV1 70.99%/89.68% MobileNetV1 uniform -50% MobileNetV1 sensitive -xx% MobileNetV2 72.15%/90.65% MobileNetV2 uniform -50% MobileNetV2 sensitive -xx% ResNet34 74.57%/92.14% ResNet34 uniform -50% ResNet34 auto -50% \u526a\u679d\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u548c\u8ba1\u7b97\u91cf\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model baseline FLOPs baseline size \u526a\u679d\u540e FlOPs \u526a\u679d\u540e size MobileNetV1 xx xx xx xx MobileNetV2 xx ResNet34 xx # 2.2 \u76ee\u6807\u68c0\u6d4b # \u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u6570\u636e\u96c6 \u8f93\u5165\u5c3a\u5bf8 Image/GPU baseline mAP \u654f\u611f\u5ea6\u526a\u679d mAP MobileNet-V1-YOLOv3 Pasacl VOC 608 8 76.2 77.59 (-50%) MobileNet-V1-YOLOv3 Pasacl VOC 416 76.7 xx MobileNet-V1-YOLOv3 Pasacl VOC 320 75.2 xx MobileNet-V1-YOLOv3 COCO 608 29.3 29.56 (-20%) MobileNet-V1-YOLOv3 COCO 416 29.3 xx MobileNet-V1-YOLOv3 COCO 320 27.1 xx R50-dcn-YOLOv3 COCO 608 41.4 37.8 (-30%) R50-dcn-YOLOv3 COCO 416 R50-dcn-YOLOv3 COCO 320 \u526a\u679d\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u548c\u8ba1\u7b97\u91cf\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model baseline FLOPs baseline size \u526a\u679d\u540e FlOPs \u526a\u679d\u540e size MobileNet-V1-YOLOv3-VOC xx xxM xx xxM MobileNet-V1-YOLOv3-COCO xx R50-dcn-YOLOv3-COCO xx 2.3 \u56fe\u50cf\u5206\u5272 # \u6570\u636e\u96c6\uff1aCityscapes Model Baseline mIoU xx\u526a\u679d mIoU DeepLabv3+/MobileNetv2 69.81 xx \u526a\u679d\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u548c\u8ba1\u7b97\u91cf\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model baseline FLOPs baseline size \u526a\u679d\u540e FlOPs \u526a\u679d\u540e size DeepLabv3+/MobileNetv2 xx xxM xx xxM 3. \u84b8\u998f # 3.1 \u56fe\u8c61\u5206\u7c7b # \u6570\u636e\u96c6\uff1aImageNet1000\u7c7b \u8bc4\u4ef7\u6307\u6807\uff1aTop-1/Top-5\u51c6\u786e\u7387 Model baseline \u84b8\u998f\u540e MobileNetV1 70.99%/89.68% 72.79%/90.69% (teacher: ResNet50_vd 1 ) MobileNetV2 72.15%/90.65% 74.30%/91.52% (teacher: ResNet50_vd) ResNet50 76.50%/93.00% 77.40%/93.48% (teacher: ResNet101 2 ) Note [1] \uff1a ResNet50_vd \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a79.12%/94.44% \u5e26_vd\u540e\u7f00\u4ee3\u8868\u5f00\u542f\u4e86Mixup\u8bad\u7ec3\uff0cMixup\u76f8\u5173\u4ecb\u7ecd\u53c2\u8003 mixup: Beyond Empirical Risk Minimization [2] \uff1a ResNet101 \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a77.56%/93.64% 3.2 \u76ee\u6807\u68c0\u6d4b # \u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u6570\u636e\u96c6 \u8f93\u5165\u5c3a\u5bf8 Image/GPU baseline \u84b8\u998f\u540e mAP MobileNet-V1-YOLOv3 Pasacl VOC 640 16 76.2 79.0 (teacher: ResNet34-YOLOv3-VOC 3 ) MobileNet-V1-YOLOv3 Pasacl VOC 416 76.7 78.2 MobileNet-V1-YOLOv3 Pasacl VOC 320 75.2 75.5 MobileNet-V1-YOLOv3 COCO 640 29.3 31.0 (teacher: ResNet34-YOLOv3-COCO 4 ) MobileNet-V1-YOLOv3 COCO 416 MobileNet-V1-YOLOv3 COCO 320 Note [3] \uff1a ResNet34-YOLOv3-VOC \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a82.6 [4] \uff1a ResNet34-YOLOv3-COCO \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a36.2","title":"\u6a21\u578b\u5e93"},{"location":"model_zoo/#1","text":"","title":"1. 
\u91cf\u5316"},{"location":"model_zoo/#11","text":"\u6570\u636e\u96c6\uff1aImageNet1000\u7c7b \u8bc4\u4ef7\u6307\u6807\uff1aTop-1/Top-5\u51c6\u786e\u7387 Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 MobileNetV1 FP32 70.99%/89.68% xx%/xx% xx%/xx% MobileNetV2 FP32 72.15%/90.65% ResNet50 FP32 76.50%/93.00% \u91cf\u5316\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 MobileNetV1 17M xxM xxM MobileNetV2 xxM ResNet50 99M","title":"1.1 \u56fe\u8c61\u5206\u7c7b"},{"location":"model_zoo/#12","text":"\u6570\u636e\u96c6\uff1aCOCO 2017 Model \u8f93\u5165\u5c3a\u5bf8 Image/GPU FP32 BoxAP \u79bb\u7ebf\u91cf\u5316 BoxAP \u91cf\u5316\u8bad\u7ec3 BoxAP MobileNet-V1-YOLOv3 608 8 29.3 xx xx MobileNet-V1-YOLOv3 416 MobileNet-V1-YOLOv3 320 R50-dcn-YOLOv3 608 41.4 R50-dcn-YOLOv3 416 R50-dcn-YOLOv3 320 \u91cf\u5316\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 MobileNet-V1-YOLOv3 xxM xxM xxM R50-dcn-YOLOv3 xxM","title":"1.2 \u76ee\u6807\u68c0\u6d4b"},{"location":"model_zoo/#13","text":"\u6570\u636e\u96c6\uff1aCityscapes Model FP32 mIoU \u79bb\u7ebf\u91cf\u5316 mIoU \u91cf\u5316\u8bad\u7ec3 mIoU DeepLabv3+/MobileNetv1 63.26 xx xx DeepLabv3+/MobileNetv2 69.81 \u91cf\u5316\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model FP32 \u79bb\u7ebf\u91cf\u5316 \u91cf\u5316\u8bad\u7ec3 DeepLabv3+/MobileNetv1 xxM xxM xxM DeepLabv3+/MobileNetv2 xxM","title":"1.3 \u56fe\u50cf\u5206\u5272"},{"location":"model_zoo/#2","text":"","title":"2. \u526a\u679d"},{"location":"model_zoo/#21","text":"\u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model Top-1/Top-5 MobileNetV1 70.99%/89.68% MobileNetV1 uniform -50% MobileNetV1 sensitive -xx% MobileNetV2 72.15%/90.65% MobileNetV2 uniform -50% MobileNetV2 sensitive -xx% ResNet34 74.57%/92.14% ResNet34 uniform -50% ResNet34 auto -50% \u526a\u679d\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u548c\u8ba1\u7b97\u91cf\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model baseline FLOPs baseline size \u526a\u679d\u540e FlOPs \u526a\u679d\u540e size MobileNetV1 xx xx xx xx MobileNetV2 xx ResNet34 xx","title":"2.1 \u56fe\u50cf\u5206\u7c7b"},{"location":"model_zoo/#22","text":"\u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u6570\u636e\u96c6 \u8f93\u5165\u5c3a\u5bf8 Image/GPU baseline mAP \u654f\u611f\u5ea6\u526a\u679d mAP MobileNet-V1-YOLOv3 Pasacl VOC 608 8 76.2 77.59 (-50%) MobileNet-V1-YOLOv3 Pasacl VOC 416 76.7 xx MobileNet-V1-YOLOv3 Pasacl VOC 320 75.2 xx MobileNet-V1-YOLOv3 COCO 608 29.3 29.56 (-20%) MobileNet-V1-YOLOv3 COCO 416 29.3 xx MobileNet-V1-YOLOv3 COCO 320 27.1 xx R50-dcn-YOLOv3 COCO 608 41.4 37.8 (-30%) R50-dcn-YOLOv3 COCO 416 R50-dcn-YOLOv3 COCO 320 \u526a\u679d\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u548c\u8ba1\u7b97\u91cf\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model baseline FLOPs baseline size \u526a\u679d\u540e FlOPs \u526a\u679d\u540e size MobileNet-V1-YOLOv3-VOC xx xxM xx xxM MobileNet-V1-YOLOv3-COCO xx R50-dcn-YOLOv3-COCO xx","title":"2.2 \u76ee\u6807\u68c0\u6d4b"},{"location":"model_zoo/#23","text":"\u6570\u636e\u96c6\uff1aCityscapes Model Baseline mIoU xx\u526a\u679d mIoU DeepLabv3+/MobileNetv2 69.81 xx \u526a\u679d\u524d\u540e\uff0c\u6a21\u578b\u5927\u5c0f\u548c\u8ba1\u7b97\u91cf\u7684\u53d8\u5316\u5bf9\u6bd4\u5982\u4e0b\uff1a Model baseline FLOPs baseline size \u526a\u679d\u540e FlOPs 
\u526a\u679d\u540e size DeepLabv3+/MobileNetv2 xx xxM xx xxM","title":"2.3 \u56fe\u50cf\u5206\u5272"},{"location":"model_zoo/#3","text":"","title":"3. \u84b8\u998f"},{"location":"model_zoo/#31","text":"\u6570\u636e\u96c6\uff1aImageNet1000\u7c7b \u8bc4\u4ef7\u6307\u6807\uff1aTop-1/Top-5\u51c6\u786e\u7387 Model baseline \u84b8\u998f\u540e MobileNetV1 70.99%/89.68% 72.79%/90.69% (teacher: ResNet50_vd 1 ) MobileNetV2 72.15%/90.65% 74.30%/91.52% (teacher: ResNet50_vd) ResNet50 76.50%/93.00% 77.40%/93.48% (teacher: ResNet101 2 ) Note [1] \uff1a ResNet50_vd \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a79.12%/94.44% \u5e26_vd\u540e\u7f00\u4ee3\u8868\u5f00\u542f\u4e86Mixup\u8bad\u7ec3\uff0cMixup\u76f8\u5173\u4ecb\u7ecd\u53c2\u8003 mixup: Beyond Empirical Risk Minimization [2] \uff1a ResNet101 \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a77.56%/93.64%","title":"3.1 \u56fe\u8c61\u5206\u7c7b"},{"location":"model_zoo/#32","text":"\u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u6570\u636e\u96c6 \u8f93\u5165\u5c3a\u5bf8 Image/GPU baseline \u84b8\u998f\u540e mAP MobileNet-V1-YOLOv3 Pasacl VOC 640 16 76.2 79.0 (teacher: ResNet34-YOLOv3-VOC 3 ) MobileNet-V1-YOLOv3 Pasacl VOC 416 76.7 78.2 MobileNet-V1-YOLOv3 Pasacl VOC 320 75.2 75.5 MobileNet-V1-YOLOv3 COCO 640 29.3 31.0 (teacher: ResNet34-YOLOv3-COCO 4 ) MobileNet-V1-YOLOv3 COCO 416 MobileNet-V1-YOLOv3 COCO 320 Note [3] \uff1a ResNet34-YOLOv3-VOC \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a82.6 [4] \uff1a ResNet34-YOLOv3-COCO \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a36.2","title":"3.2 \u76ee\u6807\u68c0\u6d4b"},{"location":"model_zoo2/","text":"1. \u91cf\u5316 # 1.1 \u56fe\u8c61\u5206\u7c7b # \u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model \u538b\u7f29\u65b9\u6cd5 Top-1/Top-5 \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 \u4e0b\u8f7d MobileNetV1 - 70.99%/89.68% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 quant_psot xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 quant_aware xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 - 72.15%/90.65% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 quant_post xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 quant_aware xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 ResNet50 - 76.50%/93.00% xx \u4e0b\u8f7d\u94fe\u63a5 ResNet50 quant_post xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 ResNet50 quant_aware xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 1.2 \u76ee\u6807\u68c0\u6d4b # \u6570\u636e\u96c6\uff1aCOCO 2017 Model \u538b\u7f29\u65b9\u6cd5 Image/GPU \u8f93\u5165608 Box AP \u8f93\u5165416 Box AP \u8f93\u5165320 Box AP \u6a21\u578b\u5927\u5c0f(MB) \u4e0b\u8f7d MobileNet-V1-YOLOv3 - 8 29.3 29.3 27.1 xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 quant_post 8 xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 quant_aware 8 xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 FP32 - 8 41.4 - - xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 quant_post 8 xx - - xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 quant_aware 8 xx - - xx \u4e0b\u8f7d\u94fe\u63a5 \u6570\u636e\u96c6\uff1aWIDER-FACE Model \u538b\u7f29\u65b9\u6cd5 Image/GPU \u8f93\u5165\u5c3a\u5bf8 Easy/Medium/Hard \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 \u4e0b\u8f7d BlazeFace - 8 640 0.915/0.892/0.797 xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace quant_post 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace quant_aware 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-Lite - 8 640 0.909/0.885/0.781 xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-Lite quant_post 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-Lite quant_aware 8 640 xx/xx/xx xx 
\u4e0b\u8f7d\u94fe\u63a5 BlazeFace-NAS - 8 640 0.837/0.807/0.658 xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-NAS quant_post 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-NAS quant_aware 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 1.3 \u56fe\u50cf\u5206\u5272 # \u6570\u636e\u96c6\uff1aCityscapes Model \u538b\u7f29\u65b9\u6cd5 mIoU \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 \u4e0b\u8f7d DeepLabv3+/MobileNetv1 - 63.26 xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv1 quant_post xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv1 quant_aware xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 - 69.81 xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 quant_post xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 quant_aware xx xx \u4e0b\u8f7d\u94fe\u63a5 2. \u526a\u679d # 2.1 \u56fe\u50cf\u5206\u7c7b # \u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model \u538b\u7f29\u65b9\u6cd5 Top-1/Top-5 \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 FLOPs \u4e0b\u8f7d MobileNetV1 - 70.99%/89.68% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 uniform -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 sensitive -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 - 72.15%/90.65% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 uniform -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 sensitive -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 ResNet34 - 74.57%/92.14% xx xx \u4e0b\u8f7d\u94fe\u63a5 ResNet34 uniform -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 ResNet34 auto -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 2.2 \u76ee\u6807\u68c0\u6d4b # \u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u538b\u7f29\u65b9\u6cd5 \u6570\u636e\u96c6 Image/GPU \u8f93\u5165608 mAP \u8f93\u5165416 mAP \u8f93\u5165320 mAP \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 FLOPs \u4e0b\u8f7d MobileNet-V1-YOLOv3 - Pasacl VOC 8 76.2 76.7 75.3 xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 uniform -xx% Pasacl VOC 8 xx xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 - COCO 8 29.3 29.3 27.1 xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 uniform -xx% COCO 8 xx xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 - COCO 8 41.4 - - xx xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 uniform -xx% COCO 8 xx - - xx xx \u4e0b\u8f7d\u94fe\u63a5 2.3 \u56fe\u50cf\u5206\u5272 # \u6570\u636e\u96c6\uff1aCityscapes Model \u538b\u7f29\u65b9\u6cd5 mIoU \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 FLOPs \u4e0b\u8f7d DeepLabv3+/MobileNetv2 - 69.81 xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 prune -xx% xx xx xx \u4e0b\u8f7d\u94fe\u63a5 3. 
\u84b8\u998f # 3.1 \u56fe\u8c61\u5206\u7c7b # \u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model \u84b8\u998f teacher baseline \u4e0b\u8f7d MobileNetV1 - 70.99%/89.68% \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 ResNet50_vd 1 72.79%/90.69% \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 - 72.15%/90.65% \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 ResNet50_vd 1 74.30%/91.52% \u4e0b\u8f7d\u94fe\u63a5 ResNet50 - 76.50%/93.00% \u4e0b\u8f7d\u94fe\u63a5 ResNet50 ResNet101 2 77.40%/93.48% \u4e0b\u8f7d\u94fe\u63a5 Note [1] \uff1a ResNet50_vd \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a79.12%/94.44% \u5e26_vd\u540e\u7f00\u4ee3\u8868\u5f00\u542f\u4e86Mixup\u8bad\u7ec3\uff0cMixup\u76f8\u5173\u4ecb\u7ecd\u53c2\u8003 mixup: Beyond Empirical Risk Minimization [2] \uff1a ResNet101 \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a77.56%/93.64% 3.2 \u76ee\u6807\u68c0\u6d4b # \u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u84b8\u998f teacher \u6570\u636e\u96c6 Image/GPU \u8f93\u5165640 mAP \u8f93\u5165416 mAP \u8f93\u5165320 mAP \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 - Pasacl VOC 16 76.2 76.7 75.3 \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 ResNet34-YOLOv3-VOC 3 Pasacl VOC 16 xx xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 - COCO 16 29.3 29.3 27.1 \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 ResNet34-YOLOv3-COCO 4 COCO 16 xx xx xx \u4e0b\u8f7d\u94fe\u63a5 Note [3] \uff1a ResNet34-YOLOv3-VOC \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a82.6 [4] \uff1a ResNet34-YOLOv3-COCO \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a36.2","title":"\u6a21\u578b\u5e932"},{"location":"model_zoo2/#1","text":"","title":"1. \u91cf\u5316"},{"location":"model_zoo2/#11","text":"\u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model \u538b\u7f29\u65b9\u6cd5 Top-1/Top-5 \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 \u4e0b\u8f7d MobileNetV1 - 70.99%/89.68% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 quant_psot xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 quant_aware xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 - 72.15%/90.65% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 quant_post xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 quant_aware xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 ResNet50 - 76.50%/93.00% xx \u4e0b\u8f7d\u94fe\u63a5 ResNet50 quant_post xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5 ResNet50 quant_aware xx%/xx% xx \u4e0b\u8f7d\u94fe\u63a5","title":"1.1 \u56fe\u8c61\u5206\u7c7b"},{"location":"model_zoo2/#12","text":"\u6570\u636e\u96c6\uff1aCOCO 2017 Model \u538b\u7f29\u65b9\u6cd5 Image/GPU \u8f93\u5165608 Box AP \u8f93\u5165416 Box AP \u8f93\u5165320 Box AP \u6a21\u578b\u5927\u5c0f(MB) \u4e0b\u8f7d MobileNet-V1-YOLOv3 - 8 29.3 29.3 27.1 xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 quant_post 8 xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 quant_aware 8 xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 FP32 - 8 41.4 - - xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 quant_post 8 xx - - xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 quant_aware 8 xx - - xx \u4e0b\u8f7d\u94fe\u63a5 \u6570\u636e\u96c6\uff1aWIDER-FACE Model \u538b\u7f29\u65b9\u6cd5 Image/GPU \u8f93\u5165\u5c3a\u5bf8 Easy/Medium/Hard \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 \u4e0b\u8f7d BlazeFace - 8 640 0.915/0.892/0.797 xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace quant_post 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace quant_aware 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-Lite - 8 640 0.909/0.885/0.781 xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-Lite quant_post 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-Lite 
quant_aware 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-NAS - 8 640 0.837/0.807/0.658 xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-NAS quant_post 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5 BlazeFace-NAS quant_aware 8 640 xx/xx/xx xx \u4e0b\u8f7d\u94fe\u63a5","title":"1.2 \u76ee\u6807\u68c0\u6d4b"},{"location":"model_zoo2/#13","text":"\u6570\u636e\u96c6\uff1aCityscapes Model \u538b\u7f29\u65b9\u6cd5 mIoU \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 \u4e0b\u8f7d DeepLabv3+/MobileNetv1 - 63.26 xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv1 quant_post xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv1 quant_aware xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 - 69.81 xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 quant_post xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 quant_aware xx xx \u4e0b\u8f7d\u94fe\u63a5","title":"1.3 \u56fe\u50cf\u5206\u5272"},{"location":"model_zoo2/#2","text":"","title":"2. \u526a\u679d"},{"location":"model_zoo2/#21","text":"\u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model \u538b\u7f29\u65b9\u6cd5 Top-1/Top-5 \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 FLOPs \u4e0b\u8f7d MobileNetV1 - 70.99%/89.68% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 uniform -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 sensitive -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 - 72.15%/90.65% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 uniform -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 sensitive -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 ResNet34 - 74.57%/92.14% xx xx \u4e0b\u8f7d\u94fe\u63a5 ResNet34 uniform -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5 ResNet34 auto -xx% xx%/xx% xx xx \u4e0b\u8f7d\u94fe\u63a5","title":"2.1 \u56fe\u50cf\u5206\u7c7b"},{"location":"model_zoo2/#22","text":"\u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u538b\u7f29\u65b9\u6cd5 \u6570\u636e\u96c6 Image/GPU \u8f93\u5165608 mAP \u8f93\u5165416 mAP \u8f93\u5165320 mAP \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 FLOPs \u4e0b\u8f7d MobileNet-V1-YOLOv3 - Pasacl VOC 8 76.2 76.7 75.3 xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 uniform -xx% Pasacl VOC 8 xx xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 - COCO 8 29.3 29.3 27.1 xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 uniform -xx% COCO 8 xx xx xx xx xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 - COCO 8 41.4 - - xx xx \u4e0b\u8f7d\u94fe\u63a5 R50-dcn-YOLOv3 uniform -xx% COCO 8 xx - - xx xx \u4e0b\u8f7d\u94fe\u63a5","title":"2.2 \u76ee\u6807\u68c0\u6d4b"},{"location":"model_zoo2/#23","text":"\u6570\u636e\u96c6\uff1aCityscapes Model \u538b\u7f29\u65b9\u6cd5 mIoU \u6a21\u578b\u5927\u5c0f\uff08MB\uff09 FLOPs \u4e0b\u8f7d DeepLabv3+/MobileNetv2 - 69.81 xx xx \u4e0b\u8f7d\u94fe\u63a5 DeepLabv3+/MobileNetv2 prune -xx% xx xx xx \u4e0b\u8f7d\u94fe\u63a5","title":"2.3 \u56fe\u50cf\u5206\u5272"},{"location":"model_zoo2/#3","text":"","title":"3. 
\u84b8\u998f"},{"location":"model_zoo2/#31","text":"\u6570\u636e\u96c6\uff1aImageNet1000\u7c7b Model \u84b8\u998f teacher baseline \u4e0b\u8f7d MobileNetV1 - 70.99%/89.68% \u4e0b\u8f7d\u94fe\u63a5 MobileNetV1 ResNet50_vd 1 72.79%/90.69% \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 - 72.15%/90.65% \u4e0b\u8f7d\u94fe\u63a5 MobileNetV2 ResNet50_vd 1 74.30%/91.52% \u4e0b\u8f7d\u94fe\u63a5 ResNet50 - 76.50%/93.00% \u4e0b\u8f7d\u94fe\u63a5 ResNet50 ResNet101 2 77.40%/93.48% \u4e0b\u8f7d\u94fe\u63a5 Note [1] \uff1a ResNet50_vd \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a79.12%/94.44% \u5e26_vd\u540e\u7f00\u4ee3\u8868\u5f00\u542f\u4e86Mixup\u8bad\u7ec3\uff0cMixup\u76f8\u5173\u4ecb\u7ecd\u53c2\u8003 mixup: Beyond Empirical Risk Minimization [2] \uff1a ResNet101 \u9884\u8bad\u7ec3\u6a21\u578bTop-1/Top-5\u51c6\u786e\u7387\u5206\u522b\u4e3a77.56%/93.64%","title":"3.1 \u56fe\u8c61\u5206\u7c7b"},{"location":"model_zoo2/#32","text":"\u6570\u636e\u96c6\uff1aPasacl VOC & COCO 2017 Model \u84b8\u998f teacher \u6570\u636e\u96c6 Image/GPU \u8f93\u5165640 mAP \u8f93\u5165416 mAP \u8f93\u5165320 mAP \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 - Pasacl VOC 16 76.2 76.7 75.3 \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 ResNet34-YOLOv3-VOC 3 Pasacl VOC 16 xx xx xx \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 - COCO 16 29.3 29.3 27.1 \u4e0b\u8f7d\u94fe\u63a5 MobileNet-V1-YOLOv3 ResNet34-YOLOv3-COCO 4 COCO 16 xx xx xx \u4e0b\u8f7d\u94fe\u63a5 Note [3] \uff1a ResNet34-YOLOv3-VOC \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a82.6 [4] \uff1a ResNet34-YOLOv3-COCO \u9884\u8bad\u7ec3\u6a21\u578b\u7684Box AP\u4e3a36.2","title":"3.2 \u76ee\u6807\u68c0\u6d4b"},{"location":"table_latency/","text":"\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868 # \u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u7528\u4e8e\u5feb\u901f\u8bc4\u4f30\u4e00\u4e2a\u6a21\u578b\u5728\u7279\u5b9a\u786c\u4ef6\u73af\u5883\u548c\u63a8\u7406\u5f15\u64ce\u4e0a\u7684\u63a8\u7406\u901f\u5ea6\u3002 \u8be5\u6587\u6863\u4e3b\u8981\u7528\u4e8e\u5b9a\u4e49PaddleSlim\u652f\u6301\u7684\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u7684\u683c\u5f0f\u3002 \u6982\u8ff0 # \u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u4e2d\u5b58\u653e\u7740\u6240\u6709\u53ef\u80fd\u7684\u64cd\u4f5c\u5bf9\u5e94\u7684\u5ef6\u65f6\u4fe1\u606f\uff0c\u8be5\u8868\u4e2d\u7684\u4e00\u4e2a\u64cd\u4f5c\u5305\u62ec\u64cd\u4f5c\u7c7b\u578b\u548c\u64cd\u4f5c\u53c2\u6570\uff0c\u6bd4\u5982\uff1a\u64cd\u4f5c\u7c7b\u578b\u53ef\u4ee5\u662f conv2d \uff0c\u5bf9\u5e94\u7684\u64cd\u4f5c\u53c2\u6570\u6709\u8f93\u5165\u7279\u5f81\u56fe\u7684\u5927\u5c0f\u3001\u5377\u79ef\u6838\u4e2a\u6570\u3001\u5377\u79ef\u6838\u5927\u5c0f\u7b49\u3002 \u7ed9\u5b9a\u64cd\u4f5c\u7684\u5ef6\u65f6\u4f9d\u8d56\u4e8e\u786c\u4ef6\u73af\u5883\u548c\u63a8\u7406\u5f15\u64ce\u3002 \u6574\u4f53\u683c\u5f0f # \u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u4ee5\u6587\u4ef6\u6216\u591a\u884c\u5b57\u7b26\u4e32\u7684\u5f62\u5f0f\u4fdd\u5b58\u3002 \u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u7b2c\u4e00\u884c\u4fdd\u5b58\u7248\u672c\u4fe1\u606f\uff0c\u540e\u7eed\u6bcf\u884c\u4e3a\u4e00\u4e2a\u64cd\u4f5c\u548c\u5bf9\u5e94\u7684\u5ef6\u65f6\u4fe1\u606f\u3002 \u7248\u672c\u4fe1\u606f # \u7248\u672c\u4fe1\u606f\u4ee5\u82f1\u6587\u5b57\u7b26\u9017\u53f7\u5206\u5272\uff0c\u5185\u5bb9\u4f9d\u6b21\u4e3a\u786c\u4ef6\u73af\u5883\u540d\u79f0\u3001\u63a8\u7406\u5f15\u64ce\u540d\u79f0\u548c\u65f6\u95f4\u6233\u3002 \u786c\u4ef6\u73af\u5883\u540d\u79f0\uff1a 
\u7528\u4e8e\u6807\u8bc6\u786c\u4ef6\u73af\u5883\uff0c\u53ef\u4ee5\u5305\u542b\u8ba1\u7b97\u67b6\u6784\u7c7b\u578b\u3001\u7248\u672c\u53f7\u7b49\u4fe1\u606f\u3002 \u63a8\u7406\u5f15\u64ce\u540d\u79f0\uff1a \u7528\u4e8e\u6807\u8bc6\u63a8\u7406\u5f15\u64ce\uff0c\u53ef\u4ee5\u5305\u542b\u63a8\u7406\u5f15\u64ce\u540d\u79f0\u3001\u7248\u672c\u53f7\u3001\u4f18\u5316\u9009\u9879\u7b49\u4fe1\u606f\u3002 \u65f6\u95f4\u6233\uff1a \u8be5\u8bc4\u4f30\u8868\u7684\u521b\u5efa\u65f6\u95f4\u3002 \u64cd\u4f5c\u4fe1\u606f # \u64cd\u4f5c\u4fe1\u606f\u5b57\u6bb5\u4e4b\u95f4\u4ee5\u9017\u53f7\u5206\u5272\u3002\u64cd\u4f5c\u4fe1\u606f\u4e0e\u5ef6\u8fdf\u4fe1\u606f\u4e4b\u95f4\u4ee5\u5236\u8868\u7b26\u5206\u5272\u3002 conv2d # \u683c\u5f0f 1 op_type , flag_bias , flag_relu , n_in , c_in , h_in , w_in , c_out , groups , kernel , padding , stride , dilation \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 flag_bias (int) - \u662f\u5426\u6709 bias\uff080\uff1a\u65e0\uff0c1\uff1a\u6709\uff09\u3002 flag_relu (int) - \u662f\u5426\u6709 relu\uff080\uff1a\u65e0\uff0c1\uff1a\u6709\uff09\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 c_out (int) - \u8f93\u51fa Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 groups (int) - \u5377\u79ef\u4e8c\u7ef4\u5c42\uff08Conv2D Layer\uff09\u7684\u7ec4\u6570\u3002 kernel (int) - \u5377\u79ef\u6838\u5927\u5c0f\u3002 padding (int) - \u586b\u5145 (padding) \u5927\u5c0f\u3002 stride (int) - \u6b65\u957f (stride) \u5927\u5c0f\u3002 dilation (int) - \u81a8\u80c0 (dilation) \u5927\u5c0f\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4 activation # \u683c\u5f0f 1 op_type , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4 batch_norm # \u683c\u5f0f 1 op_type , active_type , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 active_type (string|None) - \u6fc0\u6d3b\u51fd\u6570\u7c7b\u578b\uff0c\u5305\u542b\uff1arelu, prelu, sigmoid, relu6, tanh\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4 eltwise # \u683c\u5f0f 1 op_type , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4 pooling # \u683c\u5f0f 1 op_type , 
flag_global_pooling , n_in , c_in , h_in , w_in , kernel , padding , stride , ceil_mode , pool_type \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 flag_global_pooling (int) - \u662f\u5426\u4e3a\u5168\u5c40\u6c60\u5316\uff080\uff1a\u4e0d\u662f\uff0c1\uff1a\u662f\uff09\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 kernel (int) - \u5377\u79ef\u6838\u5927\u5c0f\u3002 padding (int) - \u586b\u5145 (padding) \u5927\u5c0f\u3002 stride (int) - \u6b65\u957f (stride) \u5927\u5c0f\u3002 ceil_mode (int) - \u662f\u5426\u7528 ceil \u51fd\u6570\u8ba1\u7b97\u8f93\u51fa\u9ad8\u5ea6\u548c\u5bbd\u5ea6\u30020 \u8868\u793a\u4f7f\u7528 floor \u51fd\u6570\uff0c1 \u8868\u793a\u4f7f\u7528 ceil \u51fd\u6570\u3002 pool_type (int) - \u6c60\u5316\u7c7b\u578b\uff0c\u5176\u4e2d 1 \u8868\u793a pooling_max\uff0c2 \u8868\u793a pooling_average_include_padding\uff0c3 \u8868\u793a pooling_average_exclude_padding\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4 softmax # \u683c\u5f0f 1 op_type , axis , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 axis (int) - \u6267\u884c softmax \u8ba1\u7b97\u7684\u7ef4\u5ea6\u7d22\u5f15\uff0c\u5e94\u8be5\u5728 [\u22121\uff0crank \u2212 1] \u8303\u56f4\u5185\uff0c\u5176\u4e2d rank \u662f\u8f93\u5165\u53d8\u91cf\u7684\u79e9\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4","title":"\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868"},{"location":"table_latency/#_1","text":"\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u7528\u4e8e\u5feb\u901f\u8bc4\u4f30\u4e00\u4e2a\u6a21\u578b\u5728\u7279\u5b9a\u786c\u4ef6\u73af\u5883\u548c\u63a8\u7406\u5f15\u64ce\u4e0a\u7684\u63a8\u7406\u901f\u5ea6\u3002 \u8be5\u6587\u6863\u4e3b\u8981\u7528\u4e8e\u5b9a\u4e49PaddleSlim\u652f\u6301\u7684\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u7684\u683c\u5f0f\u3002","title":"\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868"},{"location":"table_latency/#_2","text":"\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u4e2d\u5b58\u653e\u7740\u6240\u6709\u53ef\u80fd\u7684\u64cd\u4f5c\u5bf9\u5e94\u7684\u5ef6\u65f6\u4fe1\u606f\uff0c\u8be5\u8868\u4e2d\u7684\u4e00\u4e2a\u64cd\u4f5c\u5305\u62ec\u64cd\u4f5c\u7c7b\u578b\u548c\u64cd\u4f5c\u53c2\u6570\uff0c\u6bd4\u5982\uff1a\u64cd\u4f5c\u7c7b\u578b\u53ef\u4ee5\u662f conv2d \uff0c\u5bf9\u5e94\u7684\u64cd\u4f5c\u53c2\u6570\u6709\u8f93\u5165\u7279\u5f81\u56fe\u7684\u5927\u5c0f\u3001\u5377\u79ef\u6838\u4e2a\u6570\u3001\u5377\u79ef\u6838\u5927\u5c0f\u7b49\u3002 \u7ed9\u5b9a\u64cd\u4f5c\u7684\u5ef6\u65f6\u4f9d\u8d56\u4e8e\u786c\u4ef6\u73af\u5883\u548c\u63a8\u7406\u5f15\u64ce\u3002","title":"\u6982\u8ff0"},{"location":"table_latency/#_3","text":"\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u4ee5\u6587\u4ef6\u6216\u591a\u884c\u5b57\u7b26\u4e32\u7684\u5f62\u5f0f\u4fdd\u5b58\u3002 
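To make the op formats above concrete, here is a minimal Python sketch of reading a hardware latency table: a version line (hardware environment name, inference engine name, timestamp) followed by one line per op, with comma-separated op parameters and a tab-separated latency. The file contents shown are hypothetical sample values; only the field layout follows the conv2d and pooling formats defined above.

```python
# Minimal sketch of parsing a latency table in the format described above.
# The sample lines (hardware name, engine name, latency numbers) are
# hypothetical; only the field layout follows the documented format.

def parse_latency_table(lines):
    """Return (version_info, op_latency): version_info is the first line split
    on commas, op_latency maps the comma-separated op-parameter string to its
    latency value."""
    version = lines[0].strip().split(",")   # hardware name, engine name, timestamp
    op_latency = {}
    for line in lines[1:]:
        line = line.strip()
        if not line:
            continue
        op_params, latency = line.split("\t")   # op info and latency are tab-separated
        op_latency[op_params] = float(latency)
    return version, op_latency


if __name__ == "__main__":
    sample = [
        "armv8,paddlelite,20191122",  # version line: hardware, engine, timestamp
        # conv2d: op_type,flag_bias,flag_relu,n_in,c_in,h_in,w_in,c_out,groups,kernel,padding,stride,dilation \t latency
        "conv2d,1,1,1,32,112,112,64,1,3,1,1,1\t4.35",
        # pooling: op_type,flag_global_pooling,n_in,c_in,h_in,w_in,kernel,padding,stride,ceil_mode,pool_type \t latency
        "pooling,0,1,64,56,56,2,0,2,0,1\t0.51",
    ]
    version, table = parse_latency_table(sample)
    print(version)
    print(table["conv2d,1,1,1,32,112,112,64,1,3,1,1,1"])
```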
\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u7b2c\u4e00\u884c\u4fdd\u5b58\u7248\u672c\u4fe1\u606f\uff0c\u540e\u7eed\u6bcf\u884c\u4e3a\u4e00\u4e2a\u64cd\u4f5c\u548c\u5bf9\u5e94\u7684\u5ef6\u65f6\u4fe1\u606f\u3002","title":"\u6574\u4f53\u683c\u5f0f"},{"location":"table_latency/#_4","text":"\u7248\u672c\u4fe1\u606f\u4ee5\u82f1\u6587\u5b57\u7b26\u9017\u53f7\u5206\u5272\uff0c\u5185\u5bb9\u4f9d\u6b21\u4e3a\u786c\u4ef6\u73af\u5883\u540d\u79f0\u3001\u63a8\u7406\u5f15\u64ce\u540d\u79f0\u548c\u65f6\u95f4\u6233\u3002 \u786c\u4ef6\u73af\u5883\u540d\u79f0\uff1a \u7528\u4e8e\u6807\u8bc6\u786c\u4ef6\u73af\u5883\uff0c\u53ef\u4ee5\u5305\u542b\u8ba1\u7b97\u67b6\u6784\u7c7b\u578b\u3001\u7248\u672c\u53f7\u7b49\u4fe1\u606f\u3002 \u63a8\u7406\u5f15\u64ce\u540d\u79f0\uff1a \u7528\u4e8e\u6807\u8bc6\u63a8\u7406\u5f15\u64ce\uff0c\u53ef\u4ee5\u5305\u542b\u63a8\u7406\u5f15\u64ce\u540d\u79f0\u3001\u7248\u672c\u53f7\u3001\u4f18\u5316\u9009\u9879\u7b49\u4fe1\u606f\u3002 \u65f6\u95f4\u6233\uff1a \u8be5\u8bc4\u4f30\u8868\u7684\u521b\u5efa\u65f6\u95f4\u3002","title":"\u7248\u672c\u4fe1\u606f"},{"location":"table_latency/#_5","text":"\u64cd\u4f5c\u4fe1\u606f\u5b57\u6bb5\u4e4b\u95f4\u4ee5\u9017\u53f7\u5206\u5272\u3002\u64cd\u4f5c\u4fe1\u606f\u4e0e\u5ef6\u8fdf\u4fe1\u606f\u4e4b\u95f4\u4ee5\u5236\u8868\u7b26\u5206\u5272\u3002","title":"\u64cd\u4f5c\u4fe1\u606f"},{"location":"table_latency/#conv2d","text":"\u683c\u5f0f 1 op_type , flag_bias , flag_relu , n_in , c_in , h_in , w_in , c_out , groups , kernel , padding , stride , dilation \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 flag_bias (int) - \u662f\u5426\u6709 bias\uff080\uff1a\u65e0\uff0c1\uff1a\u6709\uff09\u3002 flag_relu (int) - \u662f\u5426\u6709 relu\uff080\uff1a\u65e0\uff0c1\uff1a\u6709\uff09\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 c_out (int) - \u8f93\u51fa Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 groups (int) - \u5377\u79ef\u4e8c\u7ef4\u5c42\uff08Conv2D Layer\uff09\u7684\u7ec4\u6570\u3002 kernel (int) - \u5377\u79ef\u6838\u5927\u5c0f\u3002 padding (int) - \u586b\u5145 (padding) \u5927\u5c0f\u3002 stride (int) - \u6b65\u957f (stride) \u5927\u5c0f\u3002 dilation (int) - \u81a8\u80c0 (dilation) \u5927\u5c0f\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4","title":"conv2d"},{"location":"table_latency/#activation","text":"\u683c\u5f0f 1 op_type , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4","title":"activation"},{"location":"table_latency/#batch_norm","text":"\u683c\u5f0f 1 op_type , active_type , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 active_type (string|None) - \u6fc0\u6d3b\u51fd\u6570\u7c7b\u578b\uff0c\u5305\u542b\uff1arelu, prelu, sigmoid, relu6, tanh\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 
Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4","title":"batch_norm"},{"location":"table_latency/#eltwise","text":"\u683c\u5f0f 1 op_type , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4","title":"eltwise"},{"location":"table_latency/#pooling","text":"\u683c\u5f0f 1 op_type , flag_global_pooling , n_in , c_in , h_in , w_in , kernel , padding , stride , ceil_mode , pool_type \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 flag_global_pooling (int) - \u662f\u5426\u4e3a\u5168\u5c40\u6c60\u5316\uff080\uff1a\u4e0d\u662f\uff0c1\uff1a\u662f\uff09\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 kernel (int) - \u5377\u79ef\u6838\u5927\u5c0f\u3002 padding (int) - \u586b\u5145 (padding) \u5927\u5c0f\u3002 stride (int) - \u6b65\u957f (stride) \u5927\u5c0f\u3002 ceil_mode (int) - \u662f\u5426\u7528 ceil \u51fd\u6570\u8ba1\u7b97\u8f93\u51fa\u9ad8\u5ea6\u548c\u5bbd\u5ea6\u30020 \u8868\u793a\u4f7f\u7528 floor \u51fd\u6570\uff0c1 \u8868\u793a\u4f7f\u7528 ceil \u51fd\u6570\u3002 pool_type (int) - \u6c60\u5316\u7c7b\u578b\uff0c\u5176\u4e2d 1 \u8868\u793a pooling_max\uff0c2 \u8868\u793a pooling_average_include_padding\uff0c3 \u8868\u793a pooling_average_exclude_padding\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4","title":"pooling"},{"location":"table_latency/#softmax","text":"\u683c\u5f0f 1 op_type , axis , n_in , c_in , h_in , w_in \\ tlatency \u5b57\u6bb5\u89e3\u91ca op_type(str) - \u5f53\u524dop\u7c7b\u578b\u3002 axis (int) - \u6267\u884c softmax \u8ba1\u7b97\u7684\u7ef4\u5ea6\u7d22\u5f15\uff0c\u5e94\u8be5\u5728 [\u22121\uff0crank \u2212 1] \u8303\u56f4\u5185\uff0c\u5176\u4e2d rank \u662f\u8f93\u5165\u53d8\u91cf\u7684\u79e9\u3002 n_in (int) - \u8f93\u5165 Tensor \u7684\u6279\u5c3a\u5bf8 (batch size)\u3002 c_in (int) - \u8f93\u5165 Tensor \u7684\u901a\u9053 (channel) \u6570\u3002 h_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u9ad8\u5ea6\u3002 w_in (int) - \u8f93\u5165 Tensor \u7684\u7279\u5f81\u5bbd\u5ea6\u3002 latency (float) - \u5f53\u524dop\u7684\u5ef6\u65f6\u65f6\u95f4","title":"softmax"},{"location":"algo/algo/","text":"\u76ee\u5f55 # \u91cf\u5316\u539f\u7406\u4ecb\u7ecd \u526a\u88c1\u539f\u7406\u4ecb\u7ecd \u84b8\u998f\u539f\u7406\u4ecb\u7ecd \u8f7b\u91cf\u7ea7\u6a21\u578b\u7ed3\u6784\u641c\u7d22\u539f\u7406\u4ecb\u7ecd 1. 
Quantization Aware Training\u91cf\u5316\u4ecb\u7ecd # 1.1 \u80cc\u666f # \u8fd1\u5e74\u6765\uff0c\u5b9a\u70b9\u91cf\u5316\u4f7f\u7528\u66f4\u5c11\u7684\u6bd4\u7279\u6570\uff08\u59828-bit\u30013-bit\u30012-bit\u7b49\uff09\u8868\u793a\u795e\u7ecf\u7f51\u7edc\u7684\u6743\u91cd\u548c\u6fc0\u6d3b\u5df2\u88ab\u9a8c\u8bc1\u662f\u6709\u6548\u7684\u3002\u5b9a\u70b9\u91cf\u5316\u7684\u4f18\u70b9\u5305\u62ec\u4f4e\u5185\u5b58\u5e26\u5bbd\u3001\u4f4e\u529f\u8017\u3001\u4f4e\u8ba1\u7b97\u8d44\u6e90\u5360\u7528\u4ee5\u53ca\u4f4e\u6a21\u578b\u5b58\u50a8\u9700\u6c42\u7b49\u3002 \u88681: \u4e0d\u540c\u7c7b\u578b\u64cd\u4f5c\u7684\u5f00\u9500\u5bf9\u6bd4 \u7531\u88681\u53ef\u77e5\uff0c\u4f4e\u7cbe\u5ea6\u5b9a\u70b9\u6570\u64cd\u4f5c\u7684\u786c\u4ef6\u9762\u79ef\u5927\u5c0f\u53ca\u80fd\u8017\u6bd4\u9ad8\u7cbe\u5ea6\u6d6e\u70b9\u6570\u8981\u5c11\u51e0\u4e2a\u6570\u91cf\u7ea7\u3002 \u4f7f\u7528\u5b9a\u70b9\u91cf\u5316\u53ef\u5e26\u67654\u500d\u7684\u6a21\u578b\u538b\u7f29\u30014\u500d\u7684\u5185\u5b58\u5e26\u5bbd\u63d0\u5347\uff0c\u4ee5\u53ca\u66f4\u9ad8\u6548\u7684cache\u5229\u7528(\u5f88\u591a\u786c\u4ef6\u8bbe\u5907\uff0c\u5185\u5b58\u8bbf\u95ee\u662f\u4e3b\u8981\u80fd\u8017)\u3002\u9664\u6b64\u4e4b\u5916\uff0c\u8ba1\u7b97\u901f\u5ea6\u4e5f\u4f1a\u66f4\u5feb(\u901a\u5e38\u5177\u67092x-3x\u7684\u6027\u80fd\u63d0\u5347)\u3002\u7531\u88682\u53ef\u77e5\uff0c\u5728\u5f88\u591a\u573a\u666f\u4e0b\uff0c\u5b9a\u70b9\u91cf\u5316\u64cd\u4f5c\u5bf9\u7cbe\u5ea6\u5e76\u4e0d\u4f1a\u9020\u6210\u635f\u5931\u3002\u53e6\u5916\uff0c\u5b9a\u70b9\u91cf\u5316\u5bf9\u795e\u7ecf\u7f51\u7edc\u4e8e\u5d4c\u5165\u5f0f\u8bbe\u5907\u4e0a\u7684\u63a8\u65ad\u6765\u8bf4\u662f\u6781\u5176\u91cd\u8981\u7684\u3002 \u88682\uff1a\u6a21\u578b\u91cf\u5316\u524d\u540e\u7cbe\u5ea6\u5bf9\u6bd4 \u76ee\u524d\uff0c\u5b66\u672f\u754c\u4e3b\u8981\u5c06\u91cf\u5316\u5206\u4e3a\u4e24\u5927\u7c7b\uff1a Post Training Quantization \u548c Quantization Aware Training \u3002 Post Training Quantization \u662f\u6307\u4f7f\u7528KL\u6563\u5ea6\u3001\u6ed1\u52a8\u5e73\u5747\u7b49\u65b9\u6cd5\u786e\u5b9a\u91cf\u5316\u53c2\u6570\u4e14\u4e0d\u9700\u8981\u91cd\u65b0\u8bad\u7ec3\u7684\u5b9a\u70b9\u91cf\u5316\u65b9\u6cd5\u3002 Quantization Aware Training \u662f\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u5bf9\u91cf\u5316\u8fdb\u884c\u5efa\u6a21\u4ee5\u786e\u5b9a\u91cf\u5316\u53c2\u6570\uff0c\u5b83\u4e0e Post Training Quantization \u6a21\u5f0f\u76f8\u6bd4\u53ef\u4ee5\u63d0\u4f9b\u66f4\u9ad8\u7684\u9884\u6d4b\u7cbe\u5ea6\u3002 1.2 \u91cf\u5316\u539f\u7406 # 1.2.1 \u91cf\u5316\u65b9\u5f0f # \u76ee\u524d\uff0c\u5b58\u5728\u7740\u8bb8\u591a\u65b9\u6cd5\u53ef\u4ee5\u5c06\u6d6e\u70b9\u6570\u91cf\u5316\u6210\u5b9a\u70b9\u6570\u3002\u4f8b\u5982\uff1a r = min(max(x, a), b) s = \\frac{b - a}{n - 1} q = \\left \\lfloor \\frac{r - a}{s} \\right \\rceil \u5f0f\u4e2d\uff0c x x \u662f\u5f85\u91cf\u5316\u7684\u6d6e\u70b9\u503c\uff0c [a, b] [a, b] \u662f\u91cf\u5316\u8303\u56f4\uff0c a a \u662f\u5f85\u91cf\u5316\u6d6e\u70b9\u6570\u4e2d\u7684\u6700\u5c0f\u503c\uff0c b b \u662f\u5f85\u91cf\u5316\u6d6e\u70b9\u6570\u4e2d\u7684\u6700\u5927\u503c\u3002 \\left \\lfloor \\right \\rceil \\left \\lfloor \\right \\rceil \u8868\u793a\u5c06\u7ed3\u679c\u56db\u820d\u4e94\u5165\u5230\u6700\u8fd1\u7684\u6574\u6570\u3002\u5982\u679c\u91cf\u5316\u7ea7\u522b\u4e3a k k \uff0c\u5219 n n \u4e3a 2^k 2^k \u3002\u4f8b\u5982\uff0c\u82e5 k k \u4e3a8\uff0c\u5219 n n \u4e3a256\u3002 q q \u662f\u91cf\u5316\u5f97\u5230\u7684\u6574\u6570\u3002 
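A small numeric sketch of the min-max quantization formula above; the clipping range [a, b], the bit width k and the sample inputs are arbitrary illustrative values, not values used by PaddleSlim.

```python
# Illustrative sketch of the min-max quantization formula above:
# r = clip(x, a, b); s = (b - a) / (n - 1); q = round((r - a) / s), with n = 2^k.
# The values of a, b, k and x are arbitrary examples.
import numpy as np

def quantize_min_max(x, a, b, k=8):
    n = 2 ** k                     # number of quantization levels
    r = np.clip(x, a, b)           # r = min(max(x, a), b)
    s = (b - a) / (n - 1)          # quantization step
    q = np.round((r - a) / s)      # nearest integer level in [0, n - 1]
    return q.astype(np.int64), s

x = np.array([-1.2, -0.3, 0.0, 0.7, 1.5])
q, s = quantize_min_max(x, a=-1.0, b=1.0, k=8)
print(q)                           # integer codes
print(q * s + (-1.0))              # approximate reconstruction of the clipped values
```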
PaddleSlim\u6846\u67b6\u4e2d\u9009\u62e9\u7684\u91cf\u5316\u65b9\u6cd5\u4e3a\u6700\u5927\u7edd\u5bf9\u503c\u91cf\u5316( max-abs )\uff0c\u5177\u4f53\u63cf\u8ff0\u5982\u4e0b\uff1a M = max(abs(x)) q = \\left \\lfloor \\frac{x}{M} * (n - 1) \\right \\rceil \u5f0f\u4e2d\uff0c x x \u662f\u5f85\u88ab\u91cf\u5316\u7684\u6d6e\u70b9\u503c\uff0c M M \u662f\u5f85\u91cf\u5316\u6d6e\u70b9\u6570\u4e2d\u7684\u7edd\u5bf9\u503c\u6700\u5927\u503c\u3002 \\left \\lfloor \\right \\rceil \\left \\lfloor \\right \\rceil \u8868\u793a\u5c06\u7ed3\u679c\u56db\u820d\u4e94\u5165\u5230\u6700\u8fd1\u7684\u6574\u6570\u3002\u5bf9\u4e8e8bit\u91cf\u5316\uff0cPaddleSlim\u91c7\u7528 int8_t \uff0c\u5373 n=2^7=128 n=2^7=128 \u3002 q q \u662f\u91cf\u5316\u5f97\u5230\u7684\u6574\u6570\u3002 \u65e0\u8bba\u662f min-max\u91cf\u5316 \u8fd8\u662f max-abs\u91cf\u5316 \uff0c\u4ed6\u4eec\u90fd\u53ef\u4ee5\u8868\u793a\u4e3a\u5982\u4e0b\u5f62\u5f0f\uff1a q = scale * r + b q = scale * r + b \u5176\u4e2d min-max \u548c max-abs \u88ab\u79f0\u4e3a\u91cf\u5316\u53c2\u6570\u6216\u8005\u91cf\u5316\u6bd4\u4f8b\u6216\u8005\u91cf\u5316\u8303\u56f4\u3002 1.2.2 \u91cf\u5316\u8bad\u7ec3 # 1.2.2.1 \u524d\u5411\u4f20\u64ad # \u524d\u5411\u4f20\u64ad\u8fc7\u7a0b\u91c7\u7528\u6a21\u62df\u91cf\u5316\u7684\u65b9\u5f0f\uff0c\u5177\u4f53\u63cf\u8ff0\u5982\u4e0b\uff1a \u56fe1\uff1a\u57fa\u4e8e\u6a21\u62df\u91cf\u5316\u8bad\u7ec3\u7684\u524d\u5411\u8fc7\u7a0b \u7531\u56fe1\u53ef\u77e5\uff0c\u57fa\u4e8e\u6a21\u62df\u91cf\u5316\u8bad\u7ec3\u7684\u524d\u5411\u8fc7\u7a0b\u53ef\u88ab\u63cf\u8ff0\u4e3a\u4ee5\u4e0b\u56db\u4e2a\u90e8\u5206\uff1a 1) \u8f93\u5165\u548c\u6743\u91cd\u5747\u88ab\u91cf\u5316\u62108-bit\u6574\u6570\u3002 2) \u57288-bit\u6574\u6570\u4e0a\u6267\u884c\u77e9\u9635\u4e58\u6cd5\u6216\u8005\u5377\u79ef\u64cd\u4f5c\u3002 3) \u53cd\u91cf\u5316\u77e9\u9635\u4e58\u6cd5\u6216\u8005\u5377\u79ef\u64cd\u4f5c\u7684\u8f93\u51fa\u7ed3\u679c\u4e3a32-bit\u6d6e\u70b9\u578b\u6570\u636e\u3002 4) \u572832-bit\u6d6e\u70b9\u578b\u6570\u636e\u4e0a\u6267\u884c\u504f\u7f6e\u52a0\u6cd5\u64cd\u4f5c\u3002\u6b64\u5904\uff0c\u504f\u7f6e\u5e76\u672a\u88ab\u91cf\u5316\u3002 \u5bf9\u4e8e\u901a\u7528\u77e9\u9635\u4e58\u6cd5( GEMM )\uff0c\u8f93\u5165 X X \u548c\u6743\u91cd W W \u7684\u91cf\u5316\u64cd\u4f5c\u53ef\u88ab\u8868\u8ff0\u4e3a\u5982\u4e0b\u8fc7\u7a0b\uff1a X_q = \\left \\lfloor \\frac{X}{X_m} * (n - 1) \\right \\rceil W_q = \\left \\lfloor \\frac{W}{W_m} * (n - 1) \\right \\rceil \u6267\u884c\u901a\u7528\u77e9\u9635\u4e58\u6cd5\uff1a Y_q = X_q * W_q \u5bf9\u91cf\u5316\u4e58\u79ef\u7ed3\u679c Yq Yq \u8fdb\u884c\u53cd\u91cf\u5316: \\begin{align} Y_{dq} = \\frac{Y_q}{(n - 1) * (n - 1)} * X_m * W_m \\ =\\frac{X_q * W_q}{(n - 1) * (n - 1)} * X_m * W_m \\ =(\\frac{X_q}{n - 1} * X_m) * (\\frac{W_q}{n - 1} * W_m) \\ \\end{align} \u4e0a\u8ff0\u516c\u5f0f\u8868\u660e\u53cd\u91cf\u5316\u64cd\u4f5c\u53ef\u4ee5\u88ab\u79fb\u52a8\u5230 GEMM \u4e4b\u524d\uff0c\u5373\u5148\u5bf9 Xq Xq \u548c Wq Wq \u6267\u884c\u53cd\u91cf\u5316\u64cd\u4f5c\u518d\u505a GEMM \u64cd\u4f5c\u3002\u56e0\u6b64\uff0c\u524d\u5411\u4f20\u64ad\u7684\u5de5\u4f5c\u6d41\u4ea6\u53ef\u8868\u793a\u4e3a\u5982\u4e0b\u65b9\u5f0f\uff1a \u56fe2\uff1a\u57fa\u4e8e\u6a21\u62df\u91cf\u5316\u8bad\u7ec3\u524d\u5411\u8fc7\u7a0b\u7684\u7b49\u4ef7\u5de5\u4f5c\u6d41 
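The equivalent workflow of Figure 2 can be illustrated with a short numpy sketch: quantize the input and the weights with max-abs (8-bit, n = 2^7 = 128), run an integer GEMM, then dequantize the 32-bit result. The tensors and shapes are arbitrary examples, and bias handling is omitted, as in the derivation above.

```python
# Sketch of the simulated-quantization forward pass for a GEMM, following the
# max-abs formulas above (8-bit, n = 2^7 = 128). Tensor shapes and values are
# arbitrary examples; the bias add is omitted as in the derivation.
import numpy as np

n = 2 ** 7                                   # int8 => n = 128, codes in [-127, 127]

def max_abs_quantize(t):
    m = np.max(np.abs(t))                    # M = max(abs(t))
    q = np.round(t / m * (n - 1))            # q = round(t / M * (n - 1))
    return q.astype(np.int32), m             # int32 to avoid overflow in the GEMM

X = np.random.randn(4, 8).astype(np.float32)
W = np.random.randn(8, 3).astype(np.float32)

Xq, Xm = max_abs_quantize(X)
Wq, Wm = max_abs_quantize(W)

Yq = Xq @ Wq                                 # integer GEMM on the quantized operands
Ydq = Yq / ((n - 1) * (n - 1)) * Xm * Wm     # dequantize back to 32-bit float

print(np.max(np.abs(Ydq - X @ W)))           # deviation from the FP32 GEMM result
```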
\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\uff0cPaddleSlim\u4f7f\u7528\u56fe2\u4e2d\u6240\u793a\u7684\u7b49\u4ef7\u5de5\u4f5c\u6d41\u3002\u5728\u8bbe\u8ba1\u4e2d\uff0c\u91cf\u5316Pass\u5728IrGraph\u4e2d\u63d2\u5165\u91cf\u5316op\u548c\u53cd\u91cf\u5316op\u3002\u56e0\u4e3a\u5728\u8fde\u7eed\u7684\u91cf\u5316\u3001\u53cd\u91cf\u5316\u64cd\u4f5c\u4e4b\u540e\u8f93\u5165\u4ecd\u7136\u4e3a32-bit\u6d6e\u70b9\u578b\u6570\u636e\u3002\u56e0\u6b64\uff0cPaddleSlim\u91cf\u5316\u8bad\u7ec3\u6846\u67b6\u6240\u91c7\u7528\u7684\u91cf\u5316\u65b9\u5f0f\u88ab\u79f0\u4e3a\u6a21\u62df\u91cf\u5316\u3002 1.2.2.2 \u53cd\u5411\u4f20\u64ad # \u7531\u56fe3\u53ef\u77e5\uff0c\u6743\u91cd\u66f4\u65b0\u6240\u9700\u7684\u68af\u5ea6\u503c\u53ef\u4ee5\u7531\u91cf\u5316\u540e\u7684\u6743\u91cd\u548c\u91cf\u5316\u540e\u7684\u6fc0\u6d3b\u6c42\u5f97\u3002\u53cd\u5411\u4f20\u64ad\u8fc7\u7a0b\u4e2d\u7684\u6240\u6709\u8f93\u5165\u548c\u8f93\u51fa\u5747\u4e3a32-bit\u6d6e\u70b9\u578b\u6570\u636e\u3002\u6ce8\u610f\uff0c\u68af\u5ea6\u66f4\u65b0\u64cd\u4f5c\u9700\u8981\u5728\u539f\u59cb\u6743\u91cd\u4e0a\u8fdb\u884c\uff0c\u5373\u8ba1\u7b97\u51fa\u7684\u68af\u5ea6\u5c06\u88ab\u52a0\u5230\u539f\u59cb\u6743\u91cd\u4e0a\u800c\u975e\u91cf\u5316\u540e\u6216\u53cd\u91cf\u5316\u540e\u7684\u6743\u91cd\u4e0a\u3002 \u56fe3\uff1a\u57fa\u4e8e\u6a21\u62df\u91cf\u5316\u8bad\u7ec3\u7684\u53cd\u5411\u4f20\u64ad\u548c\u6743\u91cd\u66f4\u65b0\u8fc7\u7a0b \u56e0\u6b64\uff0c\u91cf\u5316Pass\u4e5f\u4f1a\u6539\u53d8\u76f8\u5e94\u53cd\u5411\u7b97\u5b50\u7684\u67d0\u4e9b\u8f93\u5165\u3002 1.2.2.3 \u786e\u5b9a\u91cf\u5316\u6bd4\u4f8b\u7cfb\u6570 # \u5b58\u5728\u7740\u4e24\u79cd\u7b56\u7565\u53ef\u4ee5\u8ba1\u7b97\u6c42\u53d6\u91cf\u5316\u6bd4\u4f8b\u7cfb\u6570\uff0c\u5373\u52a8\u6001\u7b56\u7565\u548c\u9759\u6001\u7b56\u7565\u3002\u52a8\u6001\u7b56\u7565\u4f1a\u5728\u6bcf\u6b21\u8fed\u4ee3\u8fc7\u7a0b\u4e2d\u8ba1\u7b97\u91cf\u5316\u6bd4\u4f8b\u7cfb\u6570\u7684\u503c\u3002\u9759\u6001\u7b56\u7565\u5219\u5bf9\u4e0d\u540c\u7684\u8f93\u5165\u91c7\u7528\u76f8\u540c\u7684\u91cf\u5316\u6bd4\u4f8b\u7cfb\u6570\u3002 \u5bf9\u4e8e\u6743\u91cd\u800c\u8a00\uff0c\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u91c7\u7528\u52a8\u6001\u7b56\u7565\u3002\u6362\u53e5\u8bdd\u8bf4\uff0c\u5728\u6bcf\u6b21\u8fed\u4ee3\u8fc7\u7a0b\u4e2d\u91cf\u5316\u6bd4\u4f8b\u7cfb\u6570\u5747\u4f1a\u88ab\u91cd\u65b0\u8ba1\u7b97\u5f97\u5230\u76f4\u81f3\u8bad\u7ec3\u8fc7\u7a0b\u7ed3\u675f\u3002 \u5bf9\u4e8e\u6fc0\u6d3b\u800c\u8a00\uff0c\u53ef\u4ee5\u9009\u62e9\u52a8\u6001\u7b56\u7565\u4e5f\u53ef\u4ee5\u9009\u62e9\u9759\u6001\u7b56\u7565\u3002\u82e5\u9009\u62e9\u4f7f\u7528\u9759\u6001\u7b56\u7565\uff0c\u5219\u91cf\u5316\u6bd4\u4f8b\u7cfb\u6570\u4f1a\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u88ab\u8bc4\u4f30\u6c42\u5f97\uff0c\u4e14\u5728\u63a8\u65ad\u8fc7\u7a0b\u4e2d\u88ab\u4f7f\u7528(\u4e0d\u540c\u7684\u8f93\u5165\u5747\u4fdd\u6301\u4e0d\u53d8)\u3002\u9759\u6001\u7b56\u7565\u4e2d\u7684\u91cf\u5316\u6bd4\u4f8b\u7cfb\u6570\u53ef\u4e8e\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u901a\u8fc7\u5982\u4e0b\u4e09\u79cd\u65b9\u5f0f\u8fdb\u884c\u8bc4\u4f30\uff1a \u5728\u4e00\u4e2a\u7a97\u53e3\u4e2d\u8ba1\u7b97\u6fc0\u6d3b\u6700\u5927\u7edd\u5bf9\u503c\u7684\u5e73\u5747\u503c\u3002 \u5728\u4e00\u4e2a\u7a97\u53e3\u4e2d\u8ba1\u7b97\u6fc0\u6d3b\u6700\u5927\u7edd\u5bf9\u503c\u7684\u6700\u5927\u503c\u3002 \u5728\u4e00\u4e2a\u7a97\u53e3\u4e2d\u8ba1\u7b97\u6fc0\u6d3b\u6700\u5927\u7edd\u5bf9\u503c\u7684\u6ed1\u52a8\u5e73\u5747\u503c\uff0c\u8ba1\u7b97\u516c\u5f0f\u5982\u4e0b\uff1a Vt = (1 - k) * V + k * V_{t-1} Vt = (1 - k) * V + k * V_{t-1} \u5f0f\u4e2d\uff0c V 
V \u662f\u5f53\u524dbatch\u7684\u6700\u5927\u7edd\u5bf9\u503c\uff0c Vt Vt \u662f\u6ed1\u52a8\u5e73\u5747\u503c\u3002 k k \u662f\u4e00\u4e2a\u56e0\u5b50\uff0c\u4f8b\u5982\u5176\u503c\u53ef\u53d6\u4e3a0.9\u3002 1.2.4 \u8bad\u7ec3\u540e\u91cf\u5316 # \u8bad\u7ec3\u540e\u91cf\u5316\u662f\u57fa\u4e8e\u91c7\u6837\u6570\u636e\uff0c\u91c7\u7528KL\u6563\u5ea6\u7b49\u65b9\u6cd5\u8ba1\u7b97\u91cf\u5316\u6bd4\u4f8b\u56e0\u5b50\u7684\u65b9\u6cd5\u3002\u76f8\u6bd4\u91cf\u5316\u8bad\u7ec3\uff0c\u8bad\u7ec3\u540e\u91cf\u5316\u4e0d\u9700\u8981\u91cd\u65b0\u8bad\u7ec3\uff0c\u53ef\u4ee5\u5feb\u901f\u5f97\u5230\u91cf\u5316\u6a21\u578b\u3002 \u8bad\u7ec3\u540e\u91cf\u5316\u7684\u76ee\u6807\u662f\u6c42\u53d6\u91cf\u5316\u6bd4\u4f8b\u56e0\u5b50\uff0c\u4e3b\u8981\u6709\u4e24\u79cd\u65b9\u6cd5\uff1a\u975e\u9971\u548c\u91cf\u5316\u65b9\u6cd5 ( No Saturation) \u548c\u9971\u548c\u91cf\u5316\u65b9\u6cd5 (Saturation)\u3002\u975e\u9971\u548c\u91cf\u5316\u65b9\u6cd5\u8ba1\u7b97FP32\u7c7b\u578bTensor\u4e2d\u7edd\u5bf9\u503c\u7684\u6700\u5927\u503c abs_max \uff0c\u5c06\u5176\u6620\u5c04\u4e3a127\uff0c\u5219\u91cf\u5316\u6bd4\u4f8b\u56e0\u5b50\u7b49\u4e8e abs_max/127 \u3002\u9971\u548c\u91cf\u5316\u65b9\u6cd5\u4f7f\u7528KL\u6563\u5ea6\u8ba1\u7b97\u4e00\u4e2a\u5408\u9002\u7684\u9608\u503c T ( 0=r \\end{cases} \\end{equation} \\begin{equation} P(r_k) = \\begin{cases} e^{\\frac{(r_k-r)}{T_k}} & r_k < r\\\\ 1 & r_k>=r \\end{cases} \\end{equation} \u5728\u7b2ck\u6b21\u8fed\u4ee3\uff0c\u641c\u5230\u7684\u7f51\u7edc\u4e3a N_k N_k , \u5bf9 N_k N_k \u8bad\u7ec3\u82e5\u5e72epoch\u540e\uff0c\u5728\u6d4b\u8bd5\u96c6\u4e0a\u5f97\u5230reward\u4e3a r_k r_k , \u4ee5\u6982\u7387 P(r_k) P(r_k) \u63a5\u53d7 r_k r_k \uff0c\u5373\u6267\u884c r=r_k r=r_k \u3002 r r \u5728\u641c\u7d22\u8fc7\u7a0b\u8d77\u59cb\u65f6\u88ab\u521d\u59cb\u5316\u4e3a0. 
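As a small illustration of the scale-factor statistics described in 1.2.2.3 and 1.2.4 above, the following sketch tracks the moving average V_t = (1 - k) * V + k * V_{t-1} of per-batch max-abs activation values and maps the result to an int8 scale with the no-saturation rule abs_max / 127; the batch data and the factor k are placeholder values.

```python
# Illustrative sketch: estimate an activation scale by combining the
# moving-average statistic of 1.2.2.3 (Vt = (1 - k) * V + k * V_{t-1}) with the
# no-saturation mapping of 1.2.4 (scale = abs_max / 127). Batch data and k are
# arbitrary placeholders.
import numpy as np

k = 0.9                      # smoothing factor, e.g. 0.9 as suggested in the text
v_t = None                   # running estimate of the activation max-abs value

for _ in range(100):         # stand-in for activation batches observed in training
    batch = np.random.randn(32, 64).astype(np.float32)
    v = np.max(np.abs(batch))                    # V: max-abs of the current batch
    v_t = v if v_t is None else (1 - k) * v + k * v_t

scale = v_t / 127.0          # map the estimated abs_max onto the int8 range [-127, 127]
print(v_t, scale)
```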
T_0 T_0 \u4e3a\u521d\u59cb\u5316\u6e29\u5ea6\uff0c \\theta \\theta \u4e3a\u6e29\u5ea6\u8870\u51cf\u7cfb\u6570\uff0c T_k T_k \u4e3a\u7b2ck\u6b21\u8fed\u4ee3\u7684\u6e29\u5ea6\u3002 \u5728\u6211\u4eec\u7684NAS\u4efb\u52a1\u4e2d\uff0c\u533a\u522b\u4e8eRL\u6bcf\u6b21\u91cd\u65b0\u751f\u6210\u4e00\u4e2a\u5b8c\u6574\u7684\u7f51\u7edc\uff0c\u6211\u4eec\u5c06\u7f51\u7edc\u7ed3\u6784\u6620\u5c04\u6210\u4e00\u6bb5\u7f16\u7801\uff0c\u7b2c\u4e00\u6b21\u968f\u673a\u521d\u59cb\u5316\uff0c\u7136\u540e\u6bcf\u6b21\u968f\u673a\u4fee\u6539\u7f16\u7801\u4e2d\u7684\u4e00\u90e8\u5206\uff08\u5bf9\u5e94\u4e8e\u7f51\u7edc\u7ed3\u6784\u7684\u4e00\u90e8\u5206\uff09\u751f\u6210\u4e00\u4e2a\u65b0\u7684\u7f16\u7801\uff0c\u7136\u540e\u5c06\u8fd9\u4e2a\u7f16\u7801\u518d\u6620\u5c04\u56de\u7f51\u7edc\u7ed3\u6784\uff0c\u901a\u8fc7\u5728\u8bad\u7ec3\u96c6\u4e0a\u8bad\u7ec3\u4e00\u5b9a\u7684epochs\u540e\u7684\u7cbe\u5ea6\u4ee5\u53ca\u7f51\u7edc\u5ef6\u65f6\u878d\u5408\u83b7\u5f97reward\uff0c\u6765\u6307\u5bfc\u9000\u706b\u7b97\u6cd5\u7684\u6536\u655b\u3002 4.2 \u641c\u7d22\u7a7a\u95f4 # \u641c\u7d22\u7a7a\u95f4\u5b9a\u4e49\u4e86\u4f18\u5316\u95ee\u9898\u7684\u53d8\u91cf\uff0c\u53d8\u91cf\u89c4\u6a21\u51b3\u5b9a\u4e86\u641c\u7d22\u7b97\u6cd5\u7684\u96be\u5ea6\u548c\u641c\u7d22\u65f6\u95f4\u3002\u56e0\u6b64\u4e3a\u4e86\u52a0\u5feb\u641c\u7d22\u901f\u5ea6\uff0c\u5b9a\u4e49\u4e00\u4e2a\u5408\u7406\u7684\u641c\u7d22\u7a7a\u95f4\u81f3\u5173\u91cd\u8981\u3002\u5728Light-NAS\u4e2d\uff0c\u4e3a\u4e86\u52a0\u901f\u641c\u7d22\u901f\u5ea6\uff0c\u6211\u4eec\u5c06\u4e00\u4e2a\u7f51\u7edc\u5212\u5206\u4e3a\u591a\u4e2ablock\uff0c\u5148\u624b\u52a8\u6309\u94fe\u72b6\u5c42\u7ea7\u7ed3\u6784\u5806\u53e0c\uff0c\u518d \u4f7f\u7528\u641c\u7d22\u7b97\u6cd5\u81ea\u52a8\u641c\u7d22\u6bcf\u4e2ablock\u5185\u90e8\u7684\u7ed3\u6784\u3002 \u56e0\u4e3a\u8981\u641c\u7d22\u51fa\u5728\u79fb\u52a8\u7aef\u8fd0\u884c\u901f\u5ea6\u5feb\u7684\u6a21\u578b\uff0c\u6211\u4eec\u53c2\u8003\u4e86MobileNetV2\u4e2d\u7684Linear Bottlenecks\u548cInverted residuals\u7ed3\u6784\uff0c\u641c\u7d22\u6bcf\u4e00\u4e2aInverted residuals\u4e2d\u7684\u5177\u4f53\u53c2\u6570\uff0c\u5305\u62eckernelsize\u3001channel\u6269\u5f20\u500d\u6570\u3001\u91cd\u590d\u6b21\u6570\u3001channels number\u3002\u5982\u56fe10\u6240\u793a\uff1a \u56fe10 4.3 \u6a21\u578b\u5ef6\u65f6\u8bc4\u4f30 # \u641c\u7d22\u8fc7\u7a0b\u652f\u6301 FLOPS \u7ea6\u675f\u548c\u6a21\u578b\u5ef6\u65f6\u7ea6\u675f\u3002\u800c\u57fa\u4e8e Android/iOS \u79fb\u52a8\u7aef\u3001\u5f00\u53d1\u677f\u7b49\u786c\u4ef6\u5e73\u53f0\uff0c\u8fed\u4ee3\u641c\u7d22\u8fc7\u7a0b\u4e2d\u4e0d\u65ad\u6d4b\u8bd5\u6a21\u578b\u7684\u5ef6\u65f6\u4e0d\u4ec5\u6d88\u8017\u65f6\u95f4\u800c\u4e14\u975e\u5e38\u4e0d\u65b9\u4fbf\uff0c\u56e0\u6b64\u6211\u4eec\u5f00\u53d1\u4e86\u6a21\u578b\u5ef6\u65f6\u8bc4\u4f30\u5668\u6765\u8bc4\u4f30\u641c\u7d22\u5f97\u5230\u6a21\u578b\u7684\u5ef6\u65f6\u3002\u901a\u8fc7\u5ef6\u65f6\u8bc4\u4f30\u5668\u8bc4\u4f30\u5f97\u5230\u7684\u5ef6\u65f6\u4e0e\u6a21\u578b\u5b9e\u9645\u6d4b\u8bd5\u7684\u5ef6\u65f6\u6ce2\u52a8\u504f\u5dee\u5c0f\u4e8e 10%\u3002 \u5ef6\u65f6\u8bc4\u4f30\u5668\u5206\u4e3a\u914d\u7f6e\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u5668\u548c\u8bc4\u4f30\u6a21\u578b\u5ef6\u65f6\u4e24\u4e2a\u9636\u6bb5\uff0c\u914d\u7f6e\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u5668\u53ea\u9700\u8981\u6267\u884c\u4e00\u6b21\uff0c\u800c\u8bc4\u4f30\u6a21\u578b\u5ef6\u65f6\u5219\u5728\u641c\u7d22\u8fc7\u7a0b\u4e2d\u4e0d\u65ad\u8bc4\u4f30\u641c\u7d22\u5f97\u5230\u7684\u6a21\u578b\u5ef6\u65f6\u3002 
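The acceptance rule of the simulated-annealing search above can be sketched as follows. The temperature schedule T_k = T_0 * theta^k is an assumption (the text names an initial temperature T_0 and a decay coefficient theta without giving the exact schedule), and the per-iteration rewards are random placeholders for the reward obtained after briefly training the sampled network N_k.

```python
# Sketch of the simulated-annealing acceptance rule above: accept a new reward
# r_k with probability exp((r_k - r) / T_k) when r_k < r, and always when
# r_k >= r. The decay schedule T_k = T0 * theta**k is an assumption, and the
# rewards are random placeholders for evaluating the sampled network N_k.
import math
import random

T0, theta = 1.0, 0.9
r = 0.0                                  # best-so-far reward, initialized to 0

for k in range(1, 51):
    T_k = T0 * theta ** k                # assumed exponential temperature decay
    r_k = random.uniform(0.0, 1.0)       # placeholder reward of the k-th sampled network
    p = 1.0 if r_k >= r else math.exp((r_k - r) / T_k)
    if random.random() < p:              # accept r_k with probability P(r_k)
        r = r_k

print(r)
```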
\u914d\u7f6e\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u5668 \u83b7\u53d6\u641c\u7d22\u7a7a\u95f4\u4e2d\u6240\u6709\u4e0d\u91cd\u590d\u7684 op \u53ca\u5176\u53c2\u6570 \u83b7\u53d6\u6bcf\u7ec4 op \u53ca\u5176\u53c2\u6570\u7684\u5ef6\u65f6 \u8bc4\u4f30\u6a21\u578b\u5ef6\u65f6 \u83b7\u53d6\u7ed9\u5b9a\u6a21\u578b\u7684\u6240\u6709 op \u53ca\u5176\u53c2\u6570 \u6839\u636e\u7ed9\u5b9a\u6a21\u578b\u7684\u6240\u6709 op \u53ca\u53c2\u6570\uff0c\u5229\u7528\u5ef6\u65f6\u8bc4\u4f30\u5668\u53bb\u4f30\u8ba1\u6a21\u578b\u7684\u5ef6\u65f6 5. \u53c2\u8003\u6587\u732e # High-Performance Hardware for Machine Learning Quantizing deep convolutional networks for efficient inference: A whitepaper Pruning Filters for Efficient ConvNets Distilling the Knowledge in a Neural Network A Gift from Knowledge Distillation: Fast Optimization, Network Minimization and Transfer Learning","title":"\u7b97\u6cd5\u539f\u7406"},{"location":"algo/algo/#_1","text":"\u91cf\u5316\u539f\u7406\u4ecb\u7ecd \u526a\u88c1\u539f\u7406\u4ecb\u7ecd \u84b8\u998f\u539f\u7406\u4ecb\u7ecd \u8f7b\u91cf\u7ea7\u6a21\u578b\u7ed3\u6784\u641c\u7d22\u539f\u7406\u4ecb\u7ecd","title":"\u76ee\u5f55"},{"location":"algo/algo/#1-quantization-aware-training","text":"","title":"1. Quantization Aware Training\u91cf\u5316\u4ecb\u7ecd"},{"location":"algo/algo/#11","text":"\u8fd1\u5e74\u6765\uff0c\u5b9a\u70b9\u91cf\u5316\u4f7f\u7528\u66f4\u5c11\u7684\u6bd4\u7279\u6570\uff08\u59828-bit\u30013-bit\u30012-bit\u7b49\uff09\u8868\u793a\u795e\u7ecf\u7f51\u7edc\u7684\u6743\u91cd\u548c\u6fc0\u6d3b\u5df2\u88ab\u9a8c\u8bc1\u662f\u6709\u6548\u7684\u3002\u5b9a\u70b9\u91cf\u5316\u7684\u4f18\u70b9\u5305\u62ec\u4f4e\u5185\u5b58\u5e26\u5bbd\u3001\u4f4e\u529f\u8017\u3001\u4f4e\u8ba1\u7b97\u8d44\u6e90\u5360\u7528\u4ee5\u53ca\u4f4e\u6a21\u578b\u5b58\u50a8\u9700\u6c42\u7b49\u3002 \u88681: \u4e0d\u540c\u7c7b\u578b\u64cd\u4f5c\u7684\u5f00\u9500\u5bf9\u6bd4 \u7531\u88681\u53ef\u77e5\uff0c\u4f4e\u7cbe\u5ea6\u5b9a\u70b9\u6570\u64cd\u4f5c\u7684\u786c\u4ef6\u9762\u79ef\u5927\u5c0f\u53ca\u80fd\u8017\u6bd4\u9ad8\u7cbe\u5ea6\u6d6e\u70b9\u6570\u8981\u5c11\u51e0\u4e2a\u6570\u91cf\u7ea7\u3002 \u4f7f\u7528\u5b9a\u70b9\u91cf\u5316\u53ef\u5e26\u67654\u500d\u7684\u6a21\u578b\u538b\u7f29\u30014\u500d\u7684\u5185\u5b58\u5e26\u5bbd\u63d0\u5347\uff0c\u4ee5\u53ca\u66f4\u9ad8\u6548\u7684cache\u5229\u7528(\u5f88\u591a\u786c\u4ef6\u8bbe\u5907\uff0c\u5185\u5b58\u8bbf\u95ee\u662f\u4e3b\u8981\u80fd\u8017)\u3002\u9664\u6b64\u4e4b\u5916\uff0c\u8ba1\u7b97\u901f\u5ea6\u4e5f\u4f1a\u66f4\u5feb(\u901a\u5e38\u5177\u67092x-3x\u7684\u6027\u80fd\u63d0\u5347)\u3002\u7531\u88682\u53ef\u77e5\uff0c\u5728\u5f88\u591a\u573a\u666f\u4e0b\uff0c\u5b9a\u70b9\u91cf\u5316\u64cd\u4f5c\u5bf9\u7cbe\u5ea6\u5e76\u4e0d\u4f1a\u9020\u6210\u635f\u5931\u3002\u53e6\u5916\uff0c\u5b9a\u70b9\u91cf\u5316\u5bf9\u795e\u7ecf\u7f51\u7edc\u4e8e\u5d4c\u5165\u5f0f\u8bbe\u5907\u4e0a\u7684\u63a8\u65ad\u6765\u8bf4\u662f\u6781\u5176\u91cd\u8981\u7684\u3002 \u88682\uff1a\u6a21\u578b\u91cf\u5316\u524d\u540e\u7cbe\u5ea6\u5bf9\u6bd4 \u76ee\u524d\uff0c\u5b66\u672f\u754c\u4e3b\u8981\u5c06\u91cf\u5316\u5206\u4e3a\u4e24\u5927\u7c7b\uff1a Post Training Quantization \u548c Quantization Aware Training \u3002 Post Training Quantization \u662f\u6307\u4f7f\u7528KL\u6563\u5ea6\u3001\u6ed1\u52a8\u5e73\u5747\u7b49\u65b9\u6cd5\u786e\u5b9a\u91cf\u5316\u53c2\u6570\u4e14\u4e0d\u9700\u8981\u91cd\u65b0\u8bad\u7ec3\u7684\u5b9a\u70b9\u91cf\u5316\u65b9\u6cd5\u3002 Quantization Aware Training 
\u662f\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u5bf9\u91cf\u5316\u8fdb\u884c\u5efa\u6a21\u4ee5\u786e\u5b9a\u91cf\u5316\u53c2\u6570\uff0c\u5b83\u4e0e Post Training Quantization \u6a21\u5f0f\u76f8\u6bd4\u53ef\u4ee5\u63d0\u4f9b\u66f4\u9ad8\u7684\u9884\u6d4b\u7cbe\u5ea6\u3002","title":"1.1 \u80cc\u666f"},{"location":"algo/algo/#12","text":"","title":"1.2 \u91cf\u5316\u539f\u7406"},{"location":"algo/algo/#121","text":"\u76ee\u524d\uff0c\u5b58\u5728\u7740\u8bb8\u591a\u65b9\u6cd5\u53ef\u4ee5\u5c06\u6d6e\u70b9\u6570\u91cf\u5316\u6210\u5b9a\u70b9\u6570\u3002\u4f8b\u5982\uff1a r = min(max(x, a), b) s = \\frac{b - a}{n - 1} q = \\left \\lfloor \\frac{r - a}{s} \\right \\rceil \u5f0f\u4e2d\uff0c x x \u662f\u5f85\u91cf\u5316\u7684\u6d6e\u70b9\u503c\uff0c [a, b] [a, b] \u662f\u91cf\u5316\u8303\u56f4\uff0c a a \u662f\u5f85\u91cf\u5316\u6d6e\u70b9\u6570\u4e2d\u7684\u6700\u5c0f\u503c\uff0c b b \u662f\u5f85\u91cf\u5316\u6d6e\u70b9\u6570\u4e2d\u7684\u6700\u5927\u503c\u3002 \\left \\lfloor \\right \\rceil \\left \\lfloor \\right \\rceil \u8868\u793a\u5c06\u7ed3\u679c\u56db\u820d\u4e94\u5165\u5230\u6700\u8fd1\u7684\u6574\u6570\u3002\u5982\u679c\u91cf\u5316\u7ea7\u522b\u4e3a k k \uff0c\u5219 n n \u4e3a 2^k 2^k \u3002\u4f8b\u5982\uff0c\u82e5 k k \u4e3a8\uff0c\u5219 n n \u4e3a256\u3002 q q \u662f\u91cf\u5316\u5f97\u5230\u7684\u6574\u6570\u3002 PaddleSlim\u6846\u67b6\u4e2d\u9009\u62e9\u7684\u91cf\u5316\u65b9\u6cd5\u4e3a\u6700\u5927\u7edd\u5bf9\u503c\u91cf\u5316( max-abs )\uff0c\u5177\u4f53\u63cf\u8ff0\u5982\u4e0b\uff1a M = max(abs(x)) q = \\left \\lfloor \\frac{x}{M} * (n - 1) \\right \\rceil \u5f0f\u4e2d\uff0c x x \u662f\u5f85\u88ab\u91cf\u5316\u7684\u6d6e\u70b9\u503c\uff0c M M \u662f\u5f85\u91cf\u5316\u6d6e\u70b9\u6570\u4e2d\u7684\u7edd\u5bf9\u503c\u6700\u5927\u503c\u3002 \\left \\lfloor \\right \\rceil \\left \\lfloor \\right \\rceil \u8868\u793a\u5c06\u7ed3\u679c\u56db\u820d\u4e94\u5165\u5230\u6700\u8fd1\u7684\u6574\u6570\u3002\u5bf9\u4e8e8bit\u91cf\u5316\uff0cPaddleSlim\u91c7\u7528 int8_t \uff0c\u5373 n=2^7=128 n=2^7=128 \u3002 q q \u662f\u91cf\u5316\u5f97\u5230\u7684\u6574\u6570\u3002 \u65e0\u8bba\u662f min-max\u91cf\u5316 \u8fd8\u662f max-abs\u91cf\u5316 \uff0c\u4ed6\u4eec\u90fd\u53ef\u4ee5\u8868\u793a\u4e3a\u5982\u4e0b\u5f62\u5f0f\uff1a q = scale * r + b q = scale * r + b \u5176\u4e2d min-max \u548c max-abs \u88ab\u79f0\u4e3a\u91cf\u5316\u53c2\u6570\u6216\u8005\u91cf\u5316\u6bd4\u4f8b\u6216\u8005\u91cf\u5316\u8303\u56f4\u3002","title":"1.2.1 \u91cf\u5316\u65b9\u5f0f"},{"location":"algo/algo/#122","text":"","title":"1.2.2 \u91cf\u5316\u8bad\u7ec3"},{"location":"algo/algo/#1221","text":"\u524d\u5411\u4f20\u64ad\u8fc7\u7a0b\u91c7\u7528\u6a21\u62df\u91cf\u5316\u7684\u65b9\u5f0f\uff0c\u5177\u4f53\u63cf\u8ff0\u5982\u4e0b\uff1a \u56fe1\uff1a\u57fa\u4e8e\u6a21\u62df\u91cf\u5316\u8bad\u7ec3\u7684\u524d\u5411\u8fc7\u7a0b \u7531\u56fe1\u53ef\u77e5\uff0c\u57fa\u4e8e\u6a21\u62df\u91cf\u5316\u8bad\u7ec3\u7684\u524d\u5411\u8fc7\u7a0b\u53ef\u88ab\u63cf\u8ff0\u4e3a\u4ee5\u4e0b\u56db\u4e2a\u90e8\u5206\uff1a 1) \u8f93\u5165\u548c\u6743\u91cd\u5747\u88ab\u91cf\u5316\u62108-bit\u6574\u6570\u3002 2) \u57288-bit\u6574\u6570\u4e0a\u6267\u884c\u77e9\u9635\u4e58\u6cd5\u6216\u8005\u5377\u79ef\u64cd\u4f5c\u3002 3) \u53cd\u91cf\u5316\u77e9\u9635\u4e58\u6cd5\u6216\u8005\u5377\u79ef\u64cd\u4f5c\u7684\u8f93\u51fa\u7ed3\u679c\u4e3a32-bit\u6d6e\u70b9\u578b\u6570\u636e\u3002 4) \u572832-bit\u6d6e\u70b9\u578b\u6570\u636e\u4e0a\u6267\u884c\u504f\u7f6e\u52a0\u6cd5\u64cd\u4f5c\u3002\u6b64\u5904\uff0c\u504f\u7f6e\u5e76\u672a\u88ab\u91cf\u5316\u3002 
\u5bf9\u4e8e\u901a\u7528\u77e9\u9635\u4e58\u6cd5( GEMM )\uff0c\u8f93\u5165 X X \u548c\u6743\u91cd W W \u7684\u91cf\u5316\u64cd\u4f5c\u53ef\u88ab\u8868\u8ff0\u4e3a\u5982\u4e0b\u8fc7\u7a0b\uff1a X_q = \\left \\lfloor \\frac{X}{X_m} * (n - 1) \\right \\rceil W_q = \\left \\lfloor \\frac{W}{W_m} * (n - 1) \\right \\rceil \u6267\u884c\u901a\u7528\u77e9\u9635\u4e58\u6cd5\uff1a Y_q = X_q * W_q \u5bf9\u91cf\u5316\u4e58\u79ef\u7ed3\u679c Yq Yq \u8fdb\u884c\u53cd\u91cf\u5316: \\begin{align} Y_{dq} = \\frac{Y_q}{(n - 1) * (n - 1)} * X_m * W_m \\ =\\frac{X_q * W_q}{(n - 1) * (n - 1)} * X_m * W_m \\ =(\\frac{X_q}{n - 1} * X_m) * (\\frac{W_q}{n - 1} * W_m) \\ \\end{align} \u4e0a\u8ff0\u516c\u5f0f\u8868\u660e\u53cd\u91cf\u5316\u64cd\u4f5c\u53ef\u4ee5\u88ab\u79fb\u52a8\u5230 GEMM \u4e4b\u524d\uff0c\u5373\u5148\u5bf9 Xq Xq \u548c Wq Wq \u6267\u884c\u53cd\u91cf\u5316\u64cd\u4f5c\u518d\u505a GEMM \u64cd\u4f5c\u3002\u56e0\u6b64\uff0c\u524d\u5411\u4f20\u64ad\u7684\u5de5\u4f5c\u6d41\u4ea6\u53ef\u8868\u793a\u4e3a\u5982\u4e0b\u65b9\u5f0f\uff1a \u56fe2\uff1a\u57fa\u4e8e\u6a21\u62df\u91cf\u5316\u8bad\u7ec3\u524d\u5411\u8fc7\u7a0b\u7684\u7b49\u4ef7\u5de5\u4f5c\u6d41 \u8bad\u7ec3\u8fc7\u7a0b\u4e2d\uff0cPaddleSlim\u4f7f\u7528\u56fe2\u4e2d\u6240\u793a\u7684\u7b49\u4ef7\u5de5\u4f5c\u6d41\u3002\u5728\u8bbe\u8ba1\u4e2d\uff0c\u91cf\u5316Pass\u5728IrGraph\u4e2d\u63d2\u5165\u91cf\u5316op\u548c\u53cd\u91cf\u5316op\u3002\u56e0\u4e3a\u5728\u8fde\u7eed\u7684\u91cf\u5316\u3001\u53cd\u91cf\u5316\u64cd\u4f5c\u4e4b\u540e\u8f93\u5165\u4ecd\u7136\u4e3a32-bit\u6d6e\u70b9\u578b\u6570\u636e\u3002\u56e0\u6b64\uff0cPaddleSlim\u91cf\u5316\u8bad\u7ec3\u6846\u67b6\u6240\u91c7\u7528\u7684\u91cf\u5316\u65b9\u5f0f\u88ab\u79f0\u4e3a\u6a21\u62df\u91cf\u5316\u3002","title":"1.2.2.1 \u524d\u5411\u4f20\u64ad"},{"location":"algo/algo/#1222","text":"\u7531\u56fe3\u53ef\u77e5\uff0c\u6743\u91cd\u66f4\u65b0\u6240\u9700\u7684\u68af\u5ea6\u503c\u53ef\u4ee5\u7531\u91cf\u5316\u540e\u7684\u6743\u91cd\u548c\u91cf\u5316\u540e\u7684\u6fc0\u6d3b\u6c42\u5f97\u3002\u53cd\u5411\u4f20\u64ad\u8fc7\u7a0b\u4e2d\u7684\u6240\u6709\u8f93\u5165\u548c\u8f93\u51fa\u5747\u4e3a32-bit\u6d6e\u70b9\u578b\u6570\u636e\u3002\u6ce8\u610f\uff0c\u68af\u5ea6\u66f4\u65b0\u64cd\u4f5c\u9700\u8981\u5728\u539f\u59cb\u6743\u91cd\u4e0a\u8fdb\u884c\uff0c\u5373\u8ba1\u7b97\u51fa\u7684\u68af\u5ea6\u5c06\u88ab\u52a0\u5230\u539f\u59cb\u6743\u91cd\u4e0a\u800c\u975e\u91cf\u5316\u540e\u6216\u53cd\u91cf\u5316\u540e\u7684\u6743\u91cd\u4e0a\u3002 \u56fe3\uff1a\u57fa\u4e8e\u6a21\u62df\u91cf\u5316\u8bad\u7ec3\u7684\u53cd\u5411\u4f20\u64ad\u548c\u6743\u91cd\u66f4\u65b0\u8fc7\u7a0b \u56e0\u6b64\uff0c\u91cf\u5316Pass\u4e5f\u4f1a\u6539\u53d8\u76f8\u5e94\u53cd\u5411\u7b97\u5b50\u7684\u67d0\u4e9b\u8f93\u5165\u3002","title":"1.2.2.2 \u53cd\u5411\u4f20\u64ad"},{"location":"algo/algo/#1223","text":"\u5b58\u5728\u7740\u4e24\u79cd\u7b56\u7565\u53ef\u4ee5\u8ba1\u7b97\u6c42\u53d6\u91cf\u5316\u6bd4\u4f8b\u7cfb\u6570\uff0c\u5373\u52a8\u6001\u7b56\u7565\u548c\u9759\u6001\u7b56\u7565\u3002\u52a8\u6001\u7b56\u7565\u4f1a\u5728\u6bcf\u6b21\u8fed\u4ee3\u8fc7\u7a0b\u4e2d\u8ba1\u7b97\u91cf\u5316\u6bd4\u4f8b\u7cfb\u6570\u7684\u503c\u3002\u9759\u6001\u7b56\u7565\u5219\u5bf9\u4e0d\u540c\u7684\u8f93\u5165\u91c7\u7528\u76f8\u540c\u7684\u91cf\u5316\u6bd4\u4f8b\u7cfb\u6570\u3002 
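回到前文“反量化操作可以被移动到 GEMM 之前”的推导，这一等价性可以用如下 numpy 小示例直接验证（示意代码，非框架实现，数值均为假设）：

```python
import numpy as np

rng = np.random.RandomState(1)
n = 128
X = rng.uniform(-1, 1, size=(2, 4)).astype(np.float32)
W = rng.uniform(-1, 1, size=(4, 3)).astype(np.float32)

Xm, Wm = np.abs(X).max(), np.abs(W).max()
Xq = np.round(X / Xm * (n - 1))
Wq = np.round(W / Wm * (n - 1))

# 方式一：先整数 GEMM，再反量化（图1 所示流程）
Y1 = Xq.dot(Wq) / ((n - 1) * (n - 1)) * Xm * Wm

# 方式二：先分别反量化，再做浮点 GEMM（图2 所示等价流程）
Y2 = (Xq / (n - 1) * Xm).dot(Wq / (n - 1) * Wm)

# 两种工作流的结果仅相差浮点舍入误差
print(np.allclose(Y1, Y2))
```

两种工作流在数值上一致，这也是图2中等价工作流成立的原因。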
\u5bf9\u4e8e\u6743\u91cd\u800c\u8a00\uff0c\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u91c7\u7528\u52a8\u6001\u7b56\u7565\u3002\u6362\u53e5\u8bdd\u8bf4\uff0c\u5728\u6bcf\u6b21\u8fed\u4ee3\u8fc7\u7a0b\u4e2d\u91cf\u5316\u6bd4\u4f8b\u7cfb\u6570\u5747\u4f1a\u88ab\u91cd\u65b0\u8ba1\u7b97\u5f97\u5230\u76f4\u81f3\u8bad\u7ec3\u8fc7\u7a0b\u7ed3\u675f\u3002 \u5bf9\u4e8e\u6fc0\u6d3b\u800c\u8a00\uff0c\u53ef\u4ee5\u9009\u62e9\u52a8\u6001\u7b56\u7565\u4e5f\u53ef\u4ee5\u9009\u62e9\u9759\u6001\u7b56\u7565\u3002\u82e5\u9009\u62e9\u4f7f\u7528\u9759\u6001\u7b56\u7565\uff0c\u5219\u91cf\u5316\u6bd4\u4f8b\u7cfb\u6570\u4f1a\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u88ab\u8bc4\u4f30\u6c42\u5f97\uff0c\u4e14\u5728\u63a8\u65ad\u8fc7\u7a0b\u4e2d\u88ab\u4f7f\u7528(\u4e0d\u540c\u7684\u8f93\u5165\u5747\u4fdd\u6301\u4e0d\u53d8)\u3002\u9759\u6001\u7b56\u7565\u4e2d\u7684\u91cf\u5316\u6bd4\u4f8b\u7cfb\u6570\u53ef\u4e8e\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u901a\u8fc7\u5982\u4e0b\u4e09\u79cd\u65b9\u5f0f\u8fdb\u884c\u8bc4\u4f30\uff1a \u5728\u4e00\u4e2a\u7a97\u53e3\u4e2d\u8ba1\u7b97\u6fc0\u6d3b\u6700\u5927\u7edd\u5bf9\u503c\u7684\u5e73\u5747\u503c\u3002 \u5728\u4e00\u4e2a\u7a97\u53e3\u4e2d\u8ba1\u7b97\u6fc0\u6d3b\u6700\u5927\u7edd\u5bf9\u503c\u7684\u6700\u5927\u503c\u3002 \u5728\u4e00\u4e2a\u7a97\u53e3\u4e2d\u8ba1\u7b97\u6fc0\u6d3b\u6700\u5927\u7edd\u5bf9\u503c\u7684\u6ed1\u52a8\u5e73\u5747\u503c\uff0c\u8ba1\u7b97\u516c\u5f0f\u5982\u4e0b\uff1a Vt = (1 - k) * V + k * V_{t-1} Vt = (1 - k) * V + k * V_{t-1} \u5f0f\u4e2d\uff0c V V \u662f\u5f53\u524dbatch\u7684\u6700\u5927\u7edd\u5bf9\u503c\uff0c Vt Vt \u662f\u6ed1\u52a8\u5e73\u5747\u503c\u3002 k k \u662f\u4e00\u4e2a\u56e0\u5b50\uff0c\u4f8b\u5982\u5176\u503c\u53ef\u53d6\u4e3a0.9\u3002","title":"1.2.2.3 \u786e\u5b9a\u91cf\u5316\u6bd4\u4f8b\u7cfb\u6570"},{"location":"algo/algo/#124","text":"\u8bad\u7ec3\u540e\u91cf\u5316\u662f\u57fa\u4e8e\u91c7\u6837\u6570\u636e\uff0c\u91c7\u7528KL\u6563\u5ea6\u7b49\u65b9\u6cd5\u8ba1\u7b97\u91cf\u5316\u6bd4\u4f8b\u56e0\u5b50\u7684\u65b9\u6cd5\u3002\u76f8\u6bd4\u91cf\u5316\u8bad\u7ec3\uff0c\u8bad\u7ec3\u540e\u91cf\u5316\u4e0d\u9700\u8981\u91cd\u65b0\u8bad\u7ec3\uff0c\u53ef\u4ee5\u5feb\u901f\u5f97\u5230\u91cf\u5316\u6a21\u578b\u3002 \u8bad\u7ec3\u540e\u91cf\u5316\u7684\u76ee\u6807\u662f\u6c42\u53d6\u91cf\u5316\u6bd4\u4f8b\u56e0\u5b50\uff0c\u4e3b\u8981\u6709\u4e24\u79cd\u65b9\u6cd5\uff1a\u975e\u9971\u548c\u91cf\u5316\u65b9\u6cd5 ( No Saturation) \u548c\u9971\u548c\u91cf\u5316\u65b9\u6cd5 (Saturation)\u3002\u975e\u9971\u548c\u91cf\u5316\u65b9\u6cd5\u8ba1\u7b97FP32\u7c7b\u578bTensor\u4e2d\u7edd\u5bf9\u503c\u7684\u6700\u5927\u503c abs_max \uff0c\u5c06\u5176\u6620\u5c04\u4e3a127\uff0c\u5219\u91cf\u5316\u6bd4\u4f8b\u56e0\u5b50\u7b49\u4e8e abs_max/127 \u3002\u9971\u548c\u91cf\u5316\u65b9\u6cd5\u4f7f\u7528KL\u6563\u5ea6\u8ba1\u7b97\u4e00\u4e2a\u5408\u9002\u7684\u9608\u503c T ( 0=r \\end{cases} \\end{equation} \\begin{equation} P(r_k) = \\begin{cases} e^{\\frac{(r_k-r)}{T_k}} & r_k < r\\\\ 1 & r_k>=r \\end{cases} \\end{equation} \u5728\u7b2ck\u6b21\u8fed\u4ee3\uff0c\u641c\u5230\u7684\u7f51\u7edc\u4e3a N_k N_k , \u5bf9 N_k N_k \u8bad\u7ec3\u82e5\u5e72epoch\u540e\uff0c\u5728\u6d4b\u8bd5\u96c6\u4e0a\u5f97\u5230reward\u4e3a r_k r_k , \u4ee5\u6982\u7387 P(r_k) P(r_k) \u63a5\u53d7 r_k r_k \uff0c\u5373\u6267\u884c r=r_k r=r_k \u3002 r r \u5728\u641c\u7d22\u8fc7\u7a0b\u8d77\u59cb\u65f6\u88ab\u521d\u59cb\u5316\u4e3a0. 
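上述接受准则可以写成如下的示意代码（其中假设温度按 T_k = T_0 * θ^k 的方式衰减，这只是常见做法之一，具体以实现为准；T_0、θ 的含义见下文说明）：

```python
import numpy as np

def accept_prob(r_k, r, T_k):
    """模拟退火的接受概率：新 reward 不低于当前值时必接受，否则以一定概率接受。"""
    if r_k >= r:
        return 1.0
    return float(np.exp((r_k - r) / T_k))

rng = np.random.RandomState(0)
T0, theta = 10.24, 0.85   # 初始温度与衰减系数（对应 SANAS 的 init_temperature / reduce_rate，取值仅为示例）
r = 0.0                   # r 在搜索起始时被初始化为 0

for k in range(1, 6):
    T_k = T0 * theta ** k            # 假设温度按指数方式衰减
    r_k = rng.uniform(0.3, 0.8)      # 假设这是第 k 个候选结构在测试集上得到的 reward
    p = accept_prob(r_k, r, T_k)
    if rng.uniform() < p:            # 以概率 P(r_k) 接受，即执行 r = r_k
        r = r_k
    print(k, round(r_k, 3), round(p, 3), round(r, 3))
```

温度越低，接受更差 reward 的概率越小，搜索过程因此逐渐收敛。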
T_0 T_0 \u4e3a\u521d\u59cb\u5316\u6e29\u5ea6\uff0c \\theta \\theta \u4e3a\u6e29\u5ea6\u8870\u51cf\u7cfb\u6570\uff0c T_k T_k \u4e3a\u7b2ck\u6b21\u8fed\u4ee3\u7684\u6e29\u5ea6\u3002 \u5728\u6211\u4eec\u7684NAS\u4efb\u52a1\u4e2d\uff0c\u533a\u522b\u4e8eRL\u6bcf\u6b21\u91cd\u65b0\u751f\u6210\u4e00\u4e2a\u5b8c\u6574\u7684\u7f51\u7edc\uff0c\u6211\u4eec\u5c06\u7f51\u7edc\u7ed3\u6784\u6620\u5c04\u6210\u4e00\u6bb5\u7f16\u7801\uff0c\u7b2c\u4e00\u6b21\u968f\u673a\u521d\u59cb\u5316\uff0c\u7136\u540e\u6bcf\u6b21\u968f\u673a\u4fee\u6539\u7f16\u7801\u4e2d\u7684\u4e00\u90e8\u5206\uff08\u5bf9\u5e94\u4e8e\u7f51\u7edc\u7ed3\u6784\u7684\u4e00\u90e8\u5206\uff09\u751f\u6210\u4e00\u4e2a\u65b0\u7684\u7f16\u7801\uff0c\u7136\u540e\u5c06\u8fd9\u4e2a\u7f16\u7801\u518d\u6620\u5c04\u56de\u7f51\u7edc\u7ed3\u6784\uff0c\u901a\u8fc7\u5728\u8bad\u7ec3\u96c6\u4e0a\u8bad\u7ec3\u4e00\u5b9a\u7684epochs\u540e\u7684\u7cbe\u5ea6\u4ee5\u53ca\u7f51\u7edc\u5ef6\u65f6\u878d\u5408\u83b7\u5f97reward\uff0c\u6765\u6307\u5bfc\u9000\u706b\u7b97\u6cd5\u7684\u6536\u655b\u3002","title":"4.1.1 \u6a21\u62df\u9000\u706b"},{"location":"algo/algo/#42","text":"\u641c\u7d22\u7a7a\u95f4\u5b9a\u4e49\u4e86\u4f18\u5316\u95ee\u9898\u7684\u53d8\u91cf\uff0c\u53d8\u91cf\u89c4\u6a21\u51b3\u5b9a\u4e86\u641c\u7d22\u7b97\u6cd5\u7684\u96be\u5ea6\u548c\u641c\u7d22\u65f6\u95f4\u3002\u56e0\u6b64\u4e3a\u4e86\u52a0\u5feb\u641c\u7d22\u901f\u5ea6\uff0c\u5b9a\u4e49\u4e00\u4e2a\u5408\u7406\u7684\u641c\u7d22\u7a7a\u95f4\u81f3\u5173\u91cd\u8981\u3002\u5728Light-NAS\u4e2d\uff0c\u4e3a\u4e86\u52a0\u901f\u641c\u7d22\u901f\u5ea6\uff0c\u6211\u4eec\u5c06\u4e00\u4e2a\u7f51\u7edc\u5212\u5206\u4e3a\u591a\u4e2ablock\uff0c\u5148\u624b\u52a8\u6309\u94fe\u72b6\u5c42\u7ea7\u7ed3\u6784\u5806\u53e0c\uff0c\u518d \u4f7f\u7528\u641c\u7d22\u7b97\u6cd5\u81ea\u52a8\u641c\u7d22\u6bcf\u4e2ablock\u5185\u90e8\u7684\u7ed3\u6784\u3002 \u56e0\u4e3a\u8981\u641c\u7d22\u51fa\u5728\u79fb\u52a8\u7aef\u8fd0\u884c\u901f\u5ea6\u5feb\u7684\u6a21\u578b\uff0c\u6211\u4eec\u53c2\u8003\u4e86MobileNetV2\u4e2d\u7684Linear Bottlenecks\u548cInverted residuals\u7ed3\u6784\uff0c\u641c\u7d22\u6bcf\u4e00\u4e2aInverted residuals\u4e2d\u7684\u5177\u4f53\u53c2\u6570\uff0c\u5305\u62eckernelsize\u3001channel\u6269\u5f20\u500d\u6570\u3001\u91cd\u590d\u6b21\u6570\u3001channels number\u3002\u5982\u56fe10\u6240\u793a\uff1a \u56fe10","title":"4.2 \u641c\u7d22\u7a7a\u95f4"},{"location":"algo/algo/#43","text":"\u641c\u7d22\u8fc7\u7a0b\u652f\u6301 FLOPS \u7ea6\u675f\u548c\u6a21\u578b\u5ef6\u65f6\u7ea6\u675f\u3002\u800c\u57fa\u4e8e Android/iOS \u79fb\u52a8\u7aef\u3001\u5f00\u53d1\u677f\u7b49\u786c\u4ef6\u5e73\u53f0\uff0c\u8fed\u4ee3\u641c\u7d22\u8fc7\u7a0b\u4e2d\u4e0d\u65ad\u6d4b\u8bd5\u6a21\u578b\u7684\u5ef6\u65f6\u4e0d\u4ec5\u6d88\u8017\u65f6\u95f4\u800c\u4e14\u975e\u5e38\u4e0d\u65b9\u4fbf\uff0c\u56e0\u6b64\u6211\u4eec\u5f00\u53d1\u4e86\u6a21\u578b\u5ef6\u65f6\u8bc4\u4f30\u5668\u6765\u8bc4\u4f30\u641c\u7d22\u5f97\u5230\u6a21\u578b\u7684\u5ef6\u65f6\u3002\u901a\u8fc7\u5ef6\u65f6\u8bc4\u4f30\u5668\u8bc4\u4f30\u5f97\u5230\u7684\u5ef6\u65f6\u4e0e\u6a21\u578b\u5b9e\u9645\u6d4b\u8bd5\u7684\u5ef6\u65f6\u6ce2\u52a8\u504f\u5dee\u5c0f\u4e8e 10%\u3002 
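其基本思路可以用下面的示意代码概括（表项字段与 key 的写法完全是假设的，真实格式请以 PaddleSlim 硬件延时评估表文档及下文的 TableLatencyEvaluator 接口为准）：

```python
# 一个极简的“延时表”示意：key 为 op 及其参数组合，value 为在目标硬件上实测的延时（ms）。
latency_table = {
    "conv2d,k3,s1,c32": 0.42,
    "conv2d,k3,s2,c64": 0.31,
    "depthwise_conv2d,k3,s1,c64": 0.12,
    "fc,out1000": 0.05,
}

def estimate_latency(ops, table):
    """把模型中每个 op（及参数）在延时表中查到的延时逐项累加，得到整网的预估延时。"""
    return sum(table[op] for op in ops)

# 假设某个候选模型由下面这些 op 组成
model_ops = ["conv2d,k3,s2,c64", "depthwise_conv2d,k3,s1,c64", "fc,out1000"]
print("estimated latency (ms):", estimate_latency(model_ops, latency_table))
```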
\u5ef6\u65f6\u8bc4\u4f30\u5668\u5206\u4e3a\u914d\u7f6e\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u5668\u548c\u8bc4\u4f30\u6a21\u578b\u5ef6\u65f6\u4e24\u4e2a\u9636\u6bb5\uff0c\u914d\u7f6e\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u5668\u53ea\u9700\u8981\u6267\u884c\u4e00\u6b21\uff0c\u800c\u8bc4\u4f30\u6a21\u578b\u5ef6\u65f6\u5219\u5728\u641c\u7d22\u8fc7\u7a0b\u4e2d\u4e0d\u65ad\u8bc4\u4f30\u641c\u7d22\u5f97\u5230\u7684\u6a21\u578b\u5ef6\u65f6\u3002 \u914d\u7f6e\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u5668 \u83b7\u53d6\u641c\u7d22\u7a7a\u95f4\u4e2d\u6240\u6709\u4e0d\u91cd\u590d\u7684 op \u53ca\u5176\u53c2\u6570 \u83b7\u53d6\u6bcf\u7ec4 op \u53ca\u5176\u53c2\u6570\u7684\u5ef6\u65f6 \u8bc4\u4f30\u6a21\u578b\u5ef6\u65f6 \u83b7\u53d6\u7ed9\u5b9a\u6a21\u578b\u7684\u6240\u6709 op \u53ca\u5176\u53c2\u6570 \u6839\u636e\u7ed9\u5b9a\u6a21\u578b\u7684\u6240\u6709 op \u53ca\u53c2\u6570\uff0c\u5229\u7528\u5ef6\u65f6\u8bc4\u4f30\u5668\u53bb\u4f30\u8ba1\u6a21\u578b\u7684\u5ef6\u65f6","title":"4.3 \u6a21\u578b\u5ef6\u65f6\u8bc4\u4f30"},{"location":"algo/algo/#5","text":"High-Performance Hardware for Machine Learning Quantizing deep convolutional networks for efficient inference: A whitepaper Pruning Filters for Efficient ConvNets Distilling the Knowledge in a Neural Network A Gift from Knowledge Distillation: Fast Optimization, Network Minimization and Transfer Learning","title":"5. \u53c2\u8003\u6587\u732e"},{"location":"api/analysis_api/","text":"FLOPs # paddleslim.analysis.flops(program, detail=False) \u6e90\u4ee3\u7801 \u83b7\u5f97\u6307\u5b9a\u7f51\u7edc\u7684\u6d6e\u70b9\u8fd0\u7b97\u6b21\u6570(FLOPs)\u3002 \u53c2\u6570\uff1a program(paddle.fluid.Program) - \u5f85\u5206\u6790\u7684\u76ee\u6807\u7f51\u7edc\u3002\u66f4\u591a\u5173\u4e8eProgram\u7684\u4ecb\u7ecd\u8bf7\u53c2\u8003\uff1a Program\u6982\u5ff5\u4ecb\u7ecd \u3002 detail(bool) - \u662f\u5426\u8fd4\u56de\u6bcf\u4e2a\u5377\u79ef\u5c42\u7684FLOPs\u3002\u9ed8\u8ba4\u4e3aFalse\u3002 only_conv(bool) - \u5982\u679c\u8bbe\u7f6e\u4e3aTrue\uff0c\u5219\u4ec5\u8ba1\u7b97\u5377\u79ef\u5c42\u548c\u5168\u8fde\u63a5\u5c42\u7684FLOPs\uff0c\u5373\u6d6e\u70b9\u6570\u7684\u4e58\u52a0\uff08multiplication-adds\uff09\u64cd\u4f5c\u6b21\u6570\u3002\u5982\u679c\u8bbe\u7f6e\u4e3aFalse\uff0c\u5219\u4e5f\u4f1a\u8ba1\u7b97\u5377\u79ef\u548c\u5168\u8fde\u63a5\u5c42\u4e4b\u5916\u7684\u64cd\u4f5c\u7684FLOPs\u3002 \u8fd4\u56de\u503c\uff1a flops(float) - \u6574\u4e2a\u7f51\u7edc\u7684FLOPs\u3002 params2flops(dict) - \u6bcf\u5c42\u5377\u79ef\u5bf9\u5e94\u7684FLOPs\uff0c\u5176\u4e2dkey\u4e3a\u5377\u79ef\u5c42\u53c2\u6570\u540d\u79f0\uff0cvalue\u4e3aFLOPs\u503c\u3002 \u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 import paddle.fluid as fluid from paddle.fluid.param_attr import ParamAttr from paddleslim.analysis import flops def conv_bn_layer ( input , num_filters , filter_size , name , stride = 1 , groups = 1 , act = None ): conv = fluid . layers . conv2d ( input = input , num_filters = num_filters , filter_size = filter_size , stride = stride , padding = ( filter_size - 1 ) // 2 , groups = groups , act = None , param_attr = ParamAttr ( name = name + \"_weights\" ), bias_attr = False , name = name + \"_out\" ) bn_name = name + \"_bn\" return fluid . layers . 
batch_norm ( input = conv , act = act , name = bn_name + '_output' , param_attr = ParamAttr ( name = bn_name + '_scale' ), bias_attr = ParamAttr ( bn_name + '_offset' ), moving_mean_name = bn_name + '_mean' , moving_variance_name = bn_name + '_variance' , ) main_program = fluid . Program () startup_program = fluid . Program () # X X O X O # conv1-->conv2-->sum1-->conv3-->conv4-->sum2-->conv5-->conv6 # | ^ | ^ # |____________| |____________________| # # X: prune output channels # O: prune input channels with fluid . program_guard ( main_program , startup_program ): input = fluid . data ( name = \"image\" , shape = [ None , 3 , 16 , 16 ]) conv1 = conv_bn_layer ( input , 8 , 3 , \"conv1\" ) conv2 = conv_bn_layer ( conv1 , 8 , 3 , \"conv2\" ) sum1 = conv1 + conv2 conv3 = conv_bn_layer ( sum1 , 8 , 3 , \"conv3\" ) conv4 = conv_bn_layer ( conv3 , 8 , 3 , \"conv4\" ) sum2 = conv4 + sum1 conv5 = conv_bn_layer ( sum2 , 8 , 3 , \"conv5\" ) conv6 = conv_bn_layer ( conv5 , 8 , 3 , \"conv6\" ) print ( \"FLOPs: {}\" . format ( flops ( main_program ))) model_size # paddleslim.analysis.model_size(program) \u6e90\u4ee3\u7801 \u83b7\u5f97\u6307\u5b9a\u7f51\u7edc\u7684\u53c2\u6570\u6570\u91cf\u3002 \u53c2\u6570\uff1a program(paddle.fluid.Program) - \u5f85\u5206\u6790\u7684\u76ee\u6807\u7f51\u7edc\u3002\u66f4\u591a\u5173\u4e8eProgram\u7684\u4ecb\u7ecd\u8bf7\u53c2\u8003\uff1a Program\u6982\u5ff5\u4ecb\u7ecd \u3002 \u8fd4\u56de\u503c\uff1a model_size(int) - \u6574\u4e2a\u7f51\u7edc\u7684\u53c2\u6570\u6570\u91cf\u3002 \u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 import paddle.fluid as fluid from paddle.fluid.param_attr import ParamAttr from paddleslim.analysis import model_size def conv_layer ( input , num_filters , filter_size , name , stride = 1 , groups = 1 , act = None ): conv = fluid . layers . conv2d ( input = input , num_filters = num_filters , filter_size = filter_size , stride = stride , padding = ( filter_size - 1 ) // 2 , groups = groups , act = None , param_attr = ParamAttr ( name = name + \"_weights\" ), bias_attr = False , name = name + \"_out\" ) return conv main_program = fluid . Program () startup_program = fluid . Program () # X X O X O # conv1-->conv2-->sum1-->conv3-->conv4-->sum2-->conv5-->conv6 # | ^ | ^ # |____________| |____________________| # # X: prune output channels # O: prune input channels with fluid . program_guard ( main_program , startup_program ): input = fluid . data ( name = \"image\" , shape = [ None , 3 , 16 , 16 ]) conv1 = conv_layer ( input , 8 , 3 , \"conv1\" ) conv2 = conv_layer ( conv1 , 8 , 3 , \"conv2\" ) sum1 = conv1 + conv2 conv3 = conv_layer ( sum1 , 8 , 3 , \"conv3\" ) conv4 = conv_layer ( conv3 , 8 , 3 , \"conv4\" ) sum2 = conv4 + sum1 conv5 = conv_layer ( sum2 , 8 , 3 , \"conv5\" ) conv6 = conv_layer ( conv5 , 8 , 3 , \"conv6\" ) print ( \"FLOPs: {}\" . 
format ( model_size ( main_program ))) TableLatencyEvaluator # paddleslim.analysis.TableLatencyEvaluator(table_file, delimiter=\",\") \u6e90\u4ee3\u7801 \u57fa\u4e8e\u786c\u4ef6\u5ef6\u65f6\u8868\u7684\u6a21\u578b\u5ef6\u65f6\u8bc4\u4f30\u5668\u3002 \u53c2\u6570\uff1a table_file(str) - \u6240\u4f7f\u7528\u7684\u5ef6\u65f6\u8bc4\u4f30\u8868\u7684\u7edd\u5bf9\u8def\u5f84\u3002\u5173\u4e8e\u6f14\u793a\u8bc4\u4f30\u8868\u683c\u5f0f\u8bf7\u53c2\u8003\uff1a PaddleSlim\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u683c\u5f0f delimiter(str) - \u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u4e2d\uff0c\u64cd\u4f5c\u4fe1\u606f\u4e4b\u524d\u6240\u4f7f\u7528\u7684\u5206\u5272\u7b26\uff0c\u9ed8\u8ba4\u4e3a\u82f1\u6587\u5b57\u7b26\u9017\u53f7\u3002 \u8fd4\u56de\u503c\uff1a Evaluator - \u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u5668\u7684\u5b9e\u4f8b\u3002 paddleslim.analysis.TableLatencyEvaluator.latency(graph) \u6e90\u4ee3\u7801 \u83b7\u5f97\u6307\u5b9a\u7f51\u7edc\u7684\u9884\u4f30\u5ef6\u65f6\u3002 \u53c2\u6570\uff1a graph(Program) - \u5f85\u9884\u4f30\u7684\u76ee\u6807\u7f51\u7edc\u3002 \u8fd4\u56de\u503c\uff1a latency - \u76ee\u6807\u7f51\u7edc\u7684\u9884\u4f30\u5ef6\u65f6\u3002","title":"\u6a21\u578b\u5206\u6790"},{"location":"api/analysis_api/#flops","text":"paddleslim.analysis.flops(program, detail=False) \u6e90\u4ee3\u7801 \u83b7\u5f97\u6307\u5b9a\u7f51\u7edc\u7684\u6d6e\u70b9\u8fd0\u7b97\u6b21\u6570(FLOPs)\u3002 \u53c2\u6570\uff1a program(paddle.fluid.Program) - \u5f85\u5206\u6790\u7684\u76ee\u6807\u7f51\u7edc\u3002\u66f4\u591a\u5173\u4e8eProgram\u7684\u4ecb\u7ecd\u8bf7\u53c2\u8003\uff1a Program\u6982\u5ff5\u4ecb\u7ecd \u3002 detail(bool) - \u662f\u5426\u8fd4\u56de\u6bcf\u4e2a\u5377\u79ef\u5c42\u7684FLOPs\u3002\u9ed8\u8ba4\u4e3aFalse\u3002 only_conv(bool) - \u5982\u679c\u8bbe\u7f6e\u4e3aTrue\uff0c\u5219\u4ec5\u8ba1\u7b97\u5377\u79ef\u5c42\u548c\u5168\u8fde\u63a5\u5c42\u7684FLOPs\uff0c\u5373\u6d6e\u70b9\u6570\u7684\u4e58\u52a0\uff08multiplication-adds\uff09\u64cd\u4f5c\u6b21\u6570\u3002\u5982\u679c\u8bbe\u7f6e\u4e3aFalse\uff0c\u5219\u4e5f\u4f1a\u8ba1\u7b97\u5377\u79ef\u548c\u5168\u8fde\u63a5\u5c42\u4e4b\u5916\u7684\u64cd\u4f5c\u7684FLOPs\u3002 \u8fd4\u56de\u503c\uff1a flops(float) - \u6574\u4e2a\u7f51\u7edc\u7684FLOPs\u3002 params2flops(dict) - \u6bcf\u5c42\u5377\u79ef\u5bf9\u5e94\u7684FLOPs\uff0c\u5176\u4e2dkey\u4e3a\u5377\u79ef\u5c42\u53c2\u6570\u540d\u79f0\uff0cvalue\u4e3aFLOPs\u503c\u3002 \u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 import paddle.fluid as fluid from paddle.fluid.param_attr import ParamAttr from paddleslim.analysis import flops def conv_bn_layer ( input , num_filters , filter_size , name , stride = 1 , groups = 1 , act = None ): conv = fluid . layers . conv2d ( input = input , num_filters = num_filters , filter_size = filter_size , stride = stride , padding = ( filter_size - 1 ) // 2 , groups = groups , act = None , param_attr = ParamAttr ( name = name + \"_weights\" ), bias_attr = False , name = name + \"_out\" ) bn_name = name + \"_bn\" return fluid . layers . batch_norm ( input = conv , act = act , name = bn_name + '_output' , param_attr = ParamAttr ( name = bn_name + '_scale' ), bias_attr = ParamAttr ( bn_name + '_offset' ), moving_mean_name = bn_name + '_mean' , moving_variance_name = bn_name + '_variance' , ) main_program = fluid . Program () startup_program = fluid . 
Program () # X X O X O # conv1-->conv2-->sum1-->conv3-->conv4-->sum2-->conv5-->conv6 # | ^ | ^ # |____________| |____________________| # # X: prune output channels # O: prune input channels with fluid . program_guard ( main_program , startup_program ): input = fluid . data ( name = \"image\" , shape = [ None , 3 , 16 , 16 ]) conv1 = conv_bn_layer ( input , 8 , 3 , \"conv1\" ) conv2 = conv_bn_layer ( conv1 , 8 , 3 , \"conv2\" ) sum1 = conv1 + conv2 conv3 = conv_bn_layer ( sum1 , 8 , 3 , \"conv3\" ) conv4 = conv_bn_layer ( conv3 , 8 , 3 , \"conv4\" ) sum2 = conv4 + sum1 conv5 = conv_bn_layer ( sum2 , 8 , 3 , \"conv5\" ) conv6 = conv_bn_layer ( conv5 , 8 , 3 , \"conv6\" ) print ( \"FLOPs: {}\" . format ( flops ( main_program )))","title":"FLOPs"},{"location":"api/analysis_api/#model_size","text":"paddleslim.analysis.model_size(program) \u6e90\u4ee3\u7801 \u83b7\u5f97\u6307\u5b9a\u7f51\u7edc\u7684\u53c2\u6570\u6570\u91cf\u3002 \u53c2\u6570\uff1a program(paddle.fluid.Program) - \u5f85\u5206\u6790\u7684\u76ee\u6807\u7f51\u7edc\u3002\u66f4\u591a\u5173\u4e8eProgram\u7684\u4ecb\u7ecd\u8bf7\u53c2\u8003\uff1a Program\u6982\u5ff5\u4ecb\u7ecd \u3002 \u8fd4\u56de\u503c\uff1a model_size(int) - \u6574\u4e2a\u7f51\u7edc\u7684\u53c2\u6570\u6570\u91cf\u3002 \u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 import paddle.fluid as fluid from paddle.fluid.param_attr import ParamAttr from paddleslim.analysis import model_size def conv_layer ( input , num_filters , filter_size , name , stride = 1 , groups = 1 , act = None ): conv = fluid . layers . conv2d ( input = input , num_filters = num_filters , filter_size = filter_size , stride = stride , padding = ( filter_size - 1 ) // 2 , groups = groups , act = None , param_attr = ParamAttr ( name = name + \"_weights\" ), bias_attr = False , name = name + \"_out\" ) return conv main_program = fluid . Program () startup_program = fluid . Program () # X X O X O # conv1-->conv2-->sum1-->conv3-->conv4-->sum2-->conv5-->conv6 # | ^ | ^ # |____________| |____________________| # # X: prune output channels # O: prune input channels with fluid . program_guard ( main_program , startup_program ): input = fluid . data ( name = \"image\" , shape = [ None , 3 , 16 , 16 ]) conv1 = conv_layer ( input , 8 , 3 , \"conv1\" ) conv2 = conv_layer ( conv1 , 8 , 3 , \"conv2\" ) sum1 = conv1 + conv2 conv3 = conv_layer ( sum1 , 8 , 3 , \"conv3\" ) conv4 = conv_layer ( conv3 , 8 , 3 , \"conv4\" ) sum2 = conv4 + sum1 conv5 = conv_layer ( sum2 , 8 , 3 , \"conv5\" ) conv6 = conv_layer ( conv5 , 8 , 3 , \"conv6\" ) print ( \"FLOPs: {}\" . 
format ( model_size ( main_program )))","title":"model_size"},{"location":"api/analysis_api/#tablelatencyevaluator","text":"paddleslim.analysis.TableLatencyEvaluator(table_file, delimiter=\",\") \u6e90\u4ee3\u7801 \u57fa\u4e8e\u786c\u4ef6\u5ef6\u65f6\u8868\u7684\u6a21\u578b\u5ef6\u65f6\u8bc4\u4f30\u5668\u3002 \u53c2\u6570\uff1a table_file(str) - \u6240\u4f7f\u7528\u7684\u5ef6\u65f6\u8bc4\u4f30\u8868\u7684\u7edd\u5bf9\u8def\u5f84\u3002\u5173\u4e8e\u6f14\u793a\u8bc4\u4f30\u8868\u683c\u5f0f\u8bf7\u53c2\u8003\uff1a PaddleSlim\u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u683c\u5f0f delimiter(str) - \u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u8868\u4e2d\uff0c\u64cd\u4f5c\u4fe1\u606f\u4e4b\u524d\u6240\u4f7f\u7528\u7684\u5206\u5272\u7b26\uff0c\u9ed8\u8ba4\u4e3a\u82f1\u6587\u5b57\u7b26\u9017\u53f7\u3002 \u8fd4\u56de\u503c\uff1a Evaluator - \u786c\u4ef6\u5ef6\u65f6\u8bc4\u4f30\u5668\u7684\u5b9e\u4f8b\u3002 paddleslim.analysis.TableLatencyEvaluator.latency(graph) \u6e90\u4ee3\u7801 \u83b7\u5f97\u6307\u5b9a\u7f51\u7edc\u7684\u9884\u4f30\u5ef6\u65f6\u3002 \u53c2\u6570\uff1a graph(Program) - \u5f85\u9884\u4f30\u7684\u76ee\u6807\u7f51\u7edc\u3002 \u8fd4\u56de\u503c\uff1a latency - \u76ee\u6807\u7f51\u7edc\u7684\u9884\u4f30\u5ef6\u65f6\u3002","title":"TableLatencyEvaluator"},{"location":"api/api_guide/","text":"PaddleSlim API\u6587\u6863\u5bfc\u822a # \u6a21\u578b\u5206\u6790 # \u5377\u79ef\u901a\u9053\u526a\u88c1 # \u84b8\u998f # \u5355\u8fdb\u7a0b\u84b8\u998f \u901a\u9053\u526a\u88c1 \u91cf\u5316 # \u91cf\u5316\u8bad\u7ec3 \u79bb\u7ebf\u91cf\u5316 embedding\u91cf\u5316 \u5c0f\u6a21\u578b\u7ed3\u6784\u641c\u7d22 # nas API SearchSpace","title":"PaddleSlim API\u6587\u6863\u5bfc\u822a"},{"location":"api/api_guide/#paddleslim-api","text":"","title":"PaddleSlim API\u6587\u6863\u5bfc\u822a"},{"location":"api/api_guide/#_1","text":"","title":"\u6a21\u578b\u5206\u6790"},{"location":"api/api_guide/#_2","text":"","title":"\u5377\u79ef\u901a\u9053\u526a\u88c1"},{"location":"api/api_guide/#_3","text":"\u5355\u8fdb\u7a0b\u84b8\u998f \u901a\u9053\u526a\u88c1","title":"\u84b8\u998f"},{"location":"api/api_guide/#_4","text":"\u91cf\u5316\u8bad\u7ec3 \u79bb\u7ebf\u91cf\u5316 embedding\u91cf\u5316","title":"\u91cf\u5316"},{"location":"api/api_guide/#_5","text":"nas API SearchSpace","title":"\u5c0f\u6a21\u578b\u7ed3\u6784\u641c\u7d22"},{"location":"api/nas_api/","text":"paddleslim.nas API\u6587\u6863 # SANAS API\u6587\u6863 # class SANAS # SANAS\uff08Simulated Annealing Neural Architecture Search\uff09\u662f\u57fa\u4e8e\u6a21\u62df\u9000\u706b\u7b97\u6cd5\u8fdb\u884c\u6a21\u578b\u7ed3\u6784\u641c\u7d22\u7684\u7b97\u6cd5\uff0c\u4e00\u822c\u7528\u4e8e\u79bb\u6563\u641c\u7d22\u4efb\u52a1\u3002 paddleslim.nas.SANAS(configs, server_addr, init_temperature, reduce_rate, search_steps, save_checkpoint, load_checkpoint, is_server) \u53c2\u6570\uff1a - configs(list ): \u641c\u7d22\u7a7a\u95f4\u914d\u7f6e\u5217\u8868\uff0c\u683c\u5f0f\u662f [(key, {input_size, output_size, block_num, block_mask})] \u6216\u8005 [(key)] \uff08MobileNetV2\u3001MobilenetV1\u548cResNet\u7684\u641c\u7d22\u7a7a\u95f4\u4f7f\u7528\u548c\u539f\u672c\u7f51\u7edc\u7ed3\u6784\u76f8\u540c\u7684\u641c\u7d22\u7a7a\u95f4\uff0c\u6240\u4ee5\u4ec5\u9700\u6307\u5b9a key \u5373\u53ef\uff09, input_size \u548c output_size \u8868\u793a\u8f93\u5165\u548c\u8f93\u51fa\u7684\u7279\u5f81\u56fe\u7684\u5927\u5c0f\uff0c block_num \u662f\u6307\u641c\u7d22\u7f51\u7edc\u4e2d\u7684block\u6570\u91cf\uff0c block_mask 
\u662f\u4e00\u7ec4\u75310\u548c1\u7ec4\u6210\u7684\u5217\u8868\uff0c0\u4ee3\u8868\u4e0d\u8fdb\u884c\u4e0b\u91c7\u6837\u7684block\uff0c1\u4ee3\u8868\u4e0b\u91c7\u6837\u7684block\u3002 \u66f4\u591apaddleslim\u63d0\u4f9b\u7684\u641c\u7d22\u7a7a\u95f4\u914d\u7f6e\u53ef\u4ee5\u53c2\u8003\u3002 - server_addr(tuple): SANAS\u7684\u5730\u5740\uff0c\u5305\u62ecserver\u7684ip\u5730\u5740\u548c\u7aef\u53e3\u53f7\uff0c\u5982\u679cip\u5730\u5740\u4e3aNone\u6216\u8005\u4e3a\"\"\u7684\u8bdd\u5219\u9ed8\u8ba4\u4f7f\u7528\u672c\u673aip\u3002\u9ed8\u8ba4\uff1a\uff08\"\", 8881\uff09\u3002 - init_temperature(float): \u57fa\u4e8e\u6a21\u62df\u9000\u706b\u8fdb\u884c\u641c\u7d22\u7684\u521d\u59cb\u6e29\u5ea6\u3002\u9ed8\u8ba4\uff1a100\u3002 - reduce_rate(float): \u57fa\u4e8e\u6a21\u62df\u9000\u706b\u8fdb\u884c\u641c\u7d22\u7684\u8870\u51cf\u7387\u3002\u9ed8\u8ba4\uff1a0.85\u3002 - search_steps(int): \u641c\u7d22\u8fc7\u7a0b\u8fed\u4ee3\u7684\u6b21\u6570\u3002\u9ed8\u8ba4\uff1a300\u3002 - save_checkpoint(str|None): \u4fdd\u5b58checkpoint\u7684\u6587\u4ef6\u76ee\u5f55\uff0c\u5982\u679c\u8bbe\u7f6e\u4e3aNone\u7684\u8bdd\u5219\u4e0d\u4fdd\u5b58checkpoint\u3002\u9ed8\u8ba4\uff1a ./nas_checkpoint \u3002 - load_checkpoint(str|None): \u52a0\u8f7dcheckpoint\u7684\u6587\u4ef6\u76ee\u5f55\uff0c\u5982\u679c\u8bbe\u7f6e\u4e3aNone\u7684\u8bdd\u5219\u4e0d\u52a0\u8f7dcheckpoint\u3002\u9ed8\u8ba4\uff1aNone\u3002 - is_server(bool): \u5f53\u524d\u5b9e\u4f8b\u662f\u5426\u8981\u542f\u52a8\u4e00\u4e2aserver\u3002\u9ed8\u8ba4\uff1aTrue\u3002 \u8fd4\u56de\uff1a \u4e00\u4e2aSANAS\u7c7b\u7684\u5b9e\u4f8b \u793a\u4f8b\u4ee3\u7801\uff1a 1 2 3 from paddleslim.nas import SANAS config = [( 'MobileNetV2Space' )] sanas = SANAS ( config = config ) tokens2arch(tokens) \u901a\u8fc7\u4e00\u7ec4token\u5f97\u5230\u5b9e\u9645\u7684\u6a21\u578b\u7ed3\u6784\uff0c\u4e00\u822c\u7528\u6765\u628a\u641c\u7d22\u5230\u6700\u4f18\u7684token\u8f6c\u6362\u4e3a\u6a21\u578b\u7ed3\u6784\u7528\u6765\u505a\u6700\u540e\u7684\u8bad\u7ec3\u3002 \u53c2\u6570\uff1a - tokens(list): \u4e00\u7ec4token\u3002 \u8fd4\u56de \u8fd4\u56de\u4e00\u4e2a\u6a21\u578b\u7ed3\u6784\u5b9e\u4f8b\u3002 \u793a\u4f8b\u4ee3\u7801\uff1a 1 2 3 4 5 6 import paddle.fluid as fluid input = fluid . data ( name = 'input' , shape = [ None , 3 , 32 , 32 ], dtype = 'float32' ) archs = sanas . token2arch ( tokens ) for arch in archs : output = arch ( input ) input = output next_archs(): \u83b7\u53d6\u4e0b\u4e00\u7ec4\u6a21\u578b\u7ed3\u6784\u3002 \u8fd4\u56de \u8fd4\u56de\u6a21\u578b\u7ed3\u6784\u5b9e\u4f8b\u7684\u5217\u8868\uff0c\u5f62\u5f0f\u4e3alist\u3002 \u793a\u4f8b\u4ee3\u7801\uff1a 1 2 3 4 5 6 import paddle.fluid as fluid input = fluid . data ( name = 'input' , shape = [ None , 3 , 32 , 32 ], dtype = 'float32' ) archs = sanas . 
next_archs () for arch in archs : output = arch ( input ) input = output reward(score): \u628a\u5f53\u524d\u6a21\u578b\u7ed3\u6784\u7684\u5f97\u5206\u60c5\u51b5\u56de\u4f20\u3002 \u53c2\u6570\uff1a score : \u5f53\u524d\u6a21\u578b\u7684\u5f97\u5206\uff0c\u5206\u6570\u8d8a\u5927\u8d8a\u597d\u3002 \u8fd4\u56de \u6a21\u578b\u7ed3\u6784\u66f4\u65b0\u6210\u529f\u6216\u8005\u5931\u8d25\uff0c\u6210\u529f\u5219\u8fd4\u56de True \uff0c\u5931\u8d25\u5219\u8fd4\u56de False \u3002 \u4ee3\u7801\u793a\u4f8b 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 import numpy as np import paddle import paddle.fluid as fluid from paddleslim.nas import SANAS from paddleslim.analysis import flops max_flops = 321208544 batch_size = 256 # \u641c\u7d22\u7a7a\u95f4\u914d\u7f6e config = [( 'MobileNetV2Space' )] # \u5b9e\u4f8b\u5316SANAS sa_nas = SANAS ( config , server_addr = ( \"\" , 8887 ), init_temperature = 10.24 , reduce_rate = 0.85 , search_steps = 100 , is_server = True ) for step in range ( 100 ): archs = sa_nas . next_archs () train_program = fluid . Program () test_program = fluid . Program () startup_program = fluid . Program () ### \u6784\u9020\u8bad\u7ec3program with fluid . program_guard ( train_program , startup_program ): image = fluid . data ( name = 'image' , shape = [ None , 3 , 32 , 32 ], dtype = 'float32' ) label = fluid . data ( name = 'label' , shape = [ None , 1 ], dtype = 'int64' ) for arch in archs : output = arch ( image ) out = fluid . layers . fc ( output , size = 10 , act = \"softmax\" ) softmax_out = fluid . layers . softmax ( input = out , use_cudnn = False ) cost = fluid . layers . cross_entropy ( input = softmax_out , label = label ) avg_cost = fluid . layers . mean ( cost ) acc_top1 = fluid . layers . accuracy ( input = softmax_out , label = label , k = 1 ) ### \u6784\u9020\u6d4b\u8bd5program test_program = train_program . clone ( for_test = True ) ### \u5b9a\u4e49\u4f18\u5316\u5668 sgd = fluid . optimizer . SGD ( learning_rate = 1e-3 ) sgd . minimize ( avg_cost ) ### \u589e\u52a0\u9650\u5236\u6761\u4ef6\uff0c\u5982\u679c\u6ca1\u6709\u5219\u8fdb\u884c\u65e0\u9650\u5236\u641c\u7d22 if flops ( train_program ) > max_flops : continue ### \u5b9a\u4e49\u4ee3\u7801\u662f\u5728cpu\u4e0a\u8fd0\u884c place = fluid . CPUPlace () exe = fluid . Executor ( place ) exe . run ( startup_program ) ### \u5b9a\u4e49\u8bad\u7ec3\u8f93\u5165\u6570\u636e train_reader = paddle . batch ( paddle . reader . shuffle ( paddle . dataset . cifar . train10 ( cycle = False ), buf_size = 1024 ), batch_size = batch_size , drop_last = True ) ### \u5b9a\u4e49\u9884\u6d4b\u8f93\u5165\u6570\u636e test_reader = paddle . batch ( paddle . dataset . cifar . test10 ( cycle = False ), batch_size = batch_size , drop_last = False ) train_feeder = fluid . DataFeeder ([ image , label ], place , program = train_program ) test_feeder = fluid . DataFeeder ([ image , label ], place , program = test_program ) ### \u5f00\u59cb\u8bad\u7ec3\uff0c\u6bcf\u4e2a\u641c\u7d22\u7ed3\u679c\u8bad\u7ec35\u4e2aepoch for epoch_id in range ( 5 ): for batch_id , data in enumerate ( train_reader ()): fetches = [ avg_cost . name ] outs = exe . run ( train_program , feed = train_feeder . feed ( data ), fetch_list = fetches )[ 0 ] if batch_id % 10 == 0 : print ( 'TRAIN: steps: {}, epoch: {}, batch: {}, cost: {}' . 
format ( step , epoch_id , batch_id , outs [ 0 ])) ### \u5f00\u59cb\u9884\u6d4b\uff0c\u5f97\u5230\u6700\u7ec8\u7684\u6d4b\u8bd5\u7ed3\u679c\u4f5c\u4e3ascore\u56de\u4f20\u7ed9sa_nas reward = [] for batch_id , data in enumerate ( test_reader ()): test_fetches = [ avg_cost . name , acc_top1 . name ] batch_reward = exe . run ( test_program , feed = test_feeder . feed ( data ), fetch_list = test_fetches ) reward_avg = np . mean ( np . array ( batch_reward ), axis = 1 ) reward . append ( reward_avg ) print ( 'TEST: step: {}, batch: {}, avg_cost: {}, acc_top1: {}' . format ( step , batch_id , batch_reward [ 0 ], batch_reward [ 1 ])) finally_reward = np . mean ( np . array ( reward ), axis = 0 ) print ( 'FINAL TEST: avg_cost: {}, acc_top1: {}' . format ( finally_reward [ 0 ], finally_reward [ 1 ])) ### \u56de\u4f20score sa_nas . reward ( float ( finally_reward [ 1 ]))","title":"SA\u641c\u7d22"},{"location":"api/nas_api/#paddleslimnas-api","text":"","title":"paddleslim.nas API\u6587\u6863"},{"location":"api/nas_api/#sanas-api","text":"","title":"SANAS API\u6587\u6863"},{"location":"api/nas_api/#class-sanas","text":"SANAS\uff08Simulated Annealing Neural Architecture Search\uff09\u662f\u57fa\u4e8e\u6a21\u62df\u9000\u706b\u7b97\u6cd5\u8fdb\u884c\u6a21\u578b\u7ed3\u6784\u641c\u7d22\u7684\u7b97\u6cd5\uff0c\u4e00\u822c\u7528\u4e8e\u79bb\u6563\u641c\u7d22\u4efb\u52a1\u3002 paddleslim.nas.SANAS(configs, server_addr, init_temperature, reduce_rate, search_steps, save_checkpoint, load_checkpoint, is_server) \u53c2\u6570\uff1a - configs(list ): \u641c\u7d22\u7a7a\u95f4\u914d\u7f6e\u5217\u8868\uff0c\u683c\u5f0f\u662f [(key, {input_size, output_size, block_num, block_mask})] \u6216\u8005 [(key)] \uff08MobileNetV2\u3001MobilenetV1\u548cResNet\u7684\u641c\u7d22\u7a7a\u95f4\u4f7f\u7528\u548c\u539f\u672c\u7f51\u7edc\u7ed3\u6784\u76f8\u540c\u7684\u641c\u7d22\u7a7a\u95f4\uff0c\u6240\u4ee5\u4ec5\u9700\u6307\u5b9a key \u5373\u53ef\uff09, input_size \u548c output_size \u8868\u793a\u8f93\u5165\u548c\u8f93\u51fa\u7684\u7279\u5f81\u56fe\u7684\u5927\u5c0f\uff0c block_num \u662f\u6307\u641c\u7d22\u7f51\u7edc\u4e2d\u7684block\u6570\u91cf\uff0c block_mask \u662f\u4e00\u7ec4\u75310\u548c1\u7ec4\u6210\u7684\u5217\u8868\uff0c0\u4ee3\u8868\u4e0d\u8fdb\u884c\u4e0b\u91c7\u6837\u7684block\uff0c1\u4ee3\u8868\u4e0b\u91c7\u6837\u7684block\u3002 \u66f4\u591apaddleslim\u63d0\u4f9b\u7684\u641c\u7d22\u7a7a\u95f4\u914d\u7f6e\u53ef\u4ee5\u53c2\u8003\u3002 - server_addr(tuple): SANAS\u7684\u5730\u5740\uff0c\u5305\u62ecserver\u7684ip\u5730\u5740\u548c\u7aef\u53e3\u53f7\uff0c\u5982\u679cip\u5730\u5740\u4e3aNone\u6216\u8005\u4e3a\"\"\u7684\u8bdd\u5219\u9ed8\u8ba4\u4f7f\u7528\u672c\u673aip\u3002\u9ed8\u8ba4\uff1a\uff08\"\", 8881\uff09\u3002 - init_temperature(float): \u57fa\u4e8e\u6a21\u62df\u9000\u706b\u8fdb\u884c\u641c\u7d22\u7684\u521d\u59cb\u6e29\u5ea6\u3002\u9ed8\u8ba4\uff1a100\u3002 - reduce_rate(float): \u57fa\u4e8e\u6a21\u62df\u9000\u706b\u8fdb\u884c\u641c\u7d22\u7684\u8870\u51cf\u7387\u3002\u9ed8\u8ba4\uff1a0.85\u3002 - search_steps(int): \u641c\u7d22\u8fc7\u7a0b\u8fed\u4ee3\u7684\u6b21\u6570\u3002\u9ed8\u8ba4\uff1a300\u3002 - save_checkpoint(str|None): \u4fdd\u5b58checkpoint\u7684\u6587\u4ef6\u76ee\u5f55\uff0c\u5982\u679c\u8bbe\u7f6e\u4e3aNone\u7684\u8bdd\u5219\u4e0d\u4fdd\u5b58checkpoint\u3002\u9ed8\u8ba4\uff1a ./nas_checkpoint \u3002 - load_checkpoint(str|None): \u52a0\u8f7dcheckpoint\u7684\u6587\u4ef6\u76ee\u5f55\uff0c\u5982\u679c\u8bbe\u7f6e\u4e3aNone\u7684\u8bdd\u5219\u4e0d\u52a0\u8f7dcheckpoint\u3002\u9ed8\u8ba4\uff1aNone\u3002 - 
is_server(bool): \u5f53\u524d\u5b9e\u4f8b\u662f\u5426\u8981\u542f\u52a8\u4e00\u4e2aserver\u3002\u9ed8\u8ba4\uff1aTrue\u3002 \u8fd4\u56de\uff1a \u4e00\u4e2aSANAS\u7c7b\u7684\u5b9e\u4f8b \u793a\u4f8b\u4ee3\u7801\uff1a 1 2 3 from paddleslim.nas import SANAS config = [( 'MobileNetV2Space' )] sanas = SANAS ( config = config ) tokens2arch(tokens) \u901a\u8fc7\u4e00\u7ec4token\u5f97\u5230\u5b9e\u9645\u7684\u6a21\u578b\u7ed3\u6784\uff0c\u4e00\u822c\u7528\u6765\u628a\u641c\u7d22\u5230\u6700\u4f18\u7684token\u8f6c\u6362\u4e3a\u6a21\u578b\u7ed3\u6784\u7528\u6765\u505a\u6700\u540e\u7684\u8bad\u7ec3\u3002 \u53c2\u6570\uff1a - tokens(list): \u4e00\u7ec4token\u3002 \u8fd4\u56de \u8fd4\u56de\u4e00\u4e2a\u6a21\u578b\u7ed3\u6784\u5b9e\u4f8b\u3002 \u793a\u4f8b\u4ee3\u7801\uff1a 1 2 3 4 5 6 import paddle.fluid as fluid input = fluid . data ( name = 'input' , shape = [ None , 3 , 32 , 32 ], dtype = 'float32' ) archs = sanas . token2arch ( tokens ) for arch in archs : output = arch ( input ) input = output next_archs(): \u83b7\u53d6\u4e0b\u4e00\u7ec4\u6a21\u578b\u7ed3\u6784\u3002 \u8fd4\u56de \u8fd4\u56de\u6a21\u578b\u7ed3\u6784\u5b9e\u4f8b\u7684\u5217\u8868\uff0c\u5f62\u5f0f\u4e3alist\u3002 \u793a\u4f8b\u4ee3\u7801\uff1a 1 2 3 4 5 6 import paddle.fluid as fluid input = fluid . data ( name = 'input' , shape = [ None , 3 , 32 , 32 ], dtype = 'float32' ) archs = sanas . next_archs () for arch in archs : output = arch ( input ) input = output reward(score): \u628a\u5f53\u524d\u6a21\u578b\u7ed3\u6784\u7684\u5f97\u5206\u60c5\u51b5\u56de\u4f20\u3002 \u53c2\u6570\uff1a score : \u5f53\u524d\u6a21\u578b\u7684\u5f97\u5206\uff0c\u5206\u6570\u8d8a\u5927\u8d8a\u597d\u3002 \u8fd4\u56de \u6a21\u578b\u7ed3\u6784\u66f4\u65b0\u6210\u529f\u6216\u8005\u5931\u8d25\uff0c\u6210\u529f\u5219\u8fd4\u56de True \uff0c\u5931\u8d25\u5219\u8fd4\u56de False \u3002 \u4ee3\u7801\u793a\u4f8b 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 import numpy as np import paddle import paddle.fluid as fluid from paddleslim.nas import SANAS from paddleslim.analysis import flops max_flops = 321208544 batch_size = 256 # \u641c\u7d22\u7a7a\u95f4\u914d\u7f6e config = [( 'MobileNetV2Space' )] # \u5b9e\u4f8b\u5316SANAS sa_nas = SANAS ( config , server_addr = ( \"\" , 8887 ), init_temperature = 10.24 , reduce_rate = 0.85 , search_steps = 100 , is_server = True ) for step in range ( 100 ): archs = sa_nas . next_archs () train_program = fluid . Program () test_program = fluid . Program () startup_program = fluid . Program () ### \u6784\u9020\u8bad\u7ec3program with fluid . program_guard ( train_program , startup_program ): image = fluid . data ( name = 'image' , shape = [ None , 3 , 32 , 32 ], dtype = 'float32' ) label = fluid . data ( name = 'label' , shape = [ None , 1 ], dtype = 'int64' ) for arch in archs : output = arch ( image ) out = fluid . layers . fc ( output , size = 10 , act = \"softmax\" ) softmax_out = fluid . layers . softmax ( input = out , use_cudnn = False ) cost = fluid . layers . cross_entropy ( input = softmax_out , label = label ) avg_cost = fluid . layers . mean ( cost ) acc_top1 = fluid . layers . accuracy ( input = softmax_out , label = label , k = 1 ) ### \u6784\u9020\u6d4b\u8bd5program test_program = train_program . clone ( for_test = True ) ### \u5b9a\u4e49\u4f18\u5316\u5668 sgd = fluid . optimizer . 
SGD ( learning_rate = 1e-3 ) sgd . minimize ( avg_cost ) ### \u589e\u52a0\u9650\u5236\u6761\u4ef6\uff0c\u5982\u679c\u6ca1\u6709\u5219\u8fdb\u884c\u65e0\u9650\u5236\u641c\u7d22 if flops ( train_program ) > max_flops : continue ### \u5b9a\u4e49\u4ee3\u7801\u662f\u5728cpu\u4e0a\u8fd0\u884c place = fluid . CPUPlace () exe = fluid . Executor ( place ) exe . run ( startup_program ) ### \u5b9a\u4e49\u8bad\u7ec3\u8f93\u5165\u6570\u636e train_reader = paddle . batch ( paddle . reader . shuffle ( paddle . dataset . cifar . train10 ( cycle = False ), buf_size = 1024 ), batch_size = batch_size , drop_last = True ) ### \u5b9a\u4e49\u9884\u6d4b\u8f93\u5165\u6570\u636e test_reader = paddle . batch ( paddle . dataset . cifar . test10 ( cycle = False ), batch_size = batch_size , drop_last = False ) train_feeder = fluid . DataFeeder ([ image , label ], place , program = train_program ) test_feeder = fluid . DataFeeder ([ image , label ], place , program = test_program ) ### \u5f00\u59cb\u8bad\u7ec3\uff0c\u6bcf\u4e2a\u641c\u7d22\u7ed3\u679c\u8bad\u7ec35\u4e2aepoch for epoch_id in range ( 5 ): for batch_id , data in enumerate ( train_reader ()): fetches = [ avg_cost . name ] outs = exe . run ( train_program , feed = train_feeder . feed ( data ), fetch_list = fetches )[ 0 ] if batch_id % 10 == 0 : print ( 'TRAIN: steps: {}, epoch: {}, batch: {}, cost: {}' . format ( step , epoch_id , batch_id , outs [ 0 ])) ### \u5f00\u59cb\u9884\u6d4b\uff0c\u5f97\u5230\u6700\u7ec8\u7684\u6d4b\u8bd5\u7ed3\u679c\u4f5c\u4e3ascore\u56de\u4f20\u7ed9sa_nas reward = [] for batch_id , data in enumerate ( test_reader ()): test_fetches = [ avg_cost . name , acc_top1 . name ] batch_reward = exe . run ( test_program , feed = test_feeder . feed ( data ), fetch_list = test_fetches ) reward_avg = np . mean ( np . array ( batch_reward ), axis = 1 ) reward . append ( reward_avg ) print ( 'TEST: step: {}, batch: {}, avg_cost: {}, acc_top1: {}' . format ( step , batch_id , batch_reward [ 0 ], batch_reward [ 1 ])) finally_reward = np . mean ( np . array ( reward ), axis = 0 ) print ( 'FINAL TEST: avg_cost: {}, acc_top1: {}' . format ( finally_reward [ 0 ], finally_reward [ 1 ])) ### \u56de\u4f20score sa_nas . 
reward ( float ( finally_reward [ 1 ]))","title":"class SANAS"},{"location":"api/prune_api/","text":"Pruner # paddleslim.prune.Pruner(criterion=\"l1_norm\") \u6e90\u4ee3\u7801 \u5bf9\u5377\u79ef\u7f51\u7edc\u7684\u901a\u9053\u8fdb\u884c\u4e00\u6b21\u526a\u88c1\u3002\u526a\u88c1\u4e00\u4e2a\u5377\u79ef\u5c42\u7684\u901a\u9053\uff0c\u662f\u6307\u526a\u88c1\u8be5\u5377\u79ef\u5c42\u8f93\u51fa\u7684\u901a\u9053\u3002\u5377\u79ef\u5c42\u7684\u6743\u91cd\u5f62\u72b6\u4e3a [output_channel, input_channel, kernel_size, kernel_size] \uff0c\u901a\u8fc7\u526a\u88c1\u8be5\u6743\u91cd\u7684\u7b2c\u4e00\u7eac\u5ea6\u8fbe\u5230\u526a\u88c1\u8f93\u51fa\u901a\u9053\u6570\u7684\u76ee\u7684\u3002 \u53c2\u6570\uff1a criterion - \u8bc4\u4f30\u4e00\u4e2a\u5377\u79ef\u5c42\u5185\u901a\u9053\u91cd\u8981\u6027\u6240\u53c2\u8003\u7684\u6307\u6807\u3002\u76ee\u524d\u4ec5\u652f\u6301 l1_norm \u3002\u9ed8\u8ba4\u4e3a l1_norm \u3002 \u8fd4\u56de\uff1a \u4e00\u4e2aPruner\u7c7b\u7684\u5b9e\u4f8b \u793a\u4f8b\u4ee3\u7801\uff1a 1 2 from paddleslim.prune import Pruner pruner = Pruner () paddleslim.prune.Pruner.prune(program, scope, params, ratios, place=None, lazy=False, only_graph=False, param_backup=False, param_shape_backup=False) \u6e90\u4ee3\u7801 \u5bf9\u76ee\u6807\u7f51\u7edc\u7684\u4e00\u7ec4\u5377\u79ef\u5c42\u7684\u6743\u91cd\u8fdb\u884c\u88c1\u526a\u3002 \u53c2\u6570\uff1a program(paddle.fluid.Program) - \u8981\u88c1\u526a\u7684\u76ee\u6807\u7f51\u7edc\u3002\u66f4\u591a\u5173\u4e8eProgram\u7684\u4ecb\u7ecd\u8bf7\u53c2\u8003\uff1a Program\u6982\u5ff5\u4ecb\u7ecd \u3002 scope(paddle.fluid.Scope) - \u8981\u88c1\u526a\u7684\u6743\u91cd\u6240\u5728\u7684 scope \uff0cPaddle\u4e2d\u7528 scope \u5b9e\u4f8b\u5b58\u653e\u6a21\u578b\u53c2\u6570\u548c\u8fd0\u884c\u65f6\u53d8\u91cf\u7684\u503c\u3002Scope\u4e2d\u7684\u53c2\u6570\u503c\u4f1a\u88ab inplace \u7684\u88c1\u526a\u3002\u66f4\u591a\u4ecb\u7ecd\u8bf7\u53c2\u8003 Scope\u6982\u5ff5\u4ecb\u7ecd params(list ) - \u9700\u8981\u88ab\u88c1\u526a\u7684\u5377\u79ef\u5c42\u7684\u53c2\u6570\u7684\u540d\u79f0\u5217\u8868\u3002\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u65b9\u5f0f\u67e5\u770b\u6a21\u578b\u4e2d\u6240\u6709\u53c2\u6570\u7684\u540d\u79f0: 1 2 3 for block in program . blocks : for param in block . all_parameters () : print ( \" param: {}; shape: {} \" . format ( param . name , param . 
shape )) ratios(list ) - \u7528\u4e8e\u88c1\u526a params \u7684\u526a\u5207\u7387\uff0c\u7c7b\u578b\u4e3a\u5217\u8868\u3002\u8be5\u5217\u8868\u957f\u5ea6\u5fc5\u987b\u4e0e params \u7684\u957f\u5ea6\u4e00\u81f4\u3002 place(paddle.fluid.Place) - \u5f85\u88c1\u526a\u53c2\u6570\u6240\u5728\u7684\u8bbe\u5907\u4f4d\u7f6e\uff0c\u53ef\u4ee5\u662f CUDAPlace \u6216 CPUPlace \u3002 Place\u6982\u5ff5\u4ecb\u7ecd lazy(bool) - lazy \u4e3aTrue\u65f6\uff0c\u901a\u8fc7\u5c06\u6307\u5b9a\u901a\u9053\u7684\u53c2\u6570\u7f6e\u96f6\u8fbe\u5230\u88c1\u526a\u7684\u76ee\u7684\uff0c\u53c2\u6570\u7684 shape\u4fdd\u6301\u4e0d\u53d8 \uff1b lazy \u4e3aFalse\u65f6\uff0c\u76f4\u63a5\u5c06\u8981\u88c1\u7684\u901a\u9053\u7684\u53c2\u6570\u5220\u9664\uff0c\u53c2\u6570\u7684 shape \u4f1a\u53d1\u751f\u53d8\u5316\u3002 only_graph(bool) - \u662f\u5426\u53ea\u88c1\u526a\u7f51\u7edc\u7ed3\u6784\u3002\u5728Paddle\u4e2d\uff0cProgram\u5b9a\u4e49\u4e86\u7f51\u7edc\u7ed3\u6784\uff0cScope\u5b58\u50a8\u53c2\u6570\u7684\u6570\u503c\u3002\u4e00\u4e2aScope\u5b9e\u4f8b\u53ef\u4ee5\u88ab\u591a\u4e2aProgram\u4f7f\u7528\uff0c\u6bd4\u5982\u5b9a\u4e49\u4e86\u8bad\u7ec3\u7f51\u7edc\u7684Program\u548c\u5b9a\u4e49\u4e86\u6d4b\u8bd5\u7f51\u7edc\u7684Program\u662f\u4f7f\u7528\u540c\u4e00\u4e2aScope\u5b9e\u4f8b\u7684\u3002 only_graph \u4e3aTrue\u65f6\uff0c\u53ea\u5bf9Program\u4e2d\u5b9a\u4e49\u7684\u5377\u79ef\u7684\u901a\u9053\u8fdb\u884c\u526a\u88c1\uff1b only_graph \u4e3afalse\u65f6\uff0cScope\u4e2d\u5377\u79ef\u53c2\u6570\u7684\u6570\u503c\u4e5f\u4f1a\u88ab\u526a\u88c1\u3002\u9ed8\u8ba4\u4e3aFalse\u3002 param_backup(bool) - \u662f\u5426\u8fd4\u56de\u5bf9\u53c2\u6570\u503c\u7684\u5907\u4efd\u3002\u9ed8\u8ba4\u4e3aFalse\u3002 param_shape_backup(bool) - \u662f\u5426\u8fd4\u56de\u5bf9\u53c2\u6570 shape \u7684\u5907\u4efd\u3002\u9ed8\u8ba4\u4e3aFalse\u3002 \u8fd4\u56de\uff1a pruned_program(paddle.fluid.Program) - \u88ab\u88c1\u526a\u540e\u7684Program\u3002 param_backup(dict) - \u5bf9\u53c2\u6570\u6570\u503c\u7684\u5907\u4efd\uff0c\u7528\u4e8e\u6062\u590dScope\u4e2d\u7684\u53c2\u6570\u6570\u503c\u3002 param_shape_backup(dict) - \u5bf9\u53c2\u6570\u5f62\u72b6\u7684\u5907\u4efd\u3002 \u793a\u4f8b\uff1a \u70b9\u51fb AIStudio \u6267\u884c\u4ee5\u4e0b\u793a\u4f8b\u4ee3\u7801\u3002 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 import paddle.fluid as fluid from paddle.fluid.param_attr import ParamAttr from paddleslim.prune import Pruner def conv_bn_layer ( input , num_filters , filter_size , name , stride = 1 , groups = 1 , act = None ): conv = fluid . layers . conv2d ( input = input , num_filters = num_filters , filter_size = filter_size , stride = stride , padding = ( filter_size - 1 ) // 2 , groups = groups , act = None , param_attr = ParamAttr ( name = name + \"_weights\" ), bias_attr = False , name = name + \"_out\" ) bn_name = name + \"_bn\" return fluid . layers . batch_norm ( input = conv , act = act , name = bn_name + '_output' , param_attr = ParamAttr ( name = bn_name + '_scale' ), bias_attr = ParamAttr ( bn_name + '_offset' ), moving_mean_name = bn_name + '_mean' , moving_variance_name = bn_name + '_variance' , ) main_program = fluid . Program () startup_program = fluid . Program () # X X O X O # conv1-->conv2-->sum1-->conv3-->conv4-->sum2-->conv5-->conv6 # | ^ | ^ # |____________| |____________________| # # X: prune output channels # O: prune input channels with fluid . 
program_guard ( main_program , startup_program ): input = fluid . data ( name = \"image\" , shape = [ None , 3 , 16 , 16 ]) conv1 = conv_bn_layer ( input , 8 , 3 , \"conv1\" ) conv2 = conv_bn_layer ( conv1 , 8 , 3 , \"conv2\" ) sum1 = conv1 + conv2 conv3 = conv_bn_layer ( sum1 , 8 , 3 , \"conv3\" ) conv4 = conv_bn_layer ( conv3 , 8 , 3 , \"conv4\" ) sum2 = conv4 + sum1 conv5 = conv_bn_layer ( sum2 , 8 , 3 , \"conv5\" ) conv6 = conv_bn_layer ( conv5 , 8 , 3 , \"conv6\" ) place = fluid . CPUPlace () exe = fluid . Executor ( place ) scope = fluid . Scope () exe . run ( startup_program , scope = scope ) pruner = Pruner () main_program , _ , _ = pruner . prune ( main_program , scope , params = [ \"conv4_weights\" ], ratios = [ 0.5 ], place = place , lazy = False , only_graph = False , param_backup = False , param_shape_backup = False ) for param in main_program . global_block () . all_parameters (): if \"weights\" in param . name : print ( \"param name: {}; param shape: {}\" . format ( param . name , param . shape )) sensitivity # paddleslim.prune.sensitivity(program, place, param_names, eval_func, sensitivities_file=None, pruned_ratios=None) \u6e90\u4ee3\u7801 \u8ba1\u7b97\u7f51\u7edc\u4e2d\u6bcf\u4e2a\u5377\u79ef\u5c42\u7684\u654f\u611f\u5ea6\u3002\u6bcf\u4e2a\u5377\u79ef\u5c42\u7684\u654f\u611f\u5ea6\u4fe1\u606f\u7edf\u8ba1\u65b9\u6cd5\u4e3a\uff1a\u4f9d\u6b21\u526a\u6389\u5f53\u524d\u5377\u79ef\u5c42\u4e0d\u540c\u6bd4\u4f8b\u7684\u8f93\u51fa\u901a\u9053\u6570\uff0c\u5728\u6d4b\u8bd5\u96c6\u4e0a\u8ba1\u7b97\u526a\u88c1\u540e\u7684\u7cbe\u5ea6\u635f\u5931\u3002\u5f97\u5230\u654f\u611f\u5ea6\u4fe1\u606f\u540e\uff0c\u53ef\u4ee5\u901a\u8fc7\u89c2\u5bdf\u6216\u5176\u5b83\u65b9\u5f0f\u786e\u5b9a\u6bcf\u5c42\u5377\u79ef\u7684\u526a\u88c1\u7387\u3002 \u53c2\u6570\uff1a program(paddle.fluid.Program) - \u5f85\u8bc4\u4f30\u7684\u76ee\u6807\u7f51\u7edc\u3002\u66f4\u591a\u5173\u4e8eProgram\u7684\u4ecb\u7ecd\u8bf7\u53c2\u8003\uff1a Program\u6982\u5ff5\u4ecb\u7ecd \u3002 place(paddle.fluid.Place) - \u5f85\u5206\u6790\u7684\u53c2\u6570\u6240\u5728\u7684\u8bbe\u5907\u4f4d\u7f6e\uff0c\u53ef\u4ee5\u662f CUDAPlace \u6216 CPUPlace \u3002 Place\u6982\u5ff5\u4ecb\u7ecd param_names(list ) - \u5f85\u5206\u6790\u7684\u5377\u79ef\u5c42\u7684\u53c2\u6570\u7684\u540d\u79f0\u5217\u8868\u3002\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u65b9\u5f0f\u67e5\u770b\u6a21\u578b\u4e2d\u6240\u6709\u53c2\u6570\u7684\u540d\u79f0: 1 2 3 for block in program . blocks : for param in block . all_parameters () : print ( \" param: {}; shape: {} \" . format ( param . name , param . 
shape )) eval_func(function) - \u7528\u4e8e\u8bc4\u4f30\u88c1\u526a\u540e\u6a21\u578b\u6548\u679c\u7684\u56de\u8c03\u51fd\u6570\u3002\u8be5\u56de\u8c03\u51fd\u6570\u63a5\u53d7\u88ab\u88c1\u526a\u540e\u7684 program \u4e3a\u53c2\u6570\uff0c\u8fd4\u56de\u4e00\u4e2a\u8868\u793a\u5f53\u524dprogram\u7684\u7cbe\u5ea6\uff0c\u7528\u4ee5\u8ba1\u7b97\u5f53\u524d\u88c1\u526a\u5e26\u6765\u7684\u7cbe\u5ea6\u635f\u5931\u3002 sensitivities_file(str) - \u4fdd\u5b58\u654f\u611f\u5ea6\u4fe1\u606f\u7684\u672c\u5730\u6587\u4ef6\u7cfb\u7edf\u7684\u6587\u4ef6\u3002\u5728\u654f\u611f\u5ea6\u8ba1\u7b97\u8fc7\u7a0b\u4e2d\uff0c\u4f1a\u6301\u7eed\u5c06\u65b0\u8ba1\u7b97\u51fa\u7684\u654f\u611f\u5ea6\u4fe1\u606f\u8ffd\u52a0\u5230\u8be5\u6587\u4ef6\u4e2d\u3002\u91cd\u542f\u4efb\u52a1\u540e\uff0c\u6587\u4ef6\u4e2d\u5df2\u6709\u654f\u611f\u5ea6\u4fe1\u606f\u4e0d\u4f1a\u88ab\u91cd\u590d\u8ba1\u7b97\u3002\u8be5\u6587\u4ef6\u53ef\u4ee5\u7528 pickle \u52a0\u8f7d\u3002 pruned_ratios(list ) - \u8ba1\u7b97\u5377\u79ef\u5c42\u654f\u611f\u5ea6\u4fe1\u606f\u65f6\uff0c\u4f9d\u6b21\u526a\u6389\u7684\u901a\u9053\u6570\u6bd4\u4f8b\u3002\u9ed8\u8ba4\u4e3a[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\u3002 \u8fd4\u56de\uff1a sensitivities(dict) - \u5b58\u653e\u654f\u611f\u5ea6\u4fe1\u606f\u7684dict\uff0c\u5176\u683c\u5f0f\u4e3a\uff1a 1 2 3 4 5 6 7 8 9 { \"weight_0\" : { 0 . 1 : 0 . 22 , 0 . 2 : 0 . 33 } , \"weight_1\" : { 0 . 1 : 0 . 21 , 0 . 2 : 0 . 4 } } \u5176\u4e2d\uff0c weight_0 \u662f\u5377\u79ef\u5c42\u53c2\u6570\u7684\u540d\u79f0\uff0csensitivities['weight_0']\u7684 value \u4e3a\u526a\u88c1\u6bd4\u4f8b\uff0c value \u4e3a\u7cbe\u5ea6\u635f\u5931\u7684\u6bd4\u4f8b\u3002 \u793a\u4f8b\uff1a \u70b9\u51fb AIStudio \u8fd0\u884c\u4ee5\u4e0b\u793a\u4f8b\u4ee3\u7801\u3002 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 import paddle import numpy as np import paddle.fluid as fluid from paddle.fluid.param_attr import ParamAttr from paddleslim.prune import sensitivity import paddle.dataset.mnist as reader def conv_bn_layer ( input , num_filters , filter_size , name , stride = 1 , groups = 1 , act = None ): conv = fluid . layers . conv2d ( input = input , num_filters = num_filters , filter_size = filter_size , stride = stride , padding = ( filter_size - 1 ) // 2 , groups = groups , act = None , param_attr = ParamAttr ( name = name + \"_weights\" ), bias_attr = False , name = name + \"_out\" ) bn_name = name + \"_bn\" return fluid . layers . batch_norm ( input = conv , act = act , name = bn_name + '_output' , param_attr = ParamAttr ( name = bn_name + '_scale' ), bias_attr = ParamAttr ( bn_name + '_offset' ), moving_mean_name = bn_name + '_mean' , moving_variance_name = bn_name + '_variance' , ) main_program = fluid . Program () startup_program = fluid . Program () # X X O X O # conv1-->conv2-->sum1-->conv3-->conv4-->sum2-->conv5-->conv6 # | ^ | ^ # |____________| |____________________| # # X: prune output channels # O: prune input channels image_shape = [ 1 , 28 , 28 ] with fluid . program_guard ( main_program , startup_program ): image = fluid . data ( name = 'image' , shape = [ None ] + image_shape , dtype = 'float32' ) label = fluid . 
data ( name = 'label' , shape = [ None , 1 ], dtype = 'int64' ) conv1 = conv_bn_layer ( image , 8 , 3 , \"conv1\" ) conv2 = conv_bn_layer ( conv1 , 8 , 3 , \"conv2\" ) sum1 = conv1 + conv2 conv3 = conv_bn_layer ( sum1 , 8 , 3 , \"conv3\" ) conv4 = conv_bn_layer ( conv3 , 8 , 3 , \"conv4\" ) sum2 = conv4 + sum1 conv5 = conv_bn_layer ( sum2 , 8 , 3 , \"conv5\" ) conv6 = conv_bn_layer ( conv5 , 8 , 3 , \"conv6\" ) out = fluid . layers . fc ( conv6 , size = 10 , act = \"softmax\" ) # cost = fluid.layers.cross_entropy(input=out, label=label) # avg_cost = fluid.layers.mean(x=cost) acc_top1 = fluid . layers . accuracy ( input = out , label = label , k = 1 ) # acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5) place = fluid . CPUPlace () exe = fluid . Executor ( place ) exe . run ( startup_program ) val_reader = paddle . batch ( reader . test (), batch_size = 128 ) val_feeder = feeder = fluid . DataFeeder ( [ image , label ], place , program = main_program ) def eval_func ( program ): acc_top1_ns = [] for data in val_reader (): acc_top1_n = exe . run ( program , feed = val_feeder . feed ( data ), fetch_list = [ acc_top1 . name ]) acc_top1_ns . append ( np . mean ( acc_top1_n )) return np . mean ( acc_top1_ns ) param_names = [] for param in main_program . global_block () . all_parameters (): if \"weights\" in param . name : param_names . append ( param . name ) sensitivities = sensitivity ( main_program , place , param_names , eval_func , sensitivities_file = \"./sensitive.data\" , pruned_ratios = [ 0.1 , 0.2 , 0.3 ]) print ( sensitivities ) merge_sensitive # paddleslim.prune.merge_sensitive(sensitivities) \u6e90\u4ee3\u7801 \u5408\u5e76\u591a\u4e2a\u654f\u611f\u5ea6\u4fe1\u606f\u3002 \u53c2\u6570\uff1a sensitivities(list | list ) - \u5f85\u5408\u5e76\u7684\u654f\u611f\u5ea6\u4fe1\u606f\uff0c\u53ef\u4ee5\u662f\u5b57\u5178\u7684\u5217\u8868\uff0c\u6216\u8005\u662f\u5b58\u653e\u654f\u611f\u5ea6\u4fe1\u606f\u7684\u6587\u4ef6\u7684\u8def\u5f84\u5217\u8868\u3002 \u8fd4\u56de\uff1a sensitivities(dict) - \u5408\u5e76\u540e\u7684\u654f\u611f\u5ea6\u4fe1\u606f\u3002\u5176\u683c\u5f0f\u4e3a\uff1a 1 2 3 4 5 6 7 8 9 { \"weight_0\" : { 0 . 1 : 0 . 22 , 0 . 2 : 0 . 33 } , \"weight_1\" : { 0 . 1 : 0 . 21 , 0 . 2 : 0 . 4 } } \u5176\u4e2d\uff0c weight_0 \u662f\u5377\u79ef\u5c42\u53c2\u6570\u7684\u540d\u79f0\uff0csensitivities['weight_0']\u7684 value \u4e3a\u526a\u88c1\u6bd4\u4f8b\uff0c value \u4e3a\u7cbe\u5ea6\u635f\u5931\u7684\u6bd4\u4f8b\u3002 \u793a\u4f8b\uff1a load_sensitivities # paddleslim.prune.load_sensitivities(sensitivities_file) \u6e90\u4ee3\u7801 \u4ece\u6587\u4ef6\u4e2d\u52a0\u8f7d\u654f\u611f\u5ea6\u4fe1\u606f\u3002 \u53c2\u6570\uff1a sensitivities_file(str) - \u5b58\u653e\u654f\u611f\u5ea6\u4fe1\u606f\u7684\u672c\u5730\u6587\u4ef6. 
\u8fd4\u56de\uff1a sensitivities(dict) - \u654f\u611f\u5ea6\u4fe1\u606f\u3002 \u793a\u4f8b\uff1a get_ratios_by_loss # paddleslim.prune.get_ratios_by_loss(sensitivities, loss) \u6e90\u4ee3\u7801 \u6839\u636e\u654f\u611f\u5ea6\u548c\u7cbe\u5ea6\u635f\u5931\u9608\u503c\u8ba1\u7b97\u51fa\u4e00\u7ec4\u526a\u5207\u7387\u3002\u5bf9\u4e8e\u53c2\u6570 w , \u5176\u526a\u88c1\u7387\u4e3a\u4f7f\u7cbe\u5ea6\u635f\u5931\u4f4e\u4e8e loss \u7684\u6700\u5927\u526a\u88c1\u7387\u3002 \u53c2\u6570\uff1a sensitivities(dict) - \u654f\u611f\u5ea6\u4fe1\u606f\u3002 loss - \u7cbe\u5ea6\u635f\u5931\u9608\u503c\u3002 \u8fd4\u56de\uff1a ratios(dict) - \u4e00\u7ec4\u526a\u5207\u7387\u3002 key \u662f\u5f85\u526a\u88c1\u53c2\u6570\u7684\u540d\u79f0\u3002 value \u662f\u5bf9\u5e94\u53c2\u6570\u7684\u526a\u88c1\u7387\u3002","title":"\u526a\u679d\u4e0e\u654f\u611f\u5ea6"},{"location":"api/prune_api/#pruner","text":"paddleslim.prune.Pruner(criterion=\"l1_norm\") \u6e90\u4ee3\u7801 \u5bf9\u5377\u79ef\u7f51\u7edc\u7684\u901a\u9053\u8fdb\u884c\u4e00\u6b21\u526a\u88c1\u3002\u526a\u88c1\u4e00\u4e2a\u5377\u79ef\u5c42\u7684\u901a\u9053\uff0c\u662f\u6307\u526a\u88c1\u8be5\u5377\u79ef\u5c42\u8f93\u51fa\u7684\u901a\u9053\u3002\u5377\u79ef\u5c42\u7684\u6743\u91cd\u5f62\u72b6\u4e3a [output_channel, input_channel, kernel_size, kernel_size] \uff0c\u901a\u8fc7\u526a\u88c1\u8be5\u6743\u91cd\u7684\u7b2c\u4e00\u7eac\u5ea6\u8fbe\u5230\u526a\u88c1\u8f93\u51fa\u901a\u9053\u6570\u7684\u76ee\u7684\u3002 \u53c2\u6570\uff1a criterion - \u8bc4\u4f30\u4e00\u4e2a\u5377\u79ef\u5c42\u5185\u901a\u9053\u91cd\u8981\u6027\u6240\u53c2\u8003\u7684\u6307\u6807\u3002\u76ee\u524d\u4ec5\u652f\u6301 l1_norm \u3002\u9ed8\u8ba4\u4e3a l1_norm \u3002 \u8fd4\u56de\uff1a \u4e00\u4e2aPruner\u7c7b\u7684\u5b9e\u4f8b \u793a\u4f8b\u4ee3\u7801\uff1a 1 2 from paddleslim.prune import Pruner pruner = Pruner () paddleslim.prune.Pruner.prune(program, scope, params, ratios, place=None, lazy=False, only_graph=False, param_backup=False, param_shape_backup=False) \u6e90\u4ee3\u7801 \u5bf9\u76ee\u6807\u7f51\u7edc\u7684\u4e00\u7ec4\u5377\u79ef\u5c42\u7684\u6743\u91cd\u8fdb\u884c\u88c1\u526a\u3002 \u53c2\u6570\uff1a program(paddle.fluid.Program) - \u8981\u88c1\u526a\u7684\u76ee\u6807\u7f51\u7edc\u3002\u66f4\u591a\u5173\u4e8eProgram\u7684\u4ecb\u7ecd\u8bf7\u53c2\u8003\uff1a Program\u6982\u5ff5\u4ecb\u7ecd \u3002 scope(paddle.fluid.Scope) - \u8981\u88c1\u526a\u7684\u6743\u91cd\u6240\u5728\u7684 scope \uff0cPaddle\u4e2d\u7528 scope \u5b9e\u4f8b\u5b58\u653e\u6a21\u578b\u53c2\u6570\u548c\u8fd0\u884c\u65f6\u53d8\u91cf\u7684\u503c\u3002Scope\u4e2d\u7684\u53c2\u6570\u503c\u4f1a\u88ab inplace \u7684\u88c1\u526a\u3002\u66f4\u591a\u4ecb\u7ecd\u8bf7\u53c2\u8003 Scope\u6982\u5ff5\u4ecb\u7ecd params(list ) - \u9700\u8981\u88ab\u88c1\u526a\u7684\u5377\u79ef\u5c42\u7684\u53c2\u6570\u7684\u540d\u79f0\u5217\u8868\u3002\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u65b9\u5f0f\u67e5\u770b\u6a21\u578b\u4e2d\u6240\u6709\u53c2\u6570\u7684\u540d\u79f0: 1 2 3 for block in program . blocks : for param in block . all_parameters () : print ( \" param: {}; shape: {} \" . format ( param . name , param . 
## get_ratios_by_loss

paddleslim.prune.get_ratios_by_loss(sensitivities, loss) (source)

Computes a set of pruning ratios from sensitivity information and an accuracy-loss threshold. For a parameter w, the chosen ratio is the largest pruning ratio whose accuracy loss stays below loss.

Parameters:

- sensitivities(dict) - the sensitivity information.
- loss - the accuracy-loss threshold.

Returns:

- ratios(dict) - a set of pruning ratios; each key is the name of a parameter to be pruned and the value is the pruning ratio for that parameter.
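The original page gives no example for this function. The following hedged sketch continues the sensitivity() example above (it reuses main_program and place from that example and assumes its parameters live in fluid.global_scope()): it keeps, per layer, the largest ratio whose accuracy loss stays below 1% and hands the result to Pruner.prune, which is documented earlier on this page.

```python
import paddle.fluid as fluid
from paddleslim.prune import load_sensitivities, get_ratios_by_loss, Pruner

sensitivities = load_sensitivities("./sensitive.data")
ratios = get_ratios_by_loss(sensitivities, loss=0.01)

pruner = Pruner()
pruned_program, _, _ = pruner.prune(
    main_program,                    # the program analysed by sensitivity()
    fluid.global_scope(),
    params=list(ratios.keys()),
    ratios=list(ratios.values()),
    place=place)
```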
# Quantization

## Quantization configuration

Quantization parameters are configured through a dict. The defaults are:

```python
quant_config_default = {
    'weight_quantize_type': 'abs_max',
    'activation_quantize_type': 'abs_max',
    'weight_bits': 8,
    'activation_bits': 8,
    # ops whose name_scope contains an entry of the not_quant_pattern list will not be quantized
    'not_quant_pattern': ['skip_quant'],
    # ops whose type is in quantize_op_types will be quantized
    'quantize_op_types': ['conv2d', 'depthwise_conv2d', 'mul', 'elementwise_add', 'pool2d'],
    # data type after quantization, such as 'uint8', 'int8', etc. default is 'int8'
    'dtype': 'int8',
    # window size for 'range_abs_max' quantization. default is 10000
    'window_size': 10000,
    # the decay coefficient of the moving average, default is 0.9
    'moving_rate': 0.9,
}
```

Parameters:

- weight_quantize_type(str) - weight quantization method; one of 'abs_max', 'channel_wise_abs_max', 'range_abs_max', 'moving_average_abs_max'. Default: 'abs_max'.
- activation_quantize_type(str) - activation quantization method; one of 'abs_max', 'range_abs_max', 'moving_average_abs_max'. Default: 'abs_max'.
- weight_bits(int) - number of bits for weight quantization. Default: 8; 8 is recommended.
- activation_bits(int) - number of bits for activation quantization. Default: 8; 8 is recommended.
- not_quant_pattern(str | list[str]) - every op whose name_scope contains one of the strings in not_quant_pattern is left unquantized; see fluid.name_scope for how to set it.
- quantize_op_types(list[str]) - op types to quantize; currently 'conv2d', 'depthwise_conv2d' and 'mul' are supported.
- dtype(str) - parameter type after quantization. Default: 'int8'; only 'int8' is currently supported.
- window_size(int) - window size for 'range_abs_max' quantization. Default: 10000.
- moving_rate(float) - decay coefficient for 'moving_average_abs_max' quantization. Default: 0.9.
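The example under convert below passes a dict that sets only two of these keys, so a partial configuration is acceptable and unlisted keys keep the defaults above. A hedged sketch of a typical user-defined configuration:

```python
# override only the entries that differ from quant_config_default;
# 'channel_wise_abs_max' gives per-output-channel weight scales
config = {
    'weight_quantize_type': 'channel_wise_abs_max',
    'activation_quantize_type': 'moving_average_abs_max',
    'not_quant_pattern': ['skip_quant'],   # keep ops under this name_scope in FP32
}
# the dict is then passed as the `config` argument of quant_aware()/convert()
```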
## quant_aware

paddleslim.quant.quant_aware(program, place, config, scope=None, for_test=False) (source)

Inserts quantization and dequantization ops into a program, for quantization-aware training.

Parameters:

- program(fluid.Program) - the training or test program.
- place(fluid.CPUPlace | fluid.CUDAPlace) - the device on which the Executor runs.
- config(dict) - the quantization configuration table.
- scope(fluid.Scope, optional) - the scope that stores the Variables; it must be the scope used by program, normally fluid.global_scope(). When set to None, fluid.global_scope() is used. Default: None.
- for_test(bool) - set to True if program is a test program, otherwise False.

Returns: a program containing quantization and dequantization operators.

Return type:

- When for_test=False, the return type is fluid.CompiledProgram; note that this return value cannot be used to save parameters.
- When for_test=True, the return type is fluid.Program.

Notes:

- This API changes the structure of the program and may add persistable variables, so when loading model parameters make sure they match the corresponding program.
- Internally the API goes through fluid.Program -> fluid.framework.IrGraph -> fluid.Program. fluid.framework.IrGraph has no notion of Parameter; Variables are only distinguished as persistable or not persistable, so use fluid.io.save_persistables and fluid.io.load_persistables to save and load parameters.
- Because the API adds ops according to the program structure and the quantization configuration, some Paddle strategies that speed up training by fusing ops cannot be used. The following strategies are known to require being set to False when quantization is used: fuse_all_reduce_ops, sync_batch_norm.
- Variables in the program that are not connected to any op are optimized away during quantization.

## convert

paddleslim.quant.convert(program, place, config, scope=None, save_int8=False) (source)

Converts a trained quantization program into a program that can be saved as an inference model.

Parameters:

- program(fluid.Program) - the test program.
- place(fluid.CPUPlace | fluid.CUDAPlace) - the device on which the Executor runs.
- config(dict) - the quantization configuration table.
- scope(fluid.Scope) - the scope that stores the Variables; it must be the scope used by program, normally fluid.global_scope(). When set to None, fluid.global_scope() is used. Default: None.
- save_int8(bool) - whether to also return a program whose parameters are int8. This is currently only useful for checking the model size. Default: False.

Returns:

- program(fluid.Program) - the frozen program, which can be saved as an inference model; its parameters are float32 but their value range can be represented with int8.
- int8_program(fluid.Program) - the frozen program with int8 parameters, also usable for saving an inference model. Not returned when save_int8 is False.

Note: because this API deletes and modifies ops and Variables, it can only be called after training has finished. To convert an intermediate training checkpoint, load the corresponding parameters first and then call this API.

Code example:

```python
#encoding=utf8
import paddle.fluid as fluid
import paddleslim.quant as quant

train_program = fluid.Program()
with fluid.program_guard(train_program):
    image = fluid.data(name='x', shape=[None, 1, 28, 28])
    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
    conv = fluid.layers.conv2d(image, 32, 1)
    feat = fluid.layers.fc(conv, 10, act='softmax')
    cost = fluid.layers.cross_entropy(input=feat, label=label)
    avg_cost = fluid.layers.mean(x=cost)

use_gpu = True
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
eval_program = train_program.clone(for_test=True)

# configuration
config = {'weight_quantize_type': 'abs_max',
          'activation_quantize_type': 'moving_average_abs_max'}
build_strategy = fluid.BuildStrategy()
exec_strategy = fluid.ExecutionStrategy()

# call the APIs
quant_train_program = quant.quant_aware(train_program, place, config, for_test=False)
quant_eval_program = quant.quant_aware(eval_program, place, config, for_test=True)

# disable the strategies that conflict with quantization
build_strategy.fuse_all_reduce_ops = False
build_strategy.sync_batch_norm = False
quant_train_program = quant_train_program.with_data_parallel(
    loss_name=avg_cost.name,
    build_strategy=build_strategy,
    exec_strategy=exec_strategy)

inference_prog = quant.convert(quant_eval_program, place, config)
```

For more detailed usage, see the quantization-aware training demo.
## quant_post

paddleslim.quant.quant_post(executor, model_dir, quantize_model_path, sample_generator, model_filename=None, params_filename=None, batch_size=16, batch_nums=None, scope=None, algo='KL', quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"]) (source)

Quantizes the model saved under ${model_dir}, using data from sample_generator to calibrate the quantization parameters.

Parameters:

- executor(fluid.Executor) - the executor that runs the model, on CPU or GPU.
- model_dir(str) - the directory containing the model to quantize.
- quantize_model_path(str) - the path where the quantized model is saved.
- sample_generator(python generator) - reads data and returns one sample at a time.
- model_filename(str, optional) - if the model to quantize was saved with a single model file, set model_filename to that file's name; otherwise leave it as None. Default: None.
- params_filename(str, optional) - if the parameters of the model to quantize are stored in a single file, set params_filename to that file's name; otherwise leave it as None. Default: None.
- batch_size(int) - number of images per batch. Default: 16.
- batch_nums(int, optional) - number of iterations. If None, calibration runs until sample_generator is exhausted; otherwise it runs for batch_nums iterations, so the number of samples used to calibrate the scales is batch_nums * batch_size.
- scope(fluid.Scope, optional) - used to read and write Variables; if None, fluid.global_scope() is used. Default: None.
- algo(str) - name of the algorithm used for quantization, 'KL' or 'direct'. This only affects activation quantization, because weights are quantized with 'channel_wise_abs_max'. With 'direct', the maximum absolute activation value over the calibration data is used as the scale; with 'KL', the scale is computed with the KL-divergence method. Default: 'KL'.
- quantizable_op_type(list[str]) - list of op types to quantize. Default: ["conv2d", "depthwise_conv2d", "mul"].

Returns: none.

Note: because this API collects all activation values of the calibration data, do not use too many calibration images. Computing the 'KL' divergence is also fairly time-consuming.

Code example (note: this example cannot be run as-is, because it needs a model saved under ./model_path):

```python
import paddle.fluid as fluid
import paddle.dataset.mnist as reader
from paddleslim.quant import quant_post

val_reader = reader.train()
use_gpu = True
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)

quant_post(
    executor=exe,
    model_dir='./model_path',
    quantize_model_path='./save_path',
    sample_generator=val_reader,
    model_filename='__model__',
    params_filename='__params__',
    batch_size=16,
    batch_nums=10)
```

For more detailed usage, see the post-training (offline) quantization demo.

## quant_embedding

paddleslim.quant.quant_embedding(program, place, config, scope=None) (source)

Quantizes Embedding parameters.

Parameters:

- program(fluid.Program) - the program to quantize.
- scope(fluid.Scope, optional) - used to read and write Variables; if None, fluid.global_scope() is used.
- place(fluid.CPUPlace | fluid.CUDAPlace) - the device that runs the program.
- config(dict) - the quantization configuration. Available keys:
  - 'params_name'(str, required): name of the parameter to quantize; this key must be set.
  - 'quantize_type'(str, optional): quantization type; currently only 'abs_max' is supported, with 'log' and 'product_quantization' planned. Default: 'abs_max'.
  - 'quantize_bits'(int, optional): number of quantization bits; currently only 8 is supported. Default: 8.
  - 'dtype'(str, optional): data type after quantization; currently only 'int8' is supported. Default: 'int8'.
  - 'threshold'(float, optional): before quantization, the parameter values are clipped to this threshold. If not set, clipping is skipped and the values are quantized directly.

Returns: the quantized program.

Return type: fluid.Program.

Code example:

```python
import paddle.fluid as fluid
import paddleslim.quant as quant

train_program = fluid.Program()
with fluid.program_guard(train_program):
    input_word = fluid.data(name="input_word", shape=[None, 1], dtype='int64')
    input_emb = fluid.embedding(
        input=input_word,
        is_sparse=False,
        size=[100, 128],
        param_attr=fluid.ParamAttr(
            name='emb',
            initializer=fluid.initializer.Uniform(-0.005, 0.005)))

infer_program = train_program.clone(for_test=True)

use_gpu = True
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

config = {'params_name': 'emb', 'quantize_type': 'abs_max'}
quant_program = quant.quant_embedding(infer_program, place, config)
```

For more detailed usage, see the Embedding quantization demo.
# Search space

## Search spaces provided by paddleslim.nas

1. Search spaces built from a complete model architecture:
   1.1 MobileNetV2Space
   1.2 MobileNetV1Space
   1.3 ResNetSpace
2. Search spaces built from the blocks of the corresponding model:
   2.1 MobileNetV1BlockSpace
   2.2 MobileNetV2BlockSpace
   2.3 ResNetBlockSpace
   2.4 InceptionABlockSpace
   2.5 InceptionCBlockSpace

## Search space configuration

- input_size(int|None): the size of the input feature map.
- output_size(int|None): the size of the output feature map.
- block_num(int|None): the number of blocks in the search space.
- block_mask(list|None): marks each block as a reduction block or a normal block; it is a list of 0s and 1s, where 0 means the block is a normal block and 1 means it is a reduction block. If block_mask is set, it is the primary configuration and input_size, output_size and block_num are ignored.

Note:

1. A reduction block halves the feature map size; a normal block keeps the feature map size unchanged.
2. input_size and output_size are used to compute the number of reduction blocks in the whole architecture.
## Search space examples

1. To use one of the search spaces built from a complete model architecture, only the search space name needs to be specified. For example, to search with the original MobileNetV2 search space, set the config passed to SANAS to [('MobileNetV2Space')].
2. To build a search space from one of the block search spaces provided by PaddleSlim:
   2.1 Use input_size, output_size and block_num. For example, the config passed to SANAS can be [('MobileNetV2BlockSpace', {'input_size': 224, 'output_size': 32, 'block_num': 10})].
   2.2 Use block_mask. For example, the config passed to SANAS can be [('MobileNetV2BlockSpace', {'block_mask': [0, 1, 1, 1, 1, 0, 1, 0]})].
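The examples above only show what the config list looks like. The sketch below is a hedged illustration of how such a config is typically handed to the SANAS searcher (SANAS itself is documented on the NAS API page, not in this section; all constructor arguments other than configs are left at their defaults here):

```python
from paddleslim.nas import SANAS

config = [('MobileNetV2BlockSpace', {'block_mask': [0, 1, 1, 1, 1, 0, 1, 0]})]
sanas = SANAS(configs=config)

# next_archs() returns a list of functions; each one builds the searched blocks
# when applied to an input variable inside a fluid.program_guard block
archs = sanas.next_archs()
```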
## Custom search space

A custom search space class must inherit from the search space base class and override the following parts:

1. The initial tokens (the init_tokens function). They can be set to any token list you like; each number in the list is an index into the corresponding search list. In the example below, tokens=[0, 3, 5] would mean the searched channel numbers are [8, 40, 128].
2. The length of the search list behind each number in the tokens (the range_table function), i.e. the index range of each token.
3. Building the architecture from the tokens (the token2arch function), which turns the searched token list into a model architecture.

The example below adds a new ResNet-style block to show how to build your own search space. A custom search space must not share its name with an existing one.

```python
### import the search space base class and the search space registry
from .search_space_base import SearchSpaceBase
from .search_space_registry import SEARCHSPACE
import numpy as np
import paddle.fluid as fluid

### the custom search space must be registered into the search space registry
@SEARCHSPACE.register
### define a search space class that inherits from SearchSpaceBase
class ResNetBlockSpace2(SearchSpaceBase):
    def __init__(self, input_size, output_size, block_num, block_mask):
        self.block_mask = block_mask
        ### define what is actually searched, e.g. channel numbers, repeat times of each conv, kernel sizes
        ### self.filter_num is the search list for the channel numbers
        self.filter_num = np.array([8, 16, 32, 40, 64, 128, 256, 512])

    ### define the initial tokens; their length is derived from block_num or the length of block_mask
    def init_tokens(self):
        return [0] * 3 * len(self.block_mask)

    ### define the index range of every position in the tokens
    def range_table(self):
        return [len(self.filter_num)] * 3 * len(self.block_mask)

    def token2arch(self, tokens=None):
        if tokens is None:
            tokens = self.init_tokens()

        self.bottleneck_params_list = []
        for i in range(len(self.block_mask)):
            self.bottleneck_params_list.append(
                (self.filter_num[tokens[i * 3 + 0]],
                 self.filter_num[tokens[i * 3 + 1]],
                 self.filter_num[tokens[i * 3 + 2]],
                 2 if self.block_mask[i] == 1 else 1))

        def net_arch(input):
            for i, layer_setting in enumerate(self.bottleneck_params_list):
                channel_num, stride = layer_setting[:-1], layer_setting[-1]
                input = self._resnet_block(
                    input, channel_num, stride, name='resnet_layer{}'.format(i + 1))
            return input

        return net_arch

    ### operations that build one concrete block
    def _resnet_block(self, input, channel_num, stride, name=None):
        shortcut_conv = self._shortcut(input, channel_num[2], stride, name=name)
        input = self._conv_bn_layer(
            input=input, num_filters=channel_num[0], filter_size=1, act='relu', name=name + '_conv0')
        input = self._conv_bn_layer(
            input=input, num_filters=channel_num[1], filter_size=3, stride=stride, act='relu', name=name + '_conv1')
        input = self._conv_bn_layer(
            input=input, num_filters=channel_num[2], filter_size=1, name=name + '_conv2')
        return fluid.layers.elementwise_add(
            x=shortcut_conv, y=input, axis=0, name=name + '_elementwise_add')

    def _shortcut(self, input, channel_num, stride, name=None):
        channel_in = input.shape[1]
        if channel_in != channel_num or stride != 1:
            return self._conv_bn_layer(
                input, num_filters=channel_num, filter_size=1, stride=stride, name=name + '_shortcut')
        else:
            return input

    def _conv_bn_layer(self, input, num_filters, filter_size, stride=1, padding='SAME', act=None, name=None):
        conv = fluid.layers.conv2d(input, num_filters, filter_size, stride, name=name + '_conv')
        bn = fluid.layers.batch_norm(conv, act=act, name=name + '_bn')
        return bn
```
shape [ 1 ] if channel_in != channel_num or stride != 1 : return self . conv_bn_layer ( input , num_filters = channel_num , filter_size = 1 , stride = stride , name = name + '_shortcut' ) else : return input def _conv_bn_layer ( self , input , num_filters , filter_size , stride = 1 , padding = 'SAME' , act = None , name = None ): conv = fluid . layers . conv2d ( input , num_filters , filter_size , stride , name = name + '_conv' ) bn = fluid . layers . batch_norm ( conv , act = act , name = name + '_bn' ) return bn","title":"\u81ea\u5b9a\u4e49\u641c\u7d22\u7a7a\u95f4(search space)"},{"location":"api/single_distiller_api/","text":"merge # paddleslim.dist.merge(teacher_program, student_program, data_name_map, place, scope=fluid.global_scope(), name_prefix='teacher_') [\u6e90\u4ee3\u7801] merge\u5c06\u4e24\u4e2apaddle program\uff08teacher_program, student_program\uff09\u878d\u5408\u4e3a\u4e00\u4e2aprogram\uff0c\u5e76\u5c06\u878d\u5408\u5f97\u5230\u7684program\u8fd4\u56de\u3002\u5728\u878d\u5408\u7684program\u4e2d\uff0c\u53ef\u4ee5\u4e3a\u5176\u4e2d\u5408\u9002\u7684teacher\u7279\u5f81\u56fe\u548cstudent\u7279\u5f81\u56fe\u6dfb\u52a0\u84b8\u998f\u635f\u5931\u51fd\u6570\uff0c\u4ece\u800c\u8fbe\u5230\u7528teacher\u6a21\u578b\u7684\u6697\u77e5\u8bc6\uff08Dark Knowledge\uff09\u6307\u5bfcstudent\u6a21\u578b\u5b66\u4e60\u7684\u76ee\u7684\u3002 \u53c2\u6570\uff1a teacher_program (Program)-\u5b9a\u4e49\u4e86teacher\u6a21\u578b\u7684 paddle program student_program (Program)-\u5b9a\u4e49\u4e86student\u6a21\u578b\u7684 paddle program data_name_map (dict)-teacher\u8f93\u5165\u63a5\u53e3\u540d\u4e0estudent\u8f93\u5165\u63a5\u53e3\u540d\u7684\u6620\u5c04\uff0c\u5176\u4e2ddict\u7684 key \u4e3ateacher\u7684\u8f93\u5165\u540d\uff0c value \u4e3astudent\u7684\u8f93\u5165\u540d place (fluid.CPUPlace()|fluid.CUDAPlace(N))-\u8be5\u53c2\u6570\u8868\u793a\u7a0b\u5e8f\u8fd0\u884c\u5728\u4f55\u79cd\u8bbe\u5907\u4e0a\uff0c\u8fd9\u91cc\u7684N\u4e3aGPU\u5bf9\u5e94\u7684ID scope (Scope)-\u8be5\u53c2\u6570\u8868\u793a\u7a0b\u5e8f\u4f7f\u7528\u7684\u53d8\u91cf\u4f5c\u7528\u57df\uff0c\u5982\u679c\u4e0d\u6307\u5b9a\u5c06\u4f7f\u7528\u9ed8\u8ba4\u7684\u5168\u5c40\u4f5c\u7528\u57df\u3002\u9ed8\u8ba4\u503c\uff1a fluid.global_scope() name_prefix (str)-merge\u64cd\u4f5c\u5c06\u7edf\u4e00\u4e3ateacher\u7684 Variables \u6dfb\u52a0\u7684\u540d\u79f0\u524d\u7f00name_prefix\u3002\u9ed8\u8ba4\u503c\uff1a'teacher_' \u8fd4\u56de\uff1a \u7531student_program\u548cteacher_program merge\u5f97\u5230\u7684program Note data_name_map \u662f teacher_var name\u5230student_var name\u7684\u6620\u5c04 \uff0c\u5982\u679c\u5199\u53cd\u53ef\u80fd\u65e0\u6cd5\u6b63\u786e\u8fdb\u884cmerge \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = dist . 
merge ( teacher_program , student_program , data_name_map , place ) fsp_loss # paddleslim.dist.fsp_loss(teacher_var1_name, teacher_var2_name, student_var1_name, student_var2_name, program=fluid.default_main_program()) [\u6e90\u4ee3\u7801] fsp_loss\u4e3aprogram\u5185\u7684teacher var\u548cstudent var\u6dfb\u52a0fsp loss\uff0c\u51fa\u81ea\u8bba\u6587 <> \u53c2\u6570\uff1a teacher_var1_name (str): teacher_var1\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, x_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64 teacher_var2_name (str): teacher_var2\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, y_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64\u3002\u53ea\u6709y_channel\u53ef\u4ee5\u4e0eteacher_var1\u7684x_channel\u4e0d\u540c\uff0c\u5176\u4ed6\u7ef4\u5ea6\u5fc5\u987b\u4e0eteacher_var1\u76f8\u540c student_var1_name (str): student_var1\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u9700\u4e0eteacher_var1\u5c3a\u5bf8\u4fdd\u6301\u4e00\u81f4\uff0c\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, x_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64 student_var2_name (str): student_var2\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u9700\u4e0eteacher_var2\u5c3a\u5bf8\u4fdd\u6301\u4e00\u81f4\uff0c\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, y_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64\u3002\u53ea\u6709y_channel\u53ef\u4ee5\u4e0estudent_var1\u7684x_channel\u4e0d\u540c\uff0c\u5176\u4ed6\u7ef4\u5ea6\u5fc5\u987b\u4e0estudent_var1\u76f8\u540c program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() \u8fd4\u56de\uff1a \u7531teacher_var1, teacher_var2, student_var1, student_var2\u7ec4\u5408\u5f97\u5230\u7684fsp_loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) with fluid . program_guard ( main_program ): distillation_loss = dist . fsp_loss ( 'teacher_t1.tmp_1' , 'teacher_t2.tmp_1' , 's1.tmp_1' , 's2.tmp_1' , main_program ) l2_loss # paddleslim.dist.l2_loss(teacher_var_name, student_var_name, program=fluid.default_main_program()) [\u6e90\u4ee3\u7801] l2_loss\u4e3aprogram\u5185\u7684teacher var\u548cstudent var\u6dfb\u52a0l2 loss \u53c2\u6570\uff1a teacher_var_name (str): teacher_var\u7684\u540d\u79f0. student_var_name (str): student_var\u7684\u540d\u79f0. 
program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() \u8fd4\u56de\uff1a \u7531teacher_var, student_var\u7ec4\u5408\u5f97\u5230\u7684l2_loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) with fluid . program_guard ( main_program ): distillation_loss = dist . l2_loss ( 'teacher_t2.tmp_1' , 's2.tmp_1' , main_program ) soft_label_loss # paddleslim.dist.soft_label_loss(teacher_var_name, student_var_name, program=fluid.default_main_program(), teacher_temperature=1., student_temperature=1.) [\u6e90\u4ee3\u7801] soft_label_loss\u4e3aprogram\u5185\u7684teacher var\u548cstudent var\u6dfb\u52a0soft label loss\uff0c\u51fa\u81ea\u8bba\u6587 <> \u53c2\u6570\uff1a teacher_var_name (str): teacher_var\u7684\u540d\u79f0. student_var_name (str): student_var\u7684\u540d\u79f0. program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() teacher_temperature (float): \u5bf9teacher_var\u8fdb\u884csoft\u64cd\u4f5c\u7684\u6e29\u5ea6\u503c\uff0c\u6e29\u5ea6\u503c\u8d8a\u5927\u5f97\u5230\u7684\u7279\u5f81\u56fe\u8d8a\u5e73\u6ed1 student_temperature (float): \u5bf9student_var\u8fdb\u884csoft\u64cd\u4f5c\u7684\u6e29\u5ea6\u503c\uff0c\u6e29\u5ea6\u503c\u8d8a\u5927\u5f97\u5230\u7684\u7279\u5f81\u56fe\u8d8a\u5e73\u6ed1 \u8fd4\u56de\uff1a \u7531teacher_var, student_var\u7ec4\u5408\u5f97\u5230\u7684soft_label_loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) with fluid . program_guard ( main_program ): distillation_loss = dist . soft_label_loss ( 'teacher_t2.tmp_1' , 's2.tmp_1' , main_program , 1. , 1. 
) loss # paddleslim.dist.loss(loss_func, program=fluid.default_main_program(), **kwargs) [\u6e90\u4ee3\u7801] loss\u51fd\u6570\u652f\u6301\u5bf9\u4efb\u610f\u591a\u5bf9teacher_var\u548cstudent_var\u4f7f\u7528\u81ea\u5b9a\u4e49\u635f\u5931\u51fd\u6570 \u53c2\u6570\uff1a loss_func (python function): \u81ea\u5b9a\u4e49\u7684\u635f\u5931\u51fd\u6570\uff0c\u8f93\u5165\u4e3ateacher var\u548cstudent var\uff0c\u8f93\u51fa\u4e3a\u81ea\u5b9a\u4e49\u7684loss program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() **kwargs : loss_func\u8f93\u5165\u540d\u4e0e\u5bf9\u5e94variable\u540d\u79f0 \u8fd4\u56de \uff1a\u81ea\u5b9a\u4e49\u7684\u635f\u5931\u51fd\u6570loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) def adaptation_loss ( t_var , s_var ): teacher_channel = t_var . shape [ 1 ] s_hint = fluid . layers . conv2d ( s_var , teacher_channel , 1 ) hint_loss = fluid . layers . reduce_mean ( fluid . layers . square ( s_hint - t_var )) return hint_loss with fluid . program_guard ( main_program ): distillation_loss = dist . 
loss ( main_program , adaptation_loss , t_var = 'teacher_t2.tmp_1' , s_var = 's2.tmp_1' ) \u6ce8\u610f\u4e8b\u9879 \u5728\u6dfb\u52a0\u84b8\u998floss\u65f6\u4f1a\u5f15\u5165\u65b0\u7684variable\uff0c\u9700\u8981\u6ce8\u610f\u65b0\u5f15\u5165\u7684variable\u4e0d\u8981\u4e0estudent variables\u547d\u540d\u51b2\u7a81\u3002\u8fd9\u91cc\u5efa\u8bae\u4e24\u79cd\u7528\u6cd5\uff08\u4e24\u79cd\u65b9\u6cd5\u4efb\u9009\u5176\u4e00\u5373\u53ef\uff09\uff1a \u5efa\u8bae\u4e0estudent_program\u4f7f\u7528\u540c\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u4ee5\u907f\u514d\u4e00\u4e9b\u672a\u6307\u5b9a\u540d\u79f0\u7684variables(\u4f8b\u5982tmp_0, tmp_1...)\u591a\u6b21\u5b9a\u4e49\u4e3a\u540c\u4e00\u540d\u79f0\u51fa\u73b0\u547d\u540d\u51b2\u7a81 \u5efa\u8bae\u5728\u6dfb\u52a0\u84b8\u998floss\u65f6\u6307\u5b9a\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u524d\u7f00\uff0c\u5177\u4f53\u7528\u6cd5\u8bf7\u53c2\u8003Paddle\u5b98\u65b9\u6587\u6863 fluid.name_scope","title":"\u77e5\u8bc6\u84b8\u998f"},{"location":"api/single_distiller_api/#merge","text":"paddleslim.dist.merge(teacher_program, student_program, data_name_map, place, scope=fluid.global_scope(), name_prefix='teacher_') [\u6e90\u4ee3\u7801] merge\u5c06\u4e24\u4e2apaddle program\uff08teacher_program, student_program\uff09\u878d\u5408\u4e3a\u4e00\u4e2aprogram\uff0c\u5e76\u5c06\u878d\u5408\u5f97\u5230\u7684program\u8fd4\u56de\u3002\u5728\u878d\u5408\u7684program\u4e2d\uff0c\u53ef\u4ee5\u4e3a\u5176\u4e2d\u5408\u9002\u7684teacher\u7279\u5f81\u56fe\u548cstudent\u7279\u5f81\u56fe\u6dfb\u52a0\u84b8\u998f\u635f\u5931\u51fd\u6570\uff0c\u4ece\u800c\u8fbe\u5230\u7528teacher\u6a21\u578b\u7684\u6697\u77e5\u8bc6\uff08Dark Knowledge\uff09\u6307\u5bfcstudent\u6a21\u578b\u5b66\u4e60\u7684\u76ee\u7684\u3002 \u53c2\u6570\uff1a teacher_program (Program)-\u5b9a\u4e49\u4e86teacher\u6a21\u578b\u7684 paddle program student_program (Program)-\u5b9a\u4e49\u4e86student\u6a21\u578b\u7684 paddle program data_name_map (dict)-teacher\u8f93\u5165\u63a5\u53e3\u540d\u4e0estudent\u8f93\u5165\u63a5\u53e3\u540d\u7684\u6620\u5c04\uff0c\u5176\u4e2ddict\u7684 key \u4e3ateacher\u7684\u8f93\u5165\u540d\uff0c value \u4e3astudent\u7684\u8f93\u5165\u540d place (fluid.CPUPlace()|fluid.CUDAPlace(N))-\u8be5\u53c2\u6570\u8868\u793a\u7a0b\u5e8f\u8fd0\u884c\u5728\u4f55\u79cd\u8bbe\u5907\u4e0a\uff0c\u8fd9\u91cc\u7684N\u4e3aGPU\u5bf9\u5e94\u7684ID scope (Scope)-\u8be5\u53c2\u6570\u8868\u793a\u7a0b\u5e8f\u4f7f\u7528\u7684\u53d8\u91cf\u4f5c\u7528\u57df\uff0c\u5982\u679c\u4e0d\u6307\u5b9a\u5c06\u4f7f\u7528\u9ed8\u8ba4\u7684\u5168\u5c40\u4f5c\u7528\u57df\u3002\u9ed8\u8ba4\u503c\uff1a fluid.global_scope() name_prefix (str)-merge\u64cd\u4f5c\u5c06\u7edf\u4e00\u4e3ateacher\u7684 Variables \u6dfb\u52a0\u7684\u540d\u79f0\u524d\u7f00name_prefix\u3002\u9ed8\u8ba4\u503c\uff1a'teacher_' \u8fd4\u56de\uff1a \u7531student_program\u548cteacher_program merge\u5f97\u5230\u7684program Note data_name_map \u662f teacher_var name\u5230student_var name\u7684\u6620\u5c04 \uff0c\u5982\u679c\u5199\u53cd\u53ef\u80fd\u65e0\u6cd5\u6b63\u786e\u8fdb\u884cmerge \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . 
layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = dist . merge ( teacher_program , student_program , data_name_map , place )","title":"merge"},{"location":"api/single_distiller_api/#fsp_loss","text":"paddleslim.dist.fsp_loss(teacher_var1_name, teacher_var2_name, student_var1_name, student_var2_name, program=fluid.default_main_program()) [\u6e90\u4ee3\u7801] fsp_loss\u4e3aprogram\u5185\u7684teacher var\u548cstudent var\u6dfb\u52a0fsp loss\uff0c\u51fa\u81ea\u8bba\u6587 <> \u53c2\u6570\uff1a teacher_var1_name (str): teacher_var1\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, x_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64 teacher_var2_name (str): teacher_var2\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, y_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64\u3002\u53ea\u6709y_channel\u53ef\u4ee5\u4e0eteacher_var1\u7684x_channel\u4e0d\u540c\uff0c\u5176\u4ed6\u7ef4\u5ea6\u5fc5\u987b\u4e0eteacher_var1\u76f8\u540c student_var1_name (str): student_var1\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u9700\u4e0eteacher_var1\u5c3a\u5bf8\u4fdd\u6301\u4e00\u81f4\uff0c\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, x_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64 student_var2_name (str): student_var2\u7684\u540d\u79f0. \u5bf9\u5e94\u7684variable\u9700\u4e0eteacher_var2\u5c3a\u5bf8\u4fdd\u6301\u4e00\u81f4\uff0c\u662f\u4e00\u4e2a\u5f62\u4e3a [batch_size, y_channel, height, width] \u76844-D\u7279\u5f81\u56feTensor\uff0c\u6570\u636e\u7c7b\u578b\u4e3afloat32\u6216float64\u3002\u53ea\u6709y_channel\u53ef\u4ee5\u4e0estudent_var1\u7684x_channel\u4e0d\u540c\uff0c\u5176\u4ed6\u7ef4\u5ea6\u5fc5\u987b\u4e0estudent_var1\u76f8\u540c program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() \u8fd4\u56de\uff1a \u7531teacher_var1, teacher_var2, student_var1, student_var2\u7ec4\u5408\u5f97\u5230\u7684fsp_loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) with fluid . program_guard ( main_program ): distillation_loss = dist . 
fsp_loss ( 'teacher_t1.tmp_1' , 'teacher_t2.tmp_1' , 's1.tmp_1' , 's2.tmp_1' , main_program )","title":"fsp_loss"},{"location":"api/single_distiller_api/#l2_loss","text":"paddleslim.dist.l2_loss(teacher_var_name, student_var_name, program=fluid.default_main_program()) [\u6e90\u4ee3\u7801] l2_loss\u4e3aprogram\u5185\u7684teacher var\u548cstudent var\u6dfb\u52a0l2 loss \u53c2\u6570\uff1a teacher_var_name (str): teacher_var\u7684\u540d\u79f0. student_var_name (str): student_var\u7684\u540d\u79f0. program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() \u8fd4\u56de\uff1a \u7531teacher_var, student_var\u7ec4\u5408\u5f97\u5230\u7684l2_loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) with fluid . program_guard ( main_program ): distillation_loss = dist . l2_loss ( 'teacher_t2.tmp_1' , 's2.tmp_1' , main_program )","title":"l2_loss"},{"location":"api/single_distiller_api/#soft_label_loss","text":"paddleslim.dist.soft_label_loss(teacher_var_name, student_var_name, program=fluid.default_main_program(), teacher_temperature=1., student_temperature=1.) [\u6e90\u4ee3\u7801] soft_label_loss\u4e3aprogram\u5185\u7684teacher var\u548cstudent var\u6dfb\u52a0soft label loss\uff0c\u51fa\u81ea\u8bba\u6587 <> \u53c2\u6570\uff1a teacher_var_name (str): teacher_var\u7684\u540d\u79f0. student_var_name (str): student_var\u7684\u540d\u79f0. program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() teacher_temperature (float): \u5bf9teacher_var\u8fdb\u884csoft\u64cd\u4f5c\u7684\u6e29\u5ea6\u503c\uff0c\u6e29\u5ea6\u503c\u8d8a\u5927\u5f97\u5230\u7684\u7279\u5f81\u56fe\u8d8a\u5e73\u6ed1 student_temperature (float): \u5bf9student_var\u8fdb\u884csoft\u64cd\u4f5c\u7684\u6e29\u5ea6\u503c\uff0c\u6e29\u5ea6\u503c\u8d8a\u5927\u5f97\u5230\u7684\u7279\u5f81\u56fe\u8d8a\u5e73\u6ed1 \u8fd4\u56de\uff1a \u7531teacher_var, student_var\u7ec4\u5408\u5f97\u5230\u7684soft_label_loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . 
conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) with fluid . program_guard ( main_program ): distillation_loss = dist . soft_label_loss ( 'teacher_t2.tmp_1' , 's2.tmp_1' , main_program , 1. , 1. )","title":"soft_label_loss"},{"location":"api/single_distiller_api/#loss","text":"paddleslim.dist.loss(loss_func, program=fluid.default_main_program(), **kwargs) [\u6e90\u4ee3\u7801] loss\u51fd\u6570\u652f\u6301\u5bf9\u4efb\u610f\u591a\u5bf9teacher_var\u548cstudent_var\u4f7f\u7528\u81ea\u5b9a\u4e49\u635f\u5931\u51fd\u6570 \u53c2\u6570\uff1a loss_func (python function): \u81ea\u5b9a\u4e49\u7684\u635f\u5931\u51fd\u6570\uff0c\u8f93\u5165\u4e3ateacher var\u548cstudent var\uff0c\u8f93\u51fa\u4e3a\u81ea\u5b9a\u4e49\u7684loss program (Program): \u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684fluid program\u3002\u9ed8\u8ba4\u503c\uff1a fluid.default_main_program() **kwargs : loss_func\u8f93\u5165\u540d\u4e0e\u5bf9\u5e94variable\u540d\u79f0 \u8fd4\u56de \uff1a\u81ea\u5b9a\u4e49\u7684\u635f\u5931\u51fd\u6570loss \u4f7f\u7528\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 import paddle.fluid as fluid import paddleslim.dist as dist student_program = fluid . Program () with fluid . program_guard ( student_program ): x = fluid . layers . data ( name = 'x' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( x , 32 , 1 , name = 's1' ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 's2' ) teacher_program = fluid . Program () with fluid . program_guard ( teacher_program ): y = fluid . layers . data ( name = 'y' , shape = [ 1 , 28 , 28 ]) conv = fluid . layers . conv2d ( y , 32 , 1 , name = 't1' ) conv = fluid . layers . conv2d ( conv , 32 , 3 , padding = 1 ) out = fluid . layers . conv2d ( conv , 64 , 3 , padding = 1 , name = 't2' ) data_name_map = { 'y' : 'x' } USE_GPU = False place = fluid . CUDAPlace ( 0 ) if USE_GPU else fluid . CPUPlace () main_program = merge ( teacher_program , student_program , data_name_map , place ) def adaptation_loss ( t_var , s_var ): teacher_channel = t_var . shape [ 1 ] s_hint = fluid . layers . conv2d ( s_var , teacher_channel , 1 ) hint_loss = fluid . layers . reduce_mean ( fluid . layers . square ( s_hint - t_var )) return hint_loss with fluid . program_guard ( main_program ): distillation_loss = dist . 
loss ( main_program , adaptation_loss , t_var = 'teacher_t2.tmp_1' , s_var = 's2.tmp_1' ) \u6ce8\u610f\u4e8b\u9879 \u5728\u6dfb\u52a0\u84b8\u998floss\u65f6\u4f1a\u5f15\u5165\u65b0\u7684variable\uff0c\u9700\u8981\u6ce8\u610f\u65b0\u5f15\u5165\u7684variable\u4e0d\u8981\u4e0estudent variables\u547d\u540d\u51b2\u7a81\u3002\u8fd9\u91cc\u5efa\u8bae\u4e24\u79cd\u7528\u6cd5\uff08\u4e24\u79cd\u65b9\u6cd5\u4efb\u9009\u5176\u4e00\u5373\u53ef\uff09\uff1a \u5efa\u8bae\u4e0estudent_program\u4f7f\u7528\u540c\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u4ee5\u907f\u514d\u4e00\u4e9b\u672a\u6307\u5b9a\u540d\u79f0\u7684variables(\u4f8b\u5982tmp_0, tmp_1...)\u591a\u6b21\u5b9a\u4e49\u4e3a\u540c\u4e00\u540d\u79f0\u51fa\u73b0\u547d\u540d\u51b2\u7a81 \u5efa\u8bae\u5728\u6dfb\u52a0\u84b8\u998floss\u65f6\u6307\u5b9a\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u524d\u7f00\uff0c\u5177\u4f53\u7528\u6cd5\u8bf7\u53c2\u8003Paddle\u5b98\u65b9\u6587\u6863 fluid.name_scope","title":"loss"},{"location":"tutorials/demo_guide/","text":"\u84b8\u998f # \u84b8\u998fdemo\u9ed8\u8ba4\u4f7f\u7528ResNet50\u4f5c\u4e3ateacher\u7f51\u7edc\uff0cMobileNet\u4f5c\u4e3astudent\u7f51\u7edc\uff0c\u6b64\u5916\u8fd8\u652f\u6301\u5c06teacher\u548cstudent\u6362\u6210 models\u76ee\u5f55 \u652f\u6301\u7684\u4efb\u610f\u6a21\u578b\u3002 demo\u4e2d\u5bf9teacher\u6a21\u578b\u548cstudent\u6a21\u578b\u7684\u4e00\u5c42\u7279\u5f81\u56fe\u6dfb\u52a0\u4e86l2_loss\u7684\u84b8\u998f\u635f\u5931\u51fd\u6570\uff0c\u4f7f\u7528\u65f6\u4e5f\u53ef\u6839\u636e\u9700\u8981\u9009\u62e9fsp_loss, soft_label_loss\u4ee5\u53ca\u81ea\u5b9a\u4e49\u7684loss\u51fd\u6570\u3002
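As a hedged illustration of the sentence above: the sketch below shows how the single distillation loss used by the demo could be switched between l2_loss, fsp_loss and soft_label_loss after merging the two programs. It only uses the paddleslim.dist signatures documented earlier in this section; the feature-map names ('teacher_t1.tmp_1', 'teacher_t2.tmp_1', 's1.tmp_1', 's2.tmp_1'), the merged main_program and the student avg_cost are placeholders borrowed from the API examples above, not the exact names produced by the demo script.

import paddle.fluid as fluid
import paddleslim.dist as dist

# Assumed to already exist: main_program returned by dist.merge(...),
# and avg_cost, the student's original task loss.
with fluid.program_guard(main_program):
    with fluid.name_scope("distill"):
        # default choice in the demo: l2 loss on one pair of teacher/student feature maps
        distill_loss = dist.l2_loss('teacher_t2.tmp_1', 's2.tmp_1', main_program)
        # alternatives, chosen as needed (names are illustrative):
        # distill_loss = dist.fsp_loss('teacher_t1.tmp_1', 'teacher_t2.tmp_1',
        #                              's1.tmp_1', 's2.tmp_1', main_program)
        # distill_loss = dist.soft_label_loss('teacher_t2.tmp_1', 's2.tmp_1',
        #                                     main_program, 1., 1.)
        loss = avg_cost + 1.0 * distill_loss  # weight the distillation term as needed

Whichever loss is chosen, the resulting combined loss is what the optimizer minimizes, as in the distillation tutorial later in this document.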
\u8bad\u7ec3\u9ed8\u8ba4\u4f7f\u7528\u7684\u662fcifar10\u6570\u636e\u96c6\uff0cpiecewise_decay\u5b66\u4e60\u7387\u8870\u51cf\u7b56\u7565\uff0cmomentum\u4f18\u5316\u5668\u8fdb\u884c120\u8f6e\u84b8\u998f\u8bad\u7ec3\u3002\u4f7f\u7528\u8005\u4e5f\u53ef\u4ee5\u7b80\u5355\u5730\u7528args\u53c2\u6570\u5207\u6362\u4e3a\u4f7f\u7528ImageNet\u6570\u636e\u96c6\uff0ccosine_decay\u5b66\u4e60\u7387\u8870\u51cf\u7b56\u7565\u7b49\u5176\u4ed6\u8bad\u7ec3\u914d\u7f6e\u3002","title":"\u84b8\u998f"},{"location":"tutorials/demo_guide/#_2","text":"","title":"\u91cf\u5316"},{"location":"tutorials/demo_guide/#demo","text":"","title":"\u91cf\u5316\u8bad\u7ec3demo\u6587\u6863"},{"location":"tutorials/demo_guide/#demo_1","text":"","title":"\u79bb\u7ebf\u91cf\u5316demo\u6587\u6863"},{"location":"tutorials/demo_guide/#embeddingdemo","text":"","title":"Embedding\u91cf\u5316demo\u6587\u6863"},{"location":"tutorials/demo_guide/#nas","text":"","title":"NAS"},{"location":"tutorials/demo_guide/#nas_1","text":"","title":"NAS\u793a\u4f8b"},{"location":"tutorials/distillation_demo/","text":"\u672c\u793a\u4f8b\u5c06\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528PaddleSlim\u84b8\u998f\u63a5\u53e3\u6765\u5bf9\u6a21\u578b\u8fdb\u884c\u84b8\u998f\u8bad\u7ec3\u3002 \u63a5\u53e3\u4ecb\u7ecd # \u8bf7\u53c2\u8003 \u84b8\u998fAPI\u6587\u6863 \u3002 PaddleSlim\u84b8\u998f\u8bad\u7ec3\u6d41\u7a0b # \u4e00\u822c\u60c5\u51b5\u4e0b\uff0c\u6a21\u578b\u53c2\u6570\u91cf\u8d8a\u591a\uff0c\u7ed3\u6784\u8d8a\u590d\u6742\uff0c\u5176\u6027\u80fd\u8d8a\u597d\uff0c\u4f46\u8fd0\u7b97\u91cf\u548c\u8d44\u6e90\u6d88\u8017\u4e5f\u8d8a\u5927\u3002 \u77e5\u8bc6\u84b8\u998f \u5c31\u662f\u4e00\u79cd\u5c06\u5927\u6a21\u578b\u5b66\u4e60\u5230\u7684\u6709\u7528\u4fe1\u606f\uff08Dark Knowledge\uff09\u538b\u7f29\u8fdb\u66f4\u5c0f\u66f4\u5feb\u7684\u6a21\u578b\uff0c\u800c\u83b7\u5f97\u53ef\u4ee5\u5339\u654c\u5927\u6a21\u578b\u7ed3\u679c\u7684\u65b9\u6cd5\u3002 \u5728\u672c\u793a\u4f8b\u4e2d\u7cbe\u5ea6\u8f83\u9ad8\u7684\u5927\u6a21\u578b\u88ab\u79f0\u4e3ateacher\uff0c\u7cbe\u5ea6\u7a0d\u900a\u4f46\u901f\u5ea6\u66f4\u5feb\u7684\u5c0f\u6a21\u578b\u88ab\u79f0\u4e3astudent\u3002 1. \u5b9a\u4e49student_program # 1 2 3 4 5 6 7 8 9 10 11 student_program = fluid . Program () student_startup = fluid . Program () with fluid . program_guard ( student_program , student_startup ): image = fluid . data ( name = 'image' , shape = [ None ] + [ 3 , 224 , 224 ], dtype = 'float32' ) label = fluid . data ( name = 'label' , shape = [ None , 1 ], dtype = 'int64' ) # student model definition model = MobileNet () out = model . net ( input = image , class_dim = 1000 ) cost = fluid . layers . cross_entropy ( input = out , label = label ) avg_cost = fluid . layers . mean ( x = cost ) 2. \u5b9a\u4e49teacher_program # \u5728\u5b9a\u4e49\u597d teacher_program \u540e\uff0c\u53ef\u4ee5\u4e00\u5e76\u52a0\u8f7d\u8bad\u7ec3\u597d\u7684pretrained_model\u3002 \u5728 teacher_program \u5185\u9700\u8981\u52a0\u4e0a with fluid.unique_name.guard(): \uff0c\u4fdd\u8bc1teacher\u7684\u53d8\u91cf\u547d\u540d\u4e0d\u88ab student_program \u5f71\u54cd\uff0c\u4ece\u800c\u80fd\u591f\u6b63\u786e\u5730\u52a0\u8f7d\u9884\u8bad\u7ec3\u53c2\u6570\u3002 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 teacher_program = fluid . Program () teacher_startup = fluid . Program () with fluid . program_guard ( teacher_program , teacher_startup ): with fluid . unique_name . guard (): image = fluid . 
data ( name = 'data' , shape = [ None ] + [ 3 , 224 , 224 ], dtype = 'float32' ) # teacher model definition teacher_model = ResNet () predict = teacher_model . net ( image , class_dim = 1000 ) exe . run ( teacher_startup ) def if_exist ( var ): return os . path . exists ( os . path . join ( \"./pretrained\" , var . name ) fluid . io . load_vars ( exe , \"./pretrained\" , main_program = teacher_program , predicate = if_exist ) 3.\u9009\u62e9\u7279\u5f81\u56fe # \u5b9a\u4e49\u597d student_program \u548c teacher_program \u540e\uff0c\u6211\u4eec\u9700\u8981\u4ece\u4e2d\u4e24\u4e24\u5bf9\u5e94\u5730\u6311\u9009\u51fa\u82e5\u5e72\u4e2a\u7279\u5f81\u56fe\uff0c\u7559\u5f85\u540e\u7eed\u4e3a\u5176\u6dfb\u52a0\u77e5\u8bc6\u84b8\u998f\u635f\u5931\u51fd\u6570\u3002 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 # get all student variables student_vars = [] for v in student_program . list_vars (): try : student_vars . append (( v . name , v . shape )) except : pass print ( \"=\" * 50 + \"student_model_vars\" + \"=\" * 50 ) print ( student_vars ) # get all teacher variables teacher_vars = [] for v in teacher_program . list_vars (): try : teacher_vars . append (( v . name , v . shape )) except : pass print ( \"=\" * 50 + \"teacher_model_vars\" + \"=\" * 50 ) print ( teacher_vars ) 4. \u5408\u5e76Program\uff08merge\uff09 # PaddlePaddle\u4f7f\u7528Program\u6765\u63cf\u8ff0\u8ba1\u7b97\u56fe\uff0c\u4e3a\u4e86\u540c\u65f6\u8ba1\u7b97student\u548cteacher\u4e24\u4e2aProgram\uff0c\u8fd9\u91cc\u9700\u8981\u5c06\u5176\u4e24\u8005\u5408\u5e76\uff08merge\uff09\u4e3a\u4e00\u4e2aProgram\u3002 merge\u8fc7\u7a0b\u64cd\u4f5c\u8f83\u591a\uff0c\u5177\u4f53\u7ec6\u8282\u8bf7\u53c2\u8003 merge API\u6587\u6863 \u3002 1 2 data_name_map = { 'data' : 'image' } student_program = merge ( teacher_program , student_program , data_name_map , place ) 5.\u6dfb\u52a0\u84b8\u998floss # \u5728\u6dfb\u52a0\u84b8\u998floss\u7684\u8fc7\u7a0b\u4e2d\uff0c\u53ef\u80fd\u8fd8\u4f1a\u5f15\u5165\u90e8\u5206\u53d8\u91cf\uff08Variable\uff09\uff0c\u4e3a\u4e86\u907f\u514d\u547d\u540d\u91cd\u590d\u8fd9\u91cc\u53ef\u4ee5\u4f7f\u7528 with fluid.name_scope(\"distill\"): \u4e3a\u65b0\u5f15\u5165\u7684\u53d8\u91cf\u52a0\u4e00\u4e2a\u547d\u540d\u4f5c\u7528\u57df\u3002 \u53e6\u5916\u9700\u8981\u6ce8\u610f\u7684\u662f\uff0cmerge\u8fc7\u7a0b\u4e3a teacher_program \u7684\u53d8\u91cf\u7edf\u4e00\u52a0\u4e86\u540d\u79f0\u524d\u7f00\uff0c\u9ed8\u8ba4\u662f \"teacher_\" , \u8fd9\u91cc\u5728\u6dfb\u52a0 l2_loss \u65f6\u4e5f\u8981\u4e3ateacher\u7684\u53d8\u91cf\u52a0\u4e0a\u8fd9\u4e2a\u524d\u7f00\u3002 1 2 3 4 5 6 7 8 9 with fluid . program_guard ( student_program , student_startup ): with fluid . name_scope ( \"distill\" ): distill_loss = l2_loss ( 'teacher_bn5c_branch2b.output.1.tmp_3' , 'depthwise_conv2d_11.tmp_0' , student_program ) distill_weight = 1 loss = avg_cost + distill_loss * distill_weight opt = create_optimizer () opt . minimize ( loss ) exe . 
run ( student_startup ) \u81f3\u6b64\uff0c\u6211\u4eec\u5c31\u5f97\u5230\u4e86\u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684 student_program \uff0c\u540e\u9762\u5c31\u53ef\u4ee5\u4f7f\u7528\u4e00\u4e2a\u666e\u901aprogram\u4e00\u6837\u5bf9\u5176\u5f00\u59cb\u8bad\u7ec3\u548c\u8bc4\u4f30\u3002","title":"\u77e5\u8bc6\u84b8\u998f"},{"location":"tutorials/distillation_demo/#_1","text":"\u8bf7\u53c2\u8003 \u84b8\u998fAPI\u6587\u6863 \u3002","title":"\u63a5\u53e3\u4ecb\u7ecd"},{"location":"tutorials/distillation_demo/#paddleslim","text":"\u4e00\u822c\u60c5\u51b5\u4e0b\uff0c\u6a21\u578b\u53c2\u6570\u91cf\u8d8a\u591a\uff0c\u7ed3\u6784\u8d8a\u590d\u6742\uff0c\u5176\u6027\u80fd\u8d8a\u597d\uff0c\u4f46\u8fd0\u7b97\u91cf\u548c\u8d44\u6e90\u6d88\u8017\u4e5f\u8d8a\u5927\u3002 \u77e5\u8bc6\u84b8\u998f \u5c31\u662f\u4e00\u79cd\u5c06\u5927\u6a21\u578b\u5b66\u4e60\u5230\u7684\u6709\u7528\u4fe1\u606f\uff08Dark Knowledge\uff09\u538b\u7f29\u8fdb\u66f4\u5c0f\u66f4\u5feb\u7684\u6a21\u578b\uff0c\u800c\u83b7\u5f97\u53ef\u4ee5\u5339\u654c\u5927\u6a21\u578b\u7ed3\u679c\u7684\u65b9\u6cd5\u3002 \u5728\u672c\u793a\u4f8b\u4e2d\u7cbe\u5ea6\u8f83\u9ad8\u7684\u5927\u6a21\u578b\u88ab\u79f0\u4e3ateacher\uff0c\u7cbe\u5ea6\u7a0d\u900a\u4f46\u901f\u5ea6\u66f4\u5feb\u7684\u5c0f\u6a21\u578b\u88ab\u79f0\u4e3astudent\u3002","title":"PaddleSlim\u84b8\u998f\u8bad\u7ec3\u6d41\u7a0b"},{"location":"tutorials/distillation_demo/#1-student_program","text":"1 2 3 4 5 6 7 8 9 10 11 student_program = fluid . Program () student_startup = fluid . Program () with fluid . program_guard ( student_program , student_startup ): image = fluid . data ( name = 'image' , shape = [ None ] + [ 3 , 224 , 224 ], dtype = 'float32' ) label = fluid . data ( name = 'label' , shape = [ None , 1 ], dtype = 'int64' ) # student model definition model = MobileNet () out = model . net ( input = image , class_dim = 1000 ) cost = fluid . layers . cross_entropy ( input = out , label = label ) avg_cost = fluid . layers . mean ( x = cost )","title":"1. \u5b9a\u4e49student_program"},{"location":"tutorials/distillation_demo/#2-teacher_program","text":"\u5728\u5b9a\u4e49\u597d teacher_program \u540e\uff0c\u53ef\u4ee5\u4e00\u5e76\u52a0\u8f7d\u8bad\u7ec3\u597d\u7684pretrained_model\u3002 \u5728 teacher_program \u5185\u9700\u8981\u52a0\u4e0a with fluid.unique_name.guard(): \uff0c\u4fdd\u8bc1teacher\u7684\u53d8\u91cf\u547d\u540d\u4e0d\u88ab student_program \u5f71\u54cd\uff0c\u4ece\u800c\u80fd\u591f\u6b63\u786e\u5730\u52a0\u8f7d\u9884\u8bad\u7ec3\u53c2\u6570\u3002 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 teacher_program = fluid . Program () teacher_startup = fluid . Program () with fluid . program_guard ( teacher_program , teacher_startup ): with fluid . unique_name . guard (): image = fluid . data ( name = 'data' , shape = [ None ] + [ 3 , 224 , 224 ], dtype = 'float32' ) # teacher model definition teacher_model = ResNet () predict = teacher_model . net ( image , class_dim = 1000 ) exe . run ( teacher_startup ) def if_exist ( var ): return os . path . exists ( os . path . join ( \"./pretrained\" , var . name ) fluid . io . load_vars ( exe , \"./pretrained\" , main_program = teacher_program , predicate = if_exist )","title":"2. 
\u5b9a\u4e49teacher_program"},{"location":"tutorials/distillation_demo/#3","text":"\u5b9a\u4e49\u597d student_program \u548c teacher_program \u540e\uff0c\u6211\u4eec\u9700\u8981\u4ece\u4e2d\u4e24\u4e24\u5bf9\u5e94\u5730\u6311\u9009\u51fa\u82e5\u5e72\u4e2a\u7279\u5f81\u56fe\uff0c\u7559\u5f85\u540e\u7eed\u4e3a\u5176\u6dfb\u52a0\u77e5\u8bc6\u84b8\u998f\u635f\u5931\u51fd\u6570\u3002 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 # get all student variables student_vars = [] for v in student_program . list_vars (): try : student_vars . append (( v . name , v . shape )) except : pass print ( \"=\" * 50 + \"student_model_vars\" + \"=\" * 50 ) print ( student_vars ) # get all teacher variables teacher_vars = [] for v in teacher_program . list_vars (): try : teacher_vars . append (( v . name , v . shape )) except : pass print ( \"=\" * 50 + \"teacher_model_vars\" + \"=\" * 50 ) print ( teacher_vars )","title":"3.\u9009\u62e9\u7279\u5f81\u56fe"},{"location":"tutorials/distillation_demo/#4-programmerge","text":"PaddlePaddle\u4f7f\u7528Program\u6765\u63cf\u8ff0\u8ba1\u7b97\u56fe\uff0c\u4e3a\u4e86\u540c\u65f6\u8ba1\u7b97student\u548cteacher\u4e24\u4e2aProgram\uff0c\u8fd9\u91cc\u9700\u8981\u5c06\u5176\u4e24\u8005\u5408\u5e76\uff08merge\uff09\u4e3a\u4e00\u4e2aProgram\u3002 merge\u8fc7\u7a0b\u64cd\u4f5c\u8f83\u591a\uff0c\u5177\u4f53\u7ec6\u8282\u8bf7\u53c2\u8003 merge API\u6587\u6863 \u3002 1 2 data_name_map = { 'data' : 'image' } student_program = merge ( teacher_program , student_program , data_name_map , place )","title":"4. \u5408\u5e76Program\uff08merge\uff09"},{"location":"tutorials/distillation_demo/#5loss","text":"\u5728\u6dfb\u52a0\u84b8\u998floss\u7684\u8fc7\u7a0b\u4e2d\uff0c\u53ef\u80fd\u8fd8\u4f1a\u5f15\u5165\u90e8\u5206\u53d8\u91cf\uff08Variable\uff09\uff0c\u4e3a\u4e86\u907f\u514d\u547d\u540d\u91cd\u590d\u8fd9\u91cc\u53ef\u4ee5\u4f7f\u7528 with fluid.name_scope(\"distill\"): \u4e3a\u65b0\u5f15\u5165\u7684\u53d8\u91cf\u52a0\u4e00\u4e2a\u547d\u540d\u4f5c\u7528\u57df\u3002 \u53e6\u5916\u9700\u8981\u6ce8\u610f\u7684\u662f\uff0cmerge\u8fc7\u7a0b\u4e3a teacher_program \u7684\u53d8\u91cf\u7edf\u4e00\u52a0\u4e86\u540d\u79f0\u524d\u7f00\uff0c\u9ed8\u8ba4\u662f \"teacher_\" , \u8fd9\u91cc\u5728\u6dfb\u52a0 l2_loss \u65f6\u4e5f\u8981\u4e3ateacher\u7684\u53d8\u91cf\u52a0\u4e0a\u8fd9\u4e2a\u524d\u7f00\u3002 1 2 3 4 5 6 7 8 9 with fluid . program_guard ( student_program , student_startup ): with fluid . name_scope ( \"distill\" ): distill_loss = l2_loss ( 'teacher_bn5c_branch2b.output.1.tmp_3' , 'depthwise_conv2d_11.tmp_0' , student_program ) distill_weight = 1 loss = avg_cost + distill_loss * distill_weight opt = create_optimizer () opt . minimize ( loss ) exe . 
run ( student_startup ) \u81f3\u6b64\uff0c\u6211\u4eec\u5c31\u5f97\u5230\u4e86\u7528\u4e8e\u84b8\u998f\u8bad\u7ec3\u7684 student_program \uff0c\u540e\u9762\u5c31\u53ef\u4ee5\u4f7f\u7528\u4e00\u4e2a\u666e\u901aprogram\u4e00\u6837\u5bf9\u5176\u5f00\u59cb\u8bad\u7ec3\u548c\u8bc4\u4f30\u3002","title":"5.\u6dfb\u52a0\u84b8\u998floss"},{"location":"tutorials/nas_demo/","text":"\u7f51\u7edc\u7ed3\u6784\u641c\u7d22\u793a\u4f8b # \u672c\u793a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u7f51\u7edc\u7ed3\u6784\u641c\u7d22\u63a5\u53e3\uff0c\u641c\u7d22\u5230\u4e00\u4e2a\u66f4\u5c0f\u6216\u8005\u7cbe\u5ea6\u66f4\u9ad8\u7684\u6a21\u578b\uff0c\u8be5\u6587\u6863\u4ec5\u4ecb\u7ecdpaddleslim\u4e2dSANAS\u7684\u4f7f\u7528\u53ca\u5982\u4f55\u5229\u7528SANAS\u5f97\u5230\u6a21\u578b\u7ed3\u6784\uff0c\u5b8c\u6574\u793a\u4f8b\u4ee3\u7801\u8bf7\u53c2\u8003sa_nas_mobilenetv2.py\u6216\u8005block_sa_nas_mobilenetv2.py\u3002 \u63a5\u53e3\u4ecb\u7ecd # \u8bf7\u53c2\u8003\u3002 1. \u914d\u7f6e\u641c\u7d22\u7a7a\u95f4 # \u8be6\u7ec6\u7684\u641c\u7d22\u7a7a\u95f4\u914d\u7f6e\u53ef\u4ee5\u53c2\u8003 \u795e\u7ecf\u7f51\u7edc\u641c\u7d22API\u6587\u6863 \u3002 1 config = [( 'MobileNetV2Space' )] 2. \u5229\u7528\u641c\u7d22\u7a7a\u95f4\u521d\u59cb\u5316SANAS\u5b9e\u4f8b # 1 2 3 4 5 6 7 8 9 from paddleslim.nas import SANAS sa_nas = SANAS ( config , server_addr = ( \"\" , 8881 ), init_temperature = 10.24 , reduce_rate = 0.85 , search_steps = 300 , is_server = True ) 3. \u6839\u636e\u5b9e\u4f8b\u5316\u7684NAS\u5f97\u5230\u5f53\u524d\u7684\u7f51\u7edc\u7ed3\u6784 # 1 archs = sa_nas . next_archs () 4. \u6839\u636e\u5f97\u5230\u7684\u7f51\u7edc\u7ed3\u6784\u548c\u8f93\u5165\u6784\u9020\u8bad\u7ec3\u548c\u6d4b\u8bd5program # 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid train_program = fluid . Program () test_program = fluid . Program () startup_program = fluid . Program () with fluid . program_guard ( train_program , startup_program ): data = fluid . data ( name = 'data' , shape = [ None , 3 , 32 , 32 ], dtype = 'float32' ) label = fluid . data ( name = 'label' , shape = [ None , 1 ], dtype = 'int64' ) for arch in archs : data = arch ( data ) output = fluid . layers . fc ( data , 10 ) softmax_out = fluid . layers . softmax ( input = output , use_cudnn = False ) cost = fluid . layers . cross_entropy ( input = softmax_out , label = label ) avg_cost = fluid . layers . mean ( cost ) acc_top1 = fluid . layers . accuracy ( input = softmax_out , label = label , k = 1 ) test_program = train_program . clone ( for_test = True ) sgd = fluid . optimizer . SGD ( learning_rate = 1e-3 ) sgd . minimize ( avg_cost ) 5. \u6839\u636e\u6784\u9020\u7684\u8bad\u7ec3program\u6dfb\u52a0\u9650\u5236\u6761\u4ef6 # 1 2 3 4 from paddleslim.analysis import flops if flops ( train_program ) > 321208544 : continue 6. \u56de\u4f20score # 1 sa_nas . 
reward ( score )","title":"SA\u641c\u7d22"},{"location":"tutorials/nas_demo/#_1","text":"\u672c\u793a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u7f51\u7edc\u7ed3\u6784\u641c\u7d22\u63a5\u53e3\uff0c\u641c\u7d22\u5230\u4e00\u4e2a\u66f4\u5c0f\u6216\u8005\u7cbe\u5ea6\u66f4\u9ad8\u7684\u6a21\u578b\uff0c\u8be5\u6587\u6863\u4ec5\u4ecb\u7ecdpaddleslim\u4e2dSANAS\u7684\u4f7f\u7528\u53ca\u5982\u4f55\u5229\u7528SANAS\u5f97\u5230\u6a21\u578b\u7ed3\u6784\uff0c\u5b8c\u6574\u793a\u4f8b\u4ee3\u7801\u8bf7\u53c2\u8003sa_nas_mobilenetv2.py\u6216\u8005block_sa_nas_mobilenetv2.py\u3002","title":"\u7f51\u7edc\u7ed3\u6784\u641c\u7d22\u793a\u4f8b"},{"location":"tutorials/nas_demo/#_2","text":"\u8bf7\u53c2\u8003\u3002","title":"\u63a5\u53e3\u4ecb\u7ecd"},{"location":"tutorials/nas_demo/#1","text":"\u8be6\u7ec6\u7684\u641c\u7d22\u7a7a\u95f4\u914d\u7f6e\u53ef\u4ee5\u53c2\u8003 \u795e\u7ecf\u7f51\u7edc\u641c\u7d22API\u6587\u6863 \u3002 1 config = [( 'MobileNetV2Space' )]","title":"1. \u914d\u7f6e\u641c\u7d22\u7a7a\u95f4"},{"location":"tutorials/nas_demo/#2-sanas","text":"1 2 3 4 5 6 7 8 9 from paddleslim.nas import SANAS sa_nas = SANAS ( config , server_addr = ( \"\" , 8881 ), init_temperature = 10.24 , reduce_rate = 0.85 , search_steps = 300 , is_server = True )","title":"2. \u5229\u7528\u641c\u7d22\u7a7a\u95f4\u521d\u59cb\u5316SANAS\u5b9e\u4f8b"},{"location":"tutorials/nas_demo/#3-nas","text":"1 archs = sa_nas . next_archs ()","title":"3. \u6839\u636e\u5b9e\u4f8b\u5316\u7684NAS\u5f97\u5230\u5f53\u524d\u7684\u7f51\u7edc\u7ed3\u6784"},{"location":"tutorials/nas_demo/#4-program","text":"1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import paddle.fluid as fluid train_program = fluid . Program () test_program = fluid . Program () startup_program = fluid . Program () with fluid . program_guard ( train_program , startup_program ): data = fluid . data ( name = 'data' , shape = [ None , 3 , 32 , 32 ], dtype = 'float32' ) label = fluid . data ( name = 'label' , shape = [ None , 1 ], dtype = 'int64' ) for arch in archs : data = arch ( data ) output = fluid . layers . fc ( data , 10 ) softmax_out = fluid . layers . softmax ( input = output , use_cudnn = False ) cost = fluid . layers . cross_entropy ( input = softmax_out , label = label ) avg_cost = fluid . layers . mean ( cost ) acc_top1 = fluid . layers . accuracy ( input = softmax_out , label = label , k = 1 ) test_program = train_program . clone ( for_test = True ) sgd = fluid . optimizer . SGD ( learning_rate = 1e-3 ) sgd . minimize ( avg_cost )","title":"4. \u6839\u636e\u5f97\u5230\u7684\u7f51\u7edc\u7ed3\u6784\u548c\u8f93\u5165\u6784\u9020\u8bad\u7ec3\u548c\u6d4b\u8bd5program"},{"location":"tutorials/nas_demo/#5-program","text":"1 2 3 4 from paddleslim.analysis import flops if flops ( train_program ) > 321208544 : continue","title":"5. \u6839\u636e\u6784\u9020\u7684\u8bad\u7ec3program\u6dfb\u52a0\u9650\u5236\u6761\u4ef6"},{"location":"tutorials/nas_demo/#6-score","text":"1 sa_nas . reward ( score )","title":"6. \u56de\u4f20score"},{"location":"tutorials/quant_aware_demo/","text":"\u5728\u7ebf\u91cf\u5316\u793a\u4f8b # \u672c\u793a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u5728\u7ebf\u91cf\u5316\u63a5\u53e3\uff0c\u6765\u5bf9\u8bad\u7ec3\u597d\u7684\u5206\u7c7b\u6a21\u578b\u8fdb\u884c\u91cf\u5316, \u53ef\u4ee5\u51cf\u5c11\u6a21\u578b\u7684\u5b58\u50a8\u7a7a\u95f4\u548c\u663e\u5b58\u5360\u7528\u3002 \u63a5\u53e3\u4ecb\u7ecd # \u8bf7\u53c2\u8003 \u91cf\u5316API\u6587\u6863 \u3002 \u5206\u7c7b\u6a21\u578b\u7684\u79bb\u7ebf\u91cf\u5316\u6d41\u7a0b # 1. 
\u914d\u7f6e\u91cf\u5316\u53c2\u6570 # 1 2 3 4 5 6 7 8 9 10 11 12 quant_config = { 'weight_quantize_type' : 'abs_max' , 'activation_quantize_type' : 'moving_average_abs_max' , 'weight_bits' : 8 , 'activation_bits' : 8 , 'not_quant_pattern' : [ 'skip_quant' ], 'quantize_op_types' : [ 'conv2d' , 'depthwise_conv2d' , 'mul' ], 'dtype' : 'int8' , 'window_size' : 10000 , 'moving_rate' : 0 . 9 , 'quant_weight_only' : False } 2. \u5bf9\u8bad\u7ec3\u548c\u6d4b\u8bd5program\u63d2\u5165\u53ef\u8bad\u7ec3\u91cf\u5316op # 1 2 3 val_program = quant_aware ( val_program , place , quant_config , scope = None , for_test = True ) compiled_train_prog = quant_aware ( train_prog , place , quant_config , scope = None , for_test = False ) 3.\u5173\u6389\u6307\u5b9abuild\u7b56\u7565 # 1 2 3 4 5 6 7 8 build_strategy = fluid . BuildStrategy () build_strategy . fuse_all_reduce_ops = False build_strategy . sync_batch_norm = False exec_strategy = fluid . ExecutionStrategy () compiled_train_prog = compiled_train_prog . with_data_parallel ( loss_name = avg_cost . name , build_strategy = build_strategy , exec_strategy = exec_strategy ) 4. freeze program # 1 2 3 4 5 float_program , int8_program = convert ( val_program , place , quant_config , scope = None , save_int8 = True ) 5.\u4fdd\u5b58\u9884\u6d4b\u6a21\u578b # 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 fluid . io . save_inference_model ( dirname = float_path , feeded_var_names = [ image . name ], target_vars = [ out ], executor = exe , main_program = float_program , model_filename = float_path + ' /model ' , params_filename = float_path + ' /params ' ) fluid . io . save_inference_model ( dirname = int8_path , feeded_var_names = [ image . name ], target_vars = [ out ], executor = exe , main_program = int8_program , model_filename = int8_path + ' /model ' , params_filename = int8_path + ' /params ' )","title":"\u91cf\u5316\u8bad\u7ec3"},{"location":"tutorials/quant_aware_demo/#_1","text":"\u672c\u793a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u5728\u7ebf\u91cf\u5316\u63a5\u53e3\uff0c\u6765\u5bf9\u8bad\u7ec3\u597d\u7684\u5206\u7c7b\u6a21\u578b\u8fdb\u884c\u91cf\u5316, \u53ef\u4ee5\u51cf\u5c11\u6a21\u578b\u7684\u5b58\u50a8\u7a7a\u95f4\u548c\u663e\u5b58\u5360\u7528\u3002","title":"\u5728\u7ebf\u91cf\u5316\u793a\u4f8b"},{"location":"tutorials/quant_aware_demo/#_2","text":"\u8bf7\u53c2\u8003 \u91cf\u5316API\u6587\u6863 \u3002","title":"\u63a5\u53e3\u4ecb\u7ecd"},{"location":"tutorials/quant_aware_demo/#_3","text":"","title":"\u5206\u7c7b\u6a21\u578b\u7684\u79bb\u7ebf\u91cf\u5316\u6d41\u7a0b"},{"location":"tutorials/quant_aware_demo/#1","text":"1 2 3 4 5 6 7 8 9 10 11 12 quant_config = { 'weight_quantize_type' : 'abs_max' , 'activation_quantize_type' : 'moving_average_abs_max' , 'weight_bits' : 8 , 'activation_bits' : 8 , 'not_quant_pattern' : [ 'skip_quant' ], 'quantize_op_types' : [ 'conv2d' , 'depthwise_conv2d' , 'mul' ], 'dtype' : 'int8' , 'window_size' : 10000 , 'moving_rate' : 0 . 9 , 'quant_weight_only' : False }","title":"1. \u914d\u7f6e\u91cf\u5316\u53c2\u6570"},{"location":"tutorials/quant_aware_demo/#2-programop","text":"1 2 3 val_program = quant_aware ( val_program , place , quant_config , scope = None , for_test = True ) compiled_train_prog = quant_aware ( train_prog , place , quant_config , scope = None , for_test = False )","title":"2. \u5bf9\u8bad\u7ec3\u548c\u6d4b\u8bd5program\u63d2\u5165\u53ef\u8bad\u7ec3\u91cf\u5316op"},{"location":"tutorials/quant_aware_demo/#3build","text":"1 2 3 4 5 6 7 8 build_strategy = fluid . BuildStrategy () build_strategy . 
fuse_all_reduce_ops = False build_strategy . sync_batch_norm = False exec_strategy = fluid . ExecutionStrategy () compiled_train_prog = compiled_train_prog . with_data_parallel ( loss_name = avg_cost . name , build_strategy = build_strategy , exec_strategy = exec_strategy )","title":"3.\u5173\u6389\u6307\u5b9abuild\u7b56\u7565"},{"location":"tutorials/quant_aware_demo/#4-freeze-program","text":"1 2 3 4 5 float_program , int8_program = convert ( val_program , place , quant_config , scope = None , save_int8 = True )","title":"4. freeze program"},{"location":"tutorials/quant_aware_demo/#5","text":"1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 fluid . io . save_inference_model ( dirname = float_path , feeded_var_names = [ image . name ], target_vars = [ out ], executor = exe , main_program = float_program , model_filename = float_path + ' /model ' , params_filename = float_path + ' /params ' ) fluid . io . save_inference_model ( dirname = int8_path , feeded_var_names = [ image . name ], target_vars = [ out ], executor = exe , main_program = int8_program , model_filename = int8_path + ' /model ' , params_filename = int8_path + ' /params ' )","title":"5.\u4fdd\u5b58\u9884\u6d4b\u6a21\u578b"},{"location":"tutorials/quant_embedding_demo/","text":"Embedding\u91cf\u5316\u793a\u4f8b # \u672c\u793a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528Embedding\u91cf\u5316\u7684\u63a5\u53e3 paddleslim.quant.quant_embedding \u3002 quant_embedding \u63a5\u53e3\u5c06\u7f51\u7edc\u4e2d\u7684Embedding\u53c2\u6570\u4ece float32 \u7c7b\u578b\u91cf\u5316\u5230 8-bit \u6574\u6570\u7c7b\u578b\uff0c\u5728\u51e0\u4e4e\u4e0d\u635f\u5931\u6a21\u578b\u7cbe\u5ea6\u7684\u60c5\u51b5\u4e0b\u51cf\u5c11\u6a21\u578b\u7684\u5b58\u50a8\u7a7a\u95f4\u548c\u663e\u5b58\u5360\u7528\u3002 \u63a5\u53e3\u4ecb\u7ecd\u8bf7\u53c2\u8003 \u91cf\u5316API\u6587\u6863 \u3002 \u8be5\u63a5\u53e3\u5bf9program\u7684\u4fee\u6539\uff1a \u91cf\u5316\u524d: \u56fe1\uff1a\u91cf\u5316\u524d\u7684\u6a21\u578b\u7ed3\u6784 \u91cf\u5316\u540e\uff1a \u56fe2: \u91cf\u5316\u540e\u7684\u6a21\u578b\u7ed3\u6784 \u4ee5\u4e0b\u5c06\u4ee5 \u57fa\u4e8eskip-gram\u7684word2vector\u6a21\u578b \u4e3a\u4f8b\u6765\u8bf4\u660e\u5982\u4f55\u4f7f\u7528 quant_embedding \u63a5\u53e3\u3002\u9996\u5148\u4ecb\u7ecd \u57fa\u4e8eskip-gram\u7684word2vector\u6a21\u578b \u7684\u6b63\u5e38\u8bad\u7ec3\u548c\u6d4b\u8bd5\u6d41\u7a0b\u3002 \u57fa\u4e8eskip-gram\u7684word2vector\u6a21\u578b # \u4ee5\u4e0b\u662f\u672c\u4f8b\u7684\u7b80\u8981\u76ee\u5f55\u7ed3\u6784\u53ca\u8bf4\u660e\uff1a 1 2 3 4 5 6 7 8 9 10 . 
The skip-gram based word2vector model #

A brief directory layout of this example:

    .
    ├── cluster_train.py    # distributed training entry point
    ├── cluster_train.sh    # script that simulates multi-machine training locally
    ├── train.py            # training entry point
    ├── infer.py            # inference/evaluation script
    ├── net.py              # network definition
    ├── preprocess.py       # preprocessing script: builds the vocabulary and preprocesses the text
    ├── reader.py           # text reader used during training
    └── utils.py            # common utilities

Introduction #

This example implements the skip-gram variant of the word2vector model. Users are also encouraged to refer to the IPython Notebook demo.

Data download #

The full dataset comes from the 1 Billion Word Language Model Benchmark ( http://www.statmt.org/lm-benchmark ):

    mkdir data
    wget http://www.statmt.org/lm-benchmark/1-billion-word-language-modeling-benchmark-r13output.tar.gz
    tar xzvf 1-billion-word-language-modeling-benchmark-r13output.tar.gz
    mv 1-billion-word-language-modeling-benchmark-r13output/training-monolingual.tokenized.shuffled/ data/

A mirror of the dataset can be downloaded with:

    mkdir data
    wget https://paddlerec.bj.bcebos.com/word2vec/1-billion-word-language-modeling-benchmark-r13output.tar
    tar xvf 1-billion-word-language-modeling-benchmark-r13output.tar
    mv 1-billion-word-language-modeling-benchmark-r13output/training-monolingual.tokenized.shuffled/ data/

For quick verification we also provide the classic text8 sample dataset, which contains about 17 million words. Download it with:

    mkdir data
    wget https://paddlerec.bj.bcebos.com/word2vec/text.tar
    tar xvf text.tar
    mv text data/

Data preprocessing #

The steps below preprocess the sample dataset. Note that for the full dataset, the preprocessing directory after extraction is training-monolingual.tokenized.shuffled, which sits next to the sample dataset's text directory.

Vocabulary format: word<space>frequency, one entry per line; low-frequency words are represented by 'UNK'. You can also build the vocabulary yourself in this format, in which case skip the first step.

    the 1061396
    of 593677
    and 416629
    one 411764
    in 372201
    a 325873
    <UNK> 324608
    to 316376
    zero 264975
    nine 250430

Step 1: build the vocabulary from the English corpus. For a Chinese corpus, you can customize the processing by modifying the text_strip method.

    python preprocess.py --build_dict --build_dict_corpus_dir data/text/ --dict_path data/test_build_dict

Step 2: convert the text to ids according to the vocabulary, downsample frequent words by probability, and generate the word-to-id mapping file, whose name is the vocabulary path plus the suffix "_word_to_id_".

    python preprocess.py --filter_corpus --dict_path data/test_build_dict --input_corpus_dir data/text --output_corpus_dir data/convert_text8 --min_count 5 --downsample 0.001
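The vocabulary file is plain text in the word<space>frequency format shown above, so it is easy to inspect. The snippet below is a small illustrative sketch, not one of the demo scripts; the path is the one produced by the --build_dict command.

```python
# Illustrative sketch only: inspect the vocabulary built by preprocess.py.
def load_vocab(dict_path):
    """Return {word: frequency} from a 'word<space>frequency' file."""
    vocab = {}
    with open(dict_path, 'r') as f:
        for line in f:
            word, freq = line.rstrip('\n').split(' ')
            vocab[word] = int(freq)
    return vocab

vocab = load_vocab('data/test_build_dict')
print(len(vocab), vocab.get('<UNK>'))
```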
Training #

Run the following to see all configurable options:

    python train.py -h

Single-machine multi-threaded training:

    OPENBLAS_NUM_THREADS=1 CPU_NUM=5 python train.py --train_data_dir data/convert_text8 --dict_path data/test_build_dict --num_passes 10 --batch_size 100 --model_output_dir v1_cpu5_b100_lr1dir --base_lr 1.0 --print_batch 1000 --with_speed --is_sparse

Simulated multi-machine training on a single machine:

    sh cluster_train.sh

This example trains with the single-machine multi-threaded command above. After training finishes, the models are saved under v1_cpu5_b100_lr1dir in the current directory; running ls v1_cpu5_b100_lr1dir shows the model files for the 10 training epochs:

    pass-0 pass-1 pass-2 pass-3 pass-4 pass-5 pass-6 pass-7 pass-8 pass-9

Prediction #

Download the test sets:

    # test set for the full dataset
    wget https://paddlerec.bj.bcebos.com/word2vec/test_dir.tar
    # test set for the sample dataset
    wget https://paddlerec.bj.bcebos.com/word2vec/test_mid_dir.tar

Prediction command. Note that the vocabulary name needs the suffix "_word_to_id_"; this file was generated during preprocessing.

    python infer.py --infer_epoch --test_dir data/test_mid_dir --dict_path data/test_build_dict_word_to_id_ --batch_size 20000 --model_dir v1_cpu5_b100_lr1dir/ --start_index 0 --last_index 9

Running this command prints output like:

    ('start index: ', 0, ' last_index:', 9)
    ('vocab_size:', 63642)
    step: 1 249 epoch: 0 acc: 0.014
    step: 1 590 epoch: 1 acc: 0.033
    step: 1 982 epoch: 2 acc: 0.055
    step: 1 1338 epoch: 3 acc: 0.075
    step: 1 1653 epoch: 4 acc: 0.093
    step: 1 1914 epoch: 5 acc: 0.107
    step: 1 2204 epoch: 6 acc: 0.124
    step: 1 2416 epoch: 7 acc: 0.136
    step: 1 2606 epoch: 8 acc: 0.146
    step: 1 2722 epoch: 9 acc: 0.153
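infer.py evaluates every saved epoch in the range given by --start_index and --last_index. Assuming those flags behave as their names suggest (an inference from the command above, not from the script's documentation), passing --start_index 9 --last_index 9 would score only the final pass-9 model, which is a quick way to check a single checkpoint.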
Quantizing the skip-gram based word2vector model #

The quantization configuration is:

    config = {
        'params_name': 'emb',
        'quantize_type': 'abs_max'
    }

Run:

    python infer.py --infer_epoch --test_dir data/test_mid_dir --dict_path data/test_build_dict_word_to_id_ --batch_size 20000 --model_dir v1_cpu5_b100_lr1dir/ --start_index 0 --last_index 9 --emb_quant True

The output is:

    ('start index: ', 0, ' last_index:', 9)
    ('vocab_size:', 63642)
    quant_embedding config {'quantize_type': 'abs_max', 'params_name': 'emb', 'quantize_bits': 8, 'dtype': 'int8'}
    step: 1 253 epoch: 0 acc: 0.014
    quant_embedding config {'quantize_type': 'abs_max', 'params_name': 'emb', 'quantize_bits': 8, 'dtype': 'int8'}
    step: 1 586 epoch: 1 acc: 0.033
    quant_embedding config {'quantize_type': 'abs_max', 'params_name': 'emb', 'quantize_bits': 8, 'dtype': 'int8'}
    step: 1 970 epoch: 2 acc: 0.054
    quant_embedding config {'quantize_type': 'abs_max', 'params_name': 'emb', 'quantize_bits': 8, 'dtype': 'int8'}
    step: 1 1364 epoch: 3 acc: 0.077
    quant_embedding config {'quantize_type': 'abs_max', 'params_name': 'emb', 'quantize_bits': 8, 'dtype': 'int8'}
    step: 1 1642 epoch: 4 acc: 0.092
    quant_embedding config {'quantize_type': 'abs_max', 'params_name': 'emb', 'quantize_bits': 8, 'dtype': 'int8'}
    step: 1 1936 epoch: 5 acc: 0.109
    quant_embedding config {'quantize_type': 'abs_max', 'params_name': 'emb', 'quantize_bits': 8, 'dtype': 'int8'}
    step: 1 2216 epoch: 6 acc: 0.124
    quant_embedding config {'quantize_type': 'abs_max', 'params_name': 'emb', 'quantize_bits': 8, 'dtype': 'int8'}
    step: 1 2419 epoch: 7 acc: 0.136
    quant_embedding config {'quantize_type': 'abs_max', 'params_name': 'emb', 'quantize_bits': 8, 'dtype': 'int8'}
    step: 1 2603 epoch: 8 acc: 0.146
    quant_embedding config {'quantize_type': 'abs_max', 'params_name': 'emb', 'quantize_bits': 8, 'dtype': 'int8'}
    step: 1 2719 epoch: 9 acc: 0.153

The accuracy is essentially unchanged compared with the unquantized results above. The quantized model is saved under ./output_quant, where the quantized parameter 'emb.int8' is 3.9 MB; the original parameter 'emb' under ./v1_cpu5_b100_lr1dir is 16 MB.
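The roughly 4x size reduction matches what you would expect from storing the embedding table in int8 instead of float32. As a back-of-the-envelope check (the embedding dimension of 64 is an assumed value for this demo configuration, and the abs_max scale factors add a small amount on top):

```python
# Rough size check; emb_dim = 64 is an assumption, vocab_size comes from the
# '(vocab_size:, 63642)' line in the output above.
vocab_size, emb_dim = 63642, 64
fp32_mib = vocab_size * emb_dim * 4 / 2**20   # ~15.5 MiB, in line with the reported 16M 'emb'
int8_mib = vocab_size * emb_dim * 1 / 2**20   # ~3.9 MiB, in line with the reported 3.9M 'emb.int8'
print(fp32_mib, int8_mib)
```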
Post-training quantization example #

This example shows how to use the post-training quantization API paddleslim.quant.quant_post to quantize a trained classification model offline. The API produces a quantized model without any retraining, reducing the model's storage footprint and memory usage.

API #

Please refer to the quantization API documentation.

Post-training quantization workflow for a classification model #

Prepare the data #

Create a data folder under the current directory and extract the imagenet dataset into it. After extraction, the data folder should contain:

- the 'train' folder with the training images
- the 'train_list.txt' file
- the 'val' folder with the validation images
- the 'val_list.txt' file

Prepare the model to be quantized #

The post-training quantization API can only load models saved with fluid.io.save_inference_model, so if your model was saved through a different API you need to convert it first. This example uses a classification model: first download the pretrained mobilenetv1 model from the imagenet classification models, create a 'pretrain' folder under the current directory and extract the model into it; the extracted directory is pretrain/MobileNetV1_pretrained.
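If your checkpoint was not saved with fluid.io.save_inference_model, the conversion mentioned above boils down to rebuilding the inference network, loading the trained parameters, and re-saving. The sketch below is not part of this demo: the tiny fc network and the 'checkpoint_dir' path are placeholders standing in for your own architecture and weights.

```python
import paddle.fluid as fluid

# Minimal conversion sketch (placeholders noted above).
main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    image = fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
    out = fluid.layers.fc(input=image, size=1000, act='softmax')

place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)

# Load your trained weights into the program (path is a placeholder).
fluid.io.load_persistables(exe, 'checkpoint_dir', main_program=main_prog)

# Re-save in the single-file 'model' + 'weights' layout used by this demo.
fluid.io.save_inference_model(
    dirname='./inference_model/MyModel',
    feeded_var_names=[image.name],
    target_vars=[out],
    executor=exe,
    main_program=main_prog,
    model_filename='model',
    params_filename='weights')
```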
Export the model #

Run the following command to convert the model into the format required by the post-training quantization API:

    python export_model.py --model "MobileNet" --pretrained_model ./pretrain/MobileNetV1_pretrained --data imagenet

The converted model is stored under the inference_model/MobileNet/ folder, which contains two files: 'model' and 'weights'.

Post-training quantization #

Next, quantize the exported model files. The quantization script is quant_post.py, which calls paddleslim.quant.quant_post to quantize the model offline. Run:

    python quant_post.py --model_path ./inference_model/MobileNet --save_path ./quant_model_train/MobileNet --model_filename model --params_filename weights

- model_path: the folder containing the model to be quantized
- save_path: the path where the quantized model is saved
- model_filename: if the model's topology is saved in a single file, set this to that file's name; if it is saved across multiple files, leave it unset
- params_filename: if the model's parameters are saved in a single file, set this to that file's name; if they are saved across multiple files, leave it unset

After running the command, the quantized model file and parameter file can be found under ${save_path}. The quantization algorithm used is 'KL', and 160 images from the training set are used to calibrate the quantization parameters.

Test the accuracy #

Use the eval.py script to evaluate the model before and after quantization and compare the classification accuracy.

First evaluate the model before quantization:

    python eval.py --model_path ./inference_model/MobileNet --model_name model --params_name weights

The accuracy output is:

    top1_acc/top5_acc= [0.70913923 0.89548034]

Then evaluate the model after post-training quantization:

    python eval.py --model_path ./quant_model_train/MobileNet

The accuracy output is:

    top1_acc/top5_acc= [0.70141864 0.89086477]
The comparison shows that after post-training quantization of the mobilenet classification model on imagenet, the top1 accuracy drops by 0.77% and the top5 accuracy drops by 0.46%.
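The quant_post.py script is a thin wrapper around paddleslim.quant.quant_post. The sketch below shows roughly what such a direct call looks like; the exact signature, in particular the sample_generator argument and the batch settings, is an assumption based on this demo's behaviour, so consult the quantization API documentation before relying on it.

```python
import numpy as np
import paddle.fluid as fluid
from paddleslim.quant import quant_post

# Rough sketch of a direct quant_post call (signature assumed, see above).
place = fluid.CPUPlace()
exe = fluid.Executor(place)

def sample_generator():
    # Placeholder calibration reader; the demo iterates over real imagenet
    # training images instead of random tensors.
    for _ in range(160):
        yield [np.random.random((3, 224, 224)).astype('float32')]

quant_post(
    executor=exe,
    model_dir='./inference_model/MobileNet',
    quantize_model_path='./quant_model_train/MobileNet',
    sample_generator=sample_generator,
    model_filename='model',
    params_filename='weights',
    batch_size=16,
    batch_nums=10)      # 16 x 10 = 160 calibration samples, matching the demo
```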
Sensitivity demo #

This example shows how to analyze the sensitivity of each convolution layer in a convolutional network, and how to pick a suitable set of pruning ratios based on the computed sensitivities. The example automatically downloads and uses the MNIST dataset by default. The following models are supported:

- MobileNetV1
- MobileNetV2
- ResNet50

1. API #

This example uses the following APIs:

- paddleslim.prune.sensitivity
- paddleslim.prune.merge_sensitive
- paddleslim.prune.get_ratios_by_loss

2. Run the example #

Run the example under the path PaddleSlim/demo/sensitive:

    export CUDA_VISIBLE_DEVICES=0
    python train.py --model "MobileNetV1"

Run python train.py --help to see more options.

3. Key steps #

3.1 Compute sensitivities #

Before computing sensitivities, you need to build the network used for evaluation and implement a callback function that evaluates the model's accuracy.

Call paddleslim.prune.sensitivity to compute the sensitivities. The sensitivity information is appended to the file specified by the sensitivities_file option; if you need to recompute the sensitivities, delete the sensitivities_file file first.

If model evaluation is slow, the sensitivity computation can be accelerated with multiple processes. For example, set pruned_ratios=[0.1, 0.2, 0.3, 0.4] in process 1 and store its sensitivity information in sensitivities_0.data, then set pruned_ratios=[0.5, 0.6, 0.7] in process 2 and store its sensitivity information in sensitivities_1.data. Each process then only computes the sensitivities for its own pruning ratios. The processes can run on multiple GPUs of one machine, or across machines. The code looks like this:

    # process 1
    sensitivity(
        val_program,
        place,
        params,
        test,
        sensitivities_file="sensitivities_0.data",
        pruned_ratios=[0.1, 0.2, 0.3, 0.4])

    # process 2
    sensitivity(
        val_program,
        place,
        params,
        test,
        sensitivities_file="sensitivities_1.data",
        pruned_ratios=[0.5, 0.6, 0.7])
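The test argument above is the accuracy-evaluation callback you are expected to provide. Its exact contract is defined in the pruning API documentation; the sketch below assumes it receives the (pruned) evaluation program and returns a single accuracy value, which matches how this demo passes it to sensitivity.

```python
import numpy as np

# Hedged sketch of the `test` callback; the single-argument, scalar-return
# contract is an assumption. `exe`, `feeder`, `val_reader` and `acc_top1`
# come from the evaluation network you built beforehand.
def test(program):
    accs = []
    for data in val_reader():
        acc = exe.run(program,
                      feed=feeder.feed(data),
                      fetch_list=[acc_top1.name])[0]
        accs.append(float(np.mean(acc)))
    return float(np.mean(accs))
```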
3.2 Merge sensitivities #

If you generated several sensitivity files with the multi-process approach from the previous section, they can be merged with paddleslim.prune.merge_sensitive. The merged sensitivity information is stored in a dict:

    sens = merge_sensitive(["./sensitivities_0.data", "./sensitivities_1.data"])

3.3 Compute the pruning ratios #

Call paddleslim.prune.get_ratios_by_loss to compute a set of pruning ratios:

    ratios = get_ratios_by_loss(sens, 0.01)

Here 0.01 is a threshold: for each convolution layer, the chosen pruning ratio is the largest ratio whose accuracy loss stays below the threshold 0.01.

After computing a set of pruning ratios, you can prune the network with paddleslim.prune.Pruner and compute the FLOPs with paddleslim.analysis.flops. If the FLOPs do not meet your target, adjust the threshold and compute a new set of pruning ratios.
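Putting the last two steps together, the threshold adjustment described above can be written as a small loop. This is a sketch rather than demo code: the Pruner.prune signature, its return values and the only_graph option are assumptions based on the pruning API documentation, and sens, val_program and place come from the earlier steps, so verify everything against your PaddleSlim version.

```python
import paddle.fluid as fluid
from paddleslim.prune import Pruner, get_ratios_by_loss
from paddleslim.analysis import flops

# Sketch only (assumed Pruner.prune signature): raise the allowed accuracy
# loss until the pruned network's FLOPs fall below a target.
target_flops = 0.5 * flops(val_program)   # e.g. aim for half the original FLOPs
loss_threshold = 0.01
pruner = Pruner()

while True:
    ratios = get_ratios_by_loss(sens, loss_threshold)
    pruned_program = pruner.prune(
        val_program,
        fluid.global_scope(),
        params=list(ratios.keys()),
        ratios=list(ratios.values()),
        place=place,
        only_graph=True)[0]           # only_graph=True: just estimate shapes/FLOPs
    if flops(pruned_program) <= target_flops:
        break
    loss_threshold += 0.005           # allow a bit more accuracy loss and retry

print(loss_threshold, ratios)
```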
| Model | Compression method | Dataset | Image/GPU | mAP (input 608) | Download |
|-------|--------------------|---------|-----------|-----------------|----------|
| MobileNet-V1-YOLOv3 | - | Pascal VOC | 8 | 76.2 | Download link |
| MobileNet-V1-YOLOv3 | uniform -xx% | Pascal VOC | 8 | xx | |
| MobileNet-V1-YOLOv3 | - | COCO | 8 | 29.3 | Download link |
| MobileNet-V1-YOLOv3 | uniform -xx% | COCO | 8 | xx | |
| R50-dcn-YOLOv3 | - | COCO | 8 | 41.4 | Download link |
| R50-dcn-YOLOv3 | uniform -xx% | COCO | 8 | xx | |
| Model | Compression method | mIoU | Model size (MB) | FLOPs | Download |
|-------|--------------------|------|-----------------|-------|----------|
| DeepLabv3+/MobileNetv2 | - | 69.81 | xx | xx | Download link |
| DeepLabv3+/MobileNetv2 | prune -xx% | xx | xx | xx | |
| Model | Distillation teacher | Top-1/Top-5 | Download |
|-------|----------------------|-------------|----------|
| MobileNetV1 | - | 70.99%/89.68% | Download link |
| MobileNetV1 | ResNet50_vd1 | 72.79%/90.69% | Download link |
| MobileNetV2 | - | 72.15%/90.65% | Download link |
| MobileNetV2 | ResNet50_vd1 | 74.30%/91.52% | Download link |
| ResNet50 | - | 76.50%/93.00% | Download link |
| ResNet50 | ResNet1012 | 77.40%/93.48% | Download link |
| Model | Distillation teacher | Dataset | Image/GPU | mAP (input 640) | Download |
|-------|----------------------|---------|-----------|-----------------|----------|
| MobileNet-V1-YOLOv3 | - | Pascal VOC | 16 | 76.2 | Download link |
| MobileNet-V1-YOLOv3 | ResNet34-YOLOv3-VOC3 | Pascal VOC | 16 | xx | |
| MobileNet-V1-YOLOv3 | - | COCO | 16 | 29.3 | Download link |
| MobileNet-V1-YOLOv3 | ResNet34-YOLOv3-COCO4 | COCO | 16 | xx | |