Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
Paddle
提交
d6442df6
P
Paddle
项目概览
PaddlePaddle
/
Paddle
1 年多 前同步成功
通知
2302
Star
20931
Fork
5422
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1423
列表
看板
标记
里程碑
合并请求
543
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1,423
Issue
1,423
列表
看板
标记
里程碑
合并请求
543
合并请求
543
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
d6442df6
编写于
5月 04, 2022
作者:
G
Guanghua Yu
提交者:
GitHub
5月 04, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
support fuse conv and bn in QAT (#42255)
上级
b621a4f1
变更
6
隐藏空白更改
内联
并排
Showing
6 changed files
with
88 additions
and
1 deletion
+88
-1
python/paddle/fluid/contrib/slim/quantization/imperative/fuse_utils.py
.../fluid/contrib/slim/quantization/imperative/fuse_utils.py
+21
-0
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
.../paddle/fluid/contrib/slim/quantization/imperative/qat.py
+10
-0
python/paddle/fluid/contrib/slim/tests/CMakeLists.txt
python/paddle/fluid/contrib/slim/tests/CMakeLists.txt
+1
-0
python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
...on/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
+4
-1
python/paddle/fluid/contrib/slim/tests/test_imperative_qat_channelwise.py
...uid/contrib/slim/tests/test_imperative_qat_channelwise.py
+2
-0
python/paddle/fluid/contrib/slim/tests/test_imperative_qat_fuse.py
...ddle/fluid/contrib/slim/tests/test_imperative_qat_fuse.py
+50
-0
未找到文件。
python/paddle/fluid/contrib/slim/quantization/imperative/fuse_utils.py
浏览文件 @
d6442df6
...
...
@@ -28,6 +28,27 @@ class Identity(nn.Layer):
return
input
def fuse_conv_bn(model):
    """Fuse adjacent Conv2D + BatchNorm2D sublayer pairs in ``model``.

    Walks ``model.named_sublayers()`` in registration order, pairing each
    ``nn.Conv2D`` with the next ``nn.BatchNorm2D`` that follows it, and
    passes the collected (conv_name, bn_name) pairs to ``fuse_layers``,
    which performs the actual folding of BN statistics into conv weights.

    The model is temporarily switched to eval mode during fusion (folding
    uses the BN running statistics); the original train/eval mode is
    restored afterwards.

    Args:
        model (paddle.nn.Layer): dygraph model whose conv+bn pairs are
            to be fused.

    Returns:
        paddle.nn.Layer: the fused model. Callers should use the return
        value — ``fuse_layers`` defaults to ``inplace=False`` and may
        operate on a copy, so relying on in-place mutation is unsafe.
    """
    is_train = False
    if model.training:
        model.eval()
        is_train = True

    fuse_list = []
    tmp_pair = [None, None]
    for name, layer in model.named_sublayers():
        if isinstance(layer, nn.Conv2D):
            tmp_pair[0] = name
        if isinstance(layer, nn.BatchNorm2D):
            tmp_pair[1] = name

        # A pair is complete once both the conv and its trailing bn have
        # been recorded. (The original also checked `len(tmp_pair) == 2`,
        # which was always true and has been dropped.)
        if tmp_pair[0] and tmp_pair[1]:
            fuse_list.append(tmp_pair)
            tmp_pair = [None, None]

    model = fuse_layers(model, fuse_list)
    if is_train:
        model.train()
    # Bug fix: the original returned None, silently discarding the fused
    # model whenever `fuse_layers` did not fuse in place.
    return model
def
fuse_layers
(
model
,
layers_to_fuse
,
inplace
=
False
):
'''
fuse layers in layers_to_fuse
...
...
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
浏览文件 @
d6442df6
...
...
@@ -20,6 +20,7 @@ import os
import
warnings
import
paddle
import
paddle.nn
as
nn
import
paddle.nn.quant.quant_layers
as
quant_layers
from
paddle.fluid
import
dygraph
,
core
,
framework
,
unique_name
from
paddle.fluid.framework
import
IrGraph
...
...
@@ -32,6 +33,7 @@ from ..quantization_pass import ReplaceFakeQuantDequantPass, QuantWeightPass
from
paddle.fluid.log_helper
import
get_logger
from
..
import
quantization_pass
from
.
import
utils
from
.
import
fuse_utils
__all__
=
[
'ImperativeQuantAware'
]
...
...
@@ -52,6 +54,7 @@ class ImperativeQuantAware(object):
weight_bits
=
8
,
activation_bits
=
8
,
moving_rate
=
0.9
,
fuse_conv_bn
=
False
,
weight_preprocess_layer
=
None
,
act_preprocess_layer
=
None
,
weight_quantize_layer
=
None
,
...
...
@@ -76,6 +79,7 @@ class ImperativeQuantAware(object):
activation_bits(int): quantization bit number for activations.
moving_rate(float): the parameter for 'moving_average_abs_max'
quantization.
fuse_conv_bn(bool): Whether to fuse conv and bn, default is False.
weight_preprocess_layer(paddle.nn.Layer, optional): A paddle
Layer that defines how to preprocess weight before quantization.
Using this can quickly test if user's preprocess method works
...
...
@@ -188,6 +192,7 @@ class ImperativeQuantAware(object):
model_path="./imperative_model_qat")
"""
super
(
ImperativeQuantAware
,
self
).
__init__
()
self
.
fuse_conv_bn
=
fuse_conv_bn
kwargs
=
{
"quantizable_layer_type"
:
quantizable_layer_type
,
...
...
@@ -256,8 +261,13 @@ class ImperativeQuantAware(object):
"""
assert
isinstance
(
model
,
dygraph
.
Layer
),
\
"The model must be the instance of dygraph.Layer."
if
self
.
fuse_conv_bn
:
fuse_utils
.
fuse_conv_bn
(
model
)
self
.
_quantize_inputs
.
apply
(
model
)
self
.
_quantize_outputs
.
apply
(
model
)
return
model
def
save_quantized_model
(
self
,
layer
,
path
,
input_spec
=
None
,
**
config
):
self
.
_quantize_outputs
.
save_quantized_model
(
layer
,
path
,
input_spec
,
...
...
python/paddle/fluid/contrib/slim/tests/CMakeLists.txt
浏览文件 @
d6442df6
...
...
@@ -354,6 +354,7 @@ set_tests_properties(test_quantization_pass PROPERTIES TIMEOUT 120)
# Per-test TIMEOUT (in seconds) for the slow slim-quantization unit tests;
# the QAT tests train a small model end-to-end, so they need a generous
# limit. test_imperative_qat_fuse is the new conv+bn fusion variant added
# by this change.
set_tests_properties(test_imperative_qat_channelwise PROPERTIES TIMEOUT 200)
set_tests_properties(test_user_defined_quantization PROPERTIES TIMEOUT 200)
set_tests_properties(test_imperative_qat PROPERTIES TIMEOUT 200)
set_tests_properties(test_imperative_qat_fuse PROPERTIES TIMEOUT 200)
set_tests_properties(test_imperative_out_scale PROPERTIES TIMEOUT 200)
set_tests_properties(test_imperative_qat_user_defined PROPERTIES TIMEOUT 200)
...
...
python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
浏览文件 @
d6442df6
...
...
@@ -56,13 +56,15 @@ class TestImperativeQat(unittest.TestCase):
self
.
onnx_format
=
False
self
.
check_export_model_accuracy
=
True
self
.
diff_threshold
=
0.01
self
.
fuse_conv_bn
=
False
def
func_qat
(
self
):
self
.
set_vars
()
imperative_qat
=
ImperativeQuantAware
(
weight_quantize_type
=
self
.
weight_quantize_type
,
activation_quantize_type
=
self
.
activation_quantize_type
)
activation_quantize_type
=
self
.
activation_quantize_type
,
fuse_conv_bn
=
self
.
fuse_conv_bn
)
with
fluid
.
dygraph
.
guard
():
# For CI coverage
...
...
@@ -214,6 +216,7 @@ class TestImperativeQatONNXFormat(unittest.TestCase):
self
.
activation_quantize_type
=
'moving_average_abs_max'
self
.
onnx_format
=
True
self
.
diff_threshold
=
0.025
self
.
fuse_conv_bn
=
False
if
__name__
==
'__main__'
:
...
...
python/paddle/fluid/contrib/slim/tests/test_imperative_qat_channelwise.py
浏览文件 @
d6442df6
...
...
@@ -43,6 +43,7 @@ class TestImperativeQatChannelWise(TestImperativeQat):
self
.
activation_quantize_type
=
'moving_average_abs_max'
self
.
diff_threshold
=
0.01
self
.
onnx_format
=
False
self
.
fuse_conv_bn
=
False
print
(
'weight_quantize_type'
,
self
.
weight_quantize_type
)
...
...
@@ -52,6 +53,7 @@ class TestImperativeQatChannelWiseONNXFormat(TestImperativeQat):
self
.
activation_quantize_type
=
'moving_average_abs_max'
self
.
onnx_format
=
True
self
.
diff_threshold
=
0.025
self
.
fuse_conv_bn
=
False
print
(
'weight_quantize_type'
,
self
.
weight_quantize_type
)
...
...
python/paddle/fluid/contrib/slim/tests/test_imperative_qat_fuse.py
0 → 100644
浏览文件 @
d6442df6
# copyright (c) 2018 paddlepaddle authors. all rights reserved.
#
# licensed under the apache license, version 2.0 (the "license");
# you may not use this file except in compliance with the license.
# you may obtain a copy of the license at
#
# http://www.apache.org/licenses/license-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the license is distributed on an "as is" basis,
# without warranties or conditions of any kind, either express or implied.
# see the license for the specific language governing permissions and
# limitations under the license.
from
__future__
import
print_function
import
os
import
numpy
as
np
import
random
import
unittest
import
logging
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid
import
core
from
paddle.fluid.log_helper
import
get_logger
from
test_imperative_qat
import
TestImperativeQat
# Run in static-graph mode for this test module.
paddle.enable_static()

# Pin CPU parallelism to a single device for reproducible results.
os.environ["CPU_NUM"] = "1"

# NOTE(review): on GPU builds this presumably forces deterministic cuDNN
# kernels so accuracy comparisons are stable across runs — confirm against
# the FLAGS_cudnn_deterministic documentation.
if core.is_compiled_with_cuda():
    fluid.set_flags({"FLAGS_cudnn_deterministic": True})

# Module-level logger shared by the test cases below.
_logger = get_logger(
    __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
class TestImperativeQatfuseBN(TestImperativeQat):
    """QAT test variant that enables conv+bn fusion before quantization."""

    def set_vars(self):
        """Configure the QAT run; identical to the base test except that
        ``fuse_conv_bn`` is turned on."""
        self.fuse_conv_bn = True
        self.onnx_format = False
        self.diff_threshold = 0.01
        self.weight_quantize_type = 'abs_max'
        self.activation_quantize_type = 'moving_average_abs_max'
# Standard unittest entry point: discover and run the test cases in this
# module when it is executed as a script.
if __name__ == '__main__':
    unittest.main()
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录