BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle, in sync with the fork source)
Commit 7ced3017 (unverified)
Authored Mar 15, 2022 by Guanghua Yu; committed via GitHub on Mar 15, 2022
Support some ops for full quantization (#40083)
* add some op for full_quantization
Parent commit: 31729a62
Showing 2 changed files with 182 additions and 13 deletions (+182, -13):

python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py (+0, -2)
python/paddle/fluid/contrib/slim/quantization/quantization_pass.py (+182, -11)
python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py
@@ -979,8 +979,6 @@ class PostTrainingQuantization(object):
                 if op.type in (
                         self._quantizable_op_type + self._out_scale_op_list):
                     out_var_names = _get_op_output_var_names(op)
-                    assert len(out_var_names) == 1, "Post training " + \
-                        "quantization only support one output for " + op.type
                     for var_name in out_var_names:
                         analysis_and_save_info(op, var_name)
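Removing this assertion lets scale collection walk over every real output of an op instead of assuming a single one, which some of the newly supported ops need (for example "top_k_v2" with Out/Indices, or "group_norm" with Y/Mean/Variance). A minimal, self-contained sketch of that iteration; the helper name and the op-description dict are hypothetical stand-ins, and only the slot names are taken from the _op_real_in_out_name entries in this commit:

# Illustrative stand-in for _op_real_in_out_name, limited to entries that
# appear in this commit: op type -> (real input slots, real output slots).
_real_in_out = {
    "top_k_v2": [["X"], ["Out", "Indices"]],
    "group_norm": [["X"], ["Y", "Mean", "Variance"]],
    "relu": [["X"], ["Out"]],
}

def output_var_names(op_type, op_outputs):
    """Hypothetical helper mimicking _get_op_output_var_names: map an op's
    real output slots to the variable names bound to them."""
    _, out_slots = _real_in_out[op_type]
    names = []
    for slot in out_slots:
        names.extend(op_outputs.get(slot, []))
    return names

# Before this change the caller asserted len(out_var_names) == 1; now each
# output variable gets its activation statistics collected.
op_outputs = {"Out": ["topk_out_0"], "Indices": ["topk_idx_0"]}
collected = {}
for var_name in output_var_names("top_k_v2", op_outputs):
    collected[var_name] = "abs_max / histogram stats"
print(sorted(collected))   # ['topk_idx_0', 'topk_out_0'] are both visited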
python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
@@ -59,6 +59,7 @@ _out_scale_op_list = [
     "tanh",
     "prelu",
     "swish",
+    "dropout",
     "softmax",
     "batch_norm",
     "layer_norm",
@@ -68,6 +69,8 @@ _out_scale_op_list = [
     "transpose2",
     "concat",
     "elementwise_mul",
+    "elementwise_pow",
+    "elementwise_sub",
     "scale",
     "slice",
     "hard_swish",
@@ -81,8 +84,54 @@ _out_scale_op_list = [
     "flatten2",
     "transpose",
     "pad2d",
+    "pad3d",
     "reshape",
-    "layer_norm",
+    "split",
+    "flatten_contiguous_range",
+    "squeeze",
+    "squeeze2",
+    "nearest_interp_v2",
+    "fill_constant_batch_size_like",
+    "bilinear_interp",
+    "bilinear_interp_v2",
+    "arg_max",
+    "abs",
+    "assign",
+    "cast",
+    "clip",
+    "box_coder",
+    "crop",
+    "cumsum",
+    "equal",
+    "expand_v2",
+    "fill_any_like",
+    "fill_constant",
+    "gelu",
+    "instance_norm",
+    "lookup_table",
+    "lookup_table_v2",
+    "norm",
+    "p_norm",
+    "pow",
+    "reduce_mean",
+    "stack",
+    "top_k_v2",
+    "unsqueeze",
+    "unsqueeze2",
+    "logical_and",
+    "logical_not",
+    "meshgrid",
+    "roi_align",
+    "strided_slice",
+    "where",
+    "grid_sampler",
+    "tile",
+    "group_norm",
+    "reduce_sum",
+    "square",
+    "softplus",
+    "gather",
+    "shuffle_channel",
 ]
 # list op real input and output names, to avoid processing input such as AxisTensor.
@@ -119,7 +168,7 @@ _op_real_in_out_name = {
     "relu": [["X"], ["Out"]],
     "relu6": [["X"], ["Out"]],
     "leaky_relu": [["X"], ["Out"]],
-    "prelu": [["X"], ["Out"]],
+    "prelu": [["X", "Alpha"], ["Out"]],
     "tanh": [["X"], ["Out"]],
     "swish": [["X"], ["Out"]],
     "dropout": [["X"], ["Out"]],
@@ -127,16 +176,59 @@ _op_real_in_out_name = {
     "layer_norm": [["X"], ["Y"]],
     "sigmoid": [["X"], ["Out"]],
     "elementwise_mul": [["X", "Y"], ["Out"]],
+    "elementwise_pow": [["X", "Y"], ["Out"]],
     "scale": [["X"], ["Out"]],
     "hard_swish": [["X"], ["Out"]],
     "hard_sigmoid": [["X"], ["Out"]],
     "gru": [["Input", "Weight"], ["Hidden"]],
     "lstm": [["Input", "Weight"], ["Hidden"]],
     "pad2d": [["X"], ["Out"]],
+    "pad3d": [["X"], ["Out"]],
     "flatten": [["X"], ["Out"]],
     "flatten2": [["X"], ["Out"]],
     "unsqueeze2": [["X"], ["Out"]],
-    "flatten_contiguous_range": [['X'], ["Out"]],
+    "unsqueeze2": [["X"], ["Out"]],
+    "flatten_contiguous_range": [["X"], ["Out"]],
+    "split": [["X"], ["Out"]],
+    "squeeze2": [["X"], ["Out"]],
+    "nearest_interp_v2": [["X"], ["Out"]],
+    "bilinear_interp": [["X"], ["Out"]],
+    "bilinear_interp_v2": [["X"], ["Out"]],
+    "fill_constant_batch_size_like": [["Input"], ["Out"]],
+    "arg_max": [["X"], ["Out"]],
+    "abs": [["X"], ["Out"]],
+    "assign": [["X"], ["Out"]],
+    "cast": [["X"], ["Out"]],
+    "clip": [["X"], ["Out"]],
+    "box_coder": [["PriorBox"], ["OutputBox"]],
+    "crop": [["X"], ["Out"]],
+    "cumsum": [["X"], ["Out"]],
+    "expand_v2": [["X"], ["Out"]],
+    "fill_any_like": [["X"], ["Out"]],
+    "fill_constant": [[], ["Out"]],
+    "gelu": [["X"], ["Out"]],
+    "instance_norm": [["X"], ["Out"]],
+    "lookup_table": [["W", "Ids"], ["Out"]],
+    "lookup_table_v2": [["W", "Ids"], ["Out"]],
+    "norm": [["X"], ["Norm"]],
+    "p_norm": [["X"], ["Out"]],
+    "pow": [["X"], ["Out"]],
+    "reduce_mean": [["X"], ["Out"]],
+    "stack": [["X"], ["Y"]],
+    "top_k_v2": [["X"], ["Out", "Indices"]],
+    "logical_and": [["X", "Y"], ["Out"]],
+    "logical_not": [["X"], ["Out"]],
+    "meshgrid": [["X"], ["Out"]],
+    "roi_align": [["X", "ROIs"], ["Out"]],
+    "strided_slice": [["Input"], ["Out"]],
+    "where": [["Condition", "X", "Y"], ["Out"]],
+    "grid_sampler": [["X", "Grid"], ["Output"]],
+    "tile": [["X"], ["Out"]],
+    "group_norm": [["X"], ["Y", "Mean", "Variance"]],
+    "reduce_sum": [["X"], ["Out"]],
+    "square": [["X"], ["Out"]],
+    "softplus": [["X"], ["Out"]],
+    "shuffle_channel": [["X"], ["Out"]],
 }
 _conv_ops = ['conv2d', 'depthwise_conv2d', 'conv2d_transpose']
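As the comment above the map says, only the listed slots are treated as real inputs and outputs, so auxiliary inputs such as AxisTensor are never picked up for quantization. A small sketch of that filtering under stated assumptions: the map entries are copied from this hunk, while the helper and the op-description dicts are invented for illustration.

# Illustrative only: filter an op's tensors down to the real slots listed in
# a map shaped like _op_real_in_out_name (entries copied from this hunk).
_real_in_out = {
    "split": [["X"], ["Out"]],
    "prelu": [["X", "Alpha"], ["Out"]],
}

def real_tensor_names(op_type, op_inputs, op_outputs):
    """Return (input_names, output_names) restricted to the real slots."""
    in_slots, out_slots = _real_in_out[op_type]
    ins = [n for slot in in_slots for n in op_inputs.get(slot, [])]
    outs = [n for slot in out_slots for n in op_outputs.get(slot, [])]
    return ins, outs

# Hypothetical 'split' op description: AxisTensor is attached as an input but
# is not a data tensor, so the filter drops it.
op_inputs = {"X": ["conv_out_0"], "AxisTensor": ["axis_t_0"]}
op_outputs = {"Out": ["split_out_0", "split_out_1"]}
print(real_tensor_names("split", op_inputs, op_outputs))
# (['conv_out_0'], ['split_out_0', 'split_out_1'])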
@@ -1797,14 +1889,93 @@ class AddQuantDequantPass(object):
     quantized ops's inputs.
     """
     _supported_quantizable_op_type = [
-        "pool2d", "elementwise_add", "concat", "softmax", "argmax", "transpose",
-        "equal", "gather", "greater_equal", "greater_than", "less_equal",
-        "less_than", "mean", "not_equal", "reshape", "reshape2",
-        "bilinear_interp", "nearest_interp", "trilinear_interp", "slice",
-        "squeeze", "elementwise_sub", "mul", "matmul", "relu", "relu6",
-        "leaky_relu", "tanh", "swish", "scale", "transpose", "transpose2",
-        "sigmoid", "pad2d", "flatten", "flatten2", "batch_norm", "layer_norm",
-        "matmul_v2"
+        "pool2d",
+        "elementwise_add",
+        "concat",
+        "softmax",
+        "argmax",
+        "transpose",
+        "equal",
+        "gather",
+        "greater_equal",
+        "greater_than",
+        "less_equal",
+        "less_than",
+        "mean",
+        "not_equal",
+        "reshape",
+        "reshape2",
+        "dropout",
+        "bilinear_interp",
+        "nearest_interp",
+        "trilinear_interp",
+        "slice",
+        "squeeze",
+        "elementwise_sub",
+        "mul",
+        "matmul",
+        "relu",
+        "relu6",
+        "leaky_relu",
+        "tanh",
+        "swish",
+        "scale",
+        "transpose",
+        "transpose2",
+        "sigmoid",
+        "pad2d",
+        "flatten",
+        "flatten2",
+        "batch_norm",
+        "layer_norm",
+        "matmul_v2",
+        "split",
+        "flatten_contiguous_range",
+        "squeeze2",
+        "nearest_interp_v2",
+        "bilinear_interp",
+        "bilinear_interp_v2",
+        "fill_constant_batch_size_like",
+        "arg_max",
+        "abs",
+        "assign",
+        "cast",
+        "clip",
+        "box_coder",
+        "crop",
+        "cumsum",
+        "elementwise_mul",
+        "elementwise_pow",
+        "expand_v2",
+        "fill_any_like",
+        "fill_constant",
+        "gelu",
+        "hard_sigmoid",
+        "hard_swish",
+        "instance_norm",
+        "lookup_table",
+        "lookup_table_v2",
+        "norm",
+        "p_norm",
+        "pad3d",
+        "pow",
+        "prelu",
+        "reduce_mean",
+        "unsqueeze",
+        "unsqueeze2",
+        "logical_and",
+        "logical_not",
+        "meshgrid",
+        "roi_align",
+        "strided_slice",
+        "where",
+        "grid_sampler",
+        "tile",
+        "group_norm",
+        "reduce_sum",
+        "square",
+        "softplus",
+        "shuffle_channel",
     ]
     # To be compatible with PaddleSlim, not remove _activation_type for now
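The docstring fragment above says AddQuantDequantPass handles quantized ops' inputs: for every op type in this list, the pass inserts a quantize-dequantize step on the input activations so low-precision inference can be simulated. Below is a purely conceptual numpy sketch of such a quant-dequant round trip; the 8-bit width and abs-max scaling are assumptions for illustration, not a description of the pass's internals.

import numpy as np

def fake_quant_dequant(x, bits=8):
    """Quantize an activation to a signed integer grid using its abs-max as
    the scale, then map it back to float (a 'fake quant' round trip).
    Conceptual sketch only; not Paddle's AddQuantDequantPass implementation."""
    qmax = 2 ** (bits - 1) - 1                     # 127 for 8 bits
    abs_max = float(np.abs(x).max())
    scale = abs_max / qmax if abs_max > 0 else 1.0
    q = np.clip(np.round(x / scale), -qmax, qmax)  # integer-grid values
    return (q * scale).astype(x.dtype)             # back to float

x = np.array([0.03, -1.2, 0.75, 2.4], dtype=np.float32)
x_qdq = fake_quant_dequant(x)
print(x_qdq)                           # values snapped to the 8-bit grid
print(float(np.abs(x - x_qdq).max()))  # error bounded by half a scale step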