BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 66a1df3c (unverified), authored Nov 03, 2022 by Chang Xu, committed by GitHub on Nov 03, 2022.
Avoid Quant Weight Repeatedly (#47587)
Parent: b3d52d46
Showing 1 changed file with 41 additions and 34 deletions (+41, −34).
python/paddle/fluid/contrib/slim/quantization/quantization_pass.py (+41, −34)
```diff
@@ -1119,6 +1119,7 @@ class QuantizationFreezePass(object):
         self._op_input_rename_map = collections.OrderedDict()
         self._op_output_rename_map = collections.OrderedDict()
         self._quant_var_scale_map = collections.OrderedDict()
+        self._quantized_ops = set()

     def apply(self, graph):
         """
@@ -1173,24 +1174,27 @@ class QuantizationFreezePass(object):
                         quant_axis = 1
                     else:
                         quant_axis = 0
-                    quantized_param_v = utils.quant_tensor(
-                        param_v.copy(),
-                        scale_v,
-                        quant_axis,
-                        self._weight_bits,
-                    )
-                    quantized_param_v = np.round(quantized_param_v)
-                    # Weight bias correction
-                    if self._bias_correction == True:
-                        quantized_param_v = utils.bias_correction_w(
-                            param_v,
-                            quantized_param_v,
-                            scale_v,
-                            quant_axis,
-                            weight_bits=self._weight_bits,
-                        )
-                        quantized_param_v = np.round(quantized_param_v)
-                    self._restore_var(input_arg_name, quantized_param_v)
+                    if input_arg_name not in self._quantized_ops:
+                        self._quantized_ops.add(input_arg_name)
+                        quantized_param_v = utils.quant_tensor(
+                            param_v.copy(),
+                            scale_v,
+                            quant_axis,
+                            self._weight_bits,
+                        )
+                        quantized_param_v = np.round(quantized_param_v)
+                        # Weight bias correction
+                        if self._bias_correction == True:
+                            quantized_param_v = utils.bias_correction_w(
+                                param_v,
+                                quantized_param_v,
+                                scale_v,
+                                quant_axis,
+                                weight_bits=self._weight_bits,
+                            )
+                            quantized_param_v = np.round(quantized_param_v)
+                        self._restore_var(input_arg_name, quantized_param_v)
                     self._remove_fake_quant_and_dequant_op(graph, op_node)

         # Remove all fake dequant op
```
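The fix is a seen-set guard: `_quantized_ops` records which weight variables have already been processed, and the quantize-and-restore block runs only for first-time names. Without it, a weight shared by several quantized ops is loaded, quantized, and restored once per consumer, so from the second consumer on the pass quantizes already-quantized values. Because the set is created in `__init__`, deduplication spans every graph the same pass instance is applied to. Below is a minimal sketch of the guard and of the failure mode it prevents; `quant_tensor` here is a hypothetical stand-in for Paddle's `utils.quant_tensor`, which additionally takes a quant axis and other options:

```python
import numpy as np

def quant_tensor(x, scale, weight_bits=8):
    # Hypothetical stand-in for utils.quant_tensor: symmetric quantization
    # onto [-bnt, bnt] with bnt = 2^(weight_bits - 1) - 1.
    bnt = (1 << (weight_bits - 1)) - 1
    return np.round(x / scale * bnt)

quantized_ops = set()   # mirrors self._quantized_ops
weights = {"conv_w": np.array([0.5, -0.25, 1.0])}

def freeze_weight(name, scale):
    # The guard this commit adds: quantize each weight name at most once.
    if name in quantized_ops:
        return
    quantized_ops.add(name)
    weights[name] = quant_tensor(weights[name].copy(), scale)

freeze_weight("conv_w", 1.0)   # first consumer: [ 64. -32. 127.]
freeze_weight("conv_w", 1.0)   # second consumer: skipped, weights unchanged

# Without the guard, the second call would re-quantize quantized values:
print(quant_tensor(weights["conv_w"], 1.0))   # [ 8128. -4064. 16129.]
```

In the freeze pass the set is keyed on `input_arg_name`; everything inside the guard is the pre-existing logic, unchanged apart from one extra indent level.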
```diff
@@ -3029,6 +3033,7 @@ class QuantWeightPass(object):
         self._save_int_weight = save_int_weight
         assert self._scope is not None, "scope must not be None."
         assert self._place is not None, "place must not be None."
+        self._quantized_ops = set()

     def apply(self, graph):
         assert isinstance(
@@ -3066,29 +3071,31 @@ class QuantWeightPass(object):
                     param_v = self._load_var(x_node.name())
                     quant_axis = _op.op().attr("quant_axis")
                     bits_length = _op.op().attr("bit_length")
-                    quantized_param_v = utils.quant_tensor(
-                        param_v.copy(),
-                        scale_v,
-                        quant_axis,
-                        bits_length,
-                        onnx_format=True,
-                    )
-                    if self._bias_correction == True:
-                        quantized_param_v = utils.bias_correction_w(
-                            param_v,
-                            quantized_param_v,
-                            scale_v,
-                            quant_axis,
-                            weight_bits=bits_length,
-                        )
-                    if self._save_int_weight:
-                        # cast weight type to int
-                        if self._quant_bits == 8:
-                            save_weight_dtype = np.int8
-                        quantized_param_v = quantized_param_v.astype(
-                            save_weight_dtype
-                        )
-                    self._restore_var(x_node.name(), quantized_param_v)
+                    if x_node.name() not in self._quantized_ops:
+                        self._quantized_ops.add(x_node.name())
+                        quantized_param_v = utils.quant_tensor(
+                            param_v.copy(),
+                            scale_v,
+                            quant_axis,
+                            bits_length,
+                            onnx_format=True,
+                        )
+                        if self._bias_correction == True:
+                            quantized_param_v = utils.bias_correction_w(
+                                param_v,
+                                quantized_param_v,
+                                scale_v,
+                                quant_axis,
+                                weight_bits=bits_length,
+                            )
+                        if self._save_int_weight:
+                            # cast weight type to int
+                            if self._quant_bits == 8:
+                                save_weight_dtype = np.int8
+                            quantized_param_v = quantized_param_v.astype(
+                                save_weight_dtype
+                            )
+                        self._restore_var(x_node.name(), quantized_param_v)
                     for next_op_node in out_node.outputs:
                         graph.update_input_link(out_node, x_node, next_op_node)
```
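`QuantWeightPass` receives the identical guard, keyed on `x_node.name()`. The one wrinkle specific to this pass is the tail end that casts the quantized tensor to an integer dtype when `save_int_weight` is set. A rough sketch of that path follows, using the same hypothetical quantizer as above; note the hunk only shows the 8-bit case, and the real code checks the pass's `_quant_bits` while the sketch conflates it with the op's `bit_length` attribute for brevity:

```python
import numpy as np

def save_quant_weight(name, param_v, scale, bits_length,
                      save_int_weight, quantized_ops, store):
    # Guarded quantize-and-store, mirroring QuantWeightPass.apply.
    if name in quantized_ops:
        return
    quantized_ops.add(name)
    bnt = (1 << (bits_length - 1)) - 1
    quantized = np.round(param_v.copy() / scale * bnt)
    if save_int_weight:
        # cast weight type to int (only the 8-bit case appears in the hunk)
        if bits_length == 8:
            quantized = quantized.astype(np.int8)
    store[name] = quantized

store, seen = {}, set()
w = np.array([0.5, -0.25, 1.0])
save_quant_weight("fc_w", w, 1.0, 8, True, seen, store)
save_quant_weight("fc_w", w, 1.0, 8, True, seen, store)  # no-op: already seen
print(store["fc_w"].dtype, store["fc_w"])   # int8 [ 64 -32 127]
```

Restoring the int8 tensor while leaving the graph links untouched (`update_input_link` runs for every consumer regardless of the guard) is what keeps repeated consumers wired up without re-quantizing the shared weight.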