weixin_41840029 / PaddleOCR
Forked from PaddlePaddle / PaddleOCR (in sync with the upstream project)
Commit 14dfa73e
Authored on Sep 15, 2020 by baiyfbupt

fix issue

Parent: 2ac3ec96

Showing 1 changed file with 31 additions and 28 deletions (+31, -28):
deploy/slim/quantization/quant.py
@@ -53,6 +53,37 @@ from paddleslim.quant import quant_aware, convert
 from paddle.fluid.layer_helper import LayerHelper
 
 
+def pact(x):
+    """
+    Process a variable using the pact method you define
+    Args:
+        x(Tensor): Paddle Tensor, need to be preprocess before quantization
+    Returns:
+        The processed Tensor x.
+    """
+    helper = LayerHelper("pact", **locals())
+    dtype = 'float32'
+    init_thres = 20
+    u_param_attr = fluid.ParamAttr(
+        name=x.name + '_pact',
+        initializer=fluid.initializer.ConstantInitializer(value=init_thres),
+        regularizer=fluid.regularizer.L2Decay(0.0001),
+        learning_rate=1)
+    u_param = helper.create_parameter(attr=u_param_attr, shape=[1], dtype=dtype)
+    x = fluid.layers.elementwise_sub(
+        x, fluid.layers.relu(fluid.layers.elementwise_sub(x, u_param)))
+    x = fluid.layers.elementwise_add(
+        x, fluid.layers.relu(fluid.layers.elementwise_sub(-u_param, x)))
+    return x
+
+
+def get_optimizer():
+    """
+    Build a program using a model and an optimizer
+    """
+    return fluid.optimizer.AdamOptimizer(0.001)
+
+
 def main():
     train_build_outputs = program.build(
         config, train_program, startup_program, mode='train')
@@ -77,26 +108,6 @@ def main():
     exe = fluid.Executor(place)
     exe.run(startup_program)
 
-    def pact(x, name=None):
-        helper = LayerHelper("pact", **locals())
-        dtype = 'float32'
-        init_thres = 20
-        u_param_attr = fluid.ParamAttr(
-            name=x.name + '_pact',
-            initializer=fluid.initializer.ConstantInitializer(value=init_thres),
-            regularizer=fluid.regularizer.L2Decay(0.0001),
-            learning_rate=1)
-        u_param = helper.create_parameter(
-            attr=u_param_attr, shape=[1], dtype=dtype)
-        x = fluid.layers.elementwise_sub(
-            x, fluid.layers.relu(fluid.layers.elementwise_sub(x, u_param)))
-        x = fluid.layers.elementwise_add(
-            x, fluid.layers.relu(fluid.layers.elementwise_sub(-u_param, x)))
-        return x
-
-    def get_optimizer():
-        return fluid.optimizer.AdamOptimizer(0.001)
-
     # 1. quantization configs
     quant_config = {
         # weight quantize type, default is 'channel_wise_abs_max'
@@ -151,14 +162,6 @@ def main():
     train_compile_program = program.create_multi_devices_program(
         quant_train_program, train_opt_loss_name, for_quant=True)
 
-    # dump mode structure
-    if config['Global']['debug']:
-        if train_alg_type == 'rec' and 'attention' in config['Global'][
-                'loss_type']:
-            logger.warning('Does not suport dump attention...')
-        else:
-            summary(quant_train_program)
-
     init_model(config, quant_train_program, exe)
 
     train_info_dict = {'compile_program':train_compile_program,\
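The pact function that this commit promotes to module level implements PACT-style activation clipping for quantization-aware training: it creates a learnable scalar threshold u_param (initialized to init_thres = 20 and L2-regularized) and clamps the activation to [-u_param, u_param] before quantization. The elementwise_sub/relu composition in the diff is exactly a symmetric clip, as the following standalone NumPy sketch shows (illustration only; relu and pact_clip are helpers defined here for the demonstration, not part of the repository):

```python
# Illustration only: the expression used in pact(),
#   x - relu(x - u) + relu(-u - x),
# equals a symmetric clip of x to the threshold u.
import numpy as np

def relu(v):
    return np.maximum(v, 0.0)

def pact_clip(x, u):
    # Mirrors the two fluid.layers calls in quant.py:
    #   elementwise_sub(x, relu(x - u))   caps values above +u
    #   elementwise_add(x, relu(-u - x))  floors values below -u
    x = x - relu(x - u)
    x = x + relu(-u - x)
    return x

x = np.array([-35.0, -5.0, 0.0, 12.0, 40.0])
u = 20.0  # same value as init_thres in the diff
print(pact_clip(x, u))    # [-20.  -5.   0.  12.  20.]
print(np.clip(x, -u, u))  # identical result
```

Lifting pact and get_optimizer out of main() presumably lets them be handed to PaddleSlim's quantization pass as callbacks (the file already imports quant_aware from paddleslim.quant). A minimal sketch of that wiring, assuming the PaddleSlim quant_aware keyword arguments act_preprocess_func, optimizer_func, and executor; the actual call site is outside the hunks shown above:

```python
# Hedged sketch (assumed wiring; the real call is not shown in this diff):
# pass the module-level callbacks to PaddleSlim's quantization-aware pass.
quant_train_program = quant_aware(
    train_program, place, quant_config,
    for_test=False,
    act_preprocess_func=pact,      # clip activations with the learnable threshold
    optimizer_func=get_optimizer,  # optimizer used to train the PACT thresholds
    executor=exe)
```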