机器未来 / Paddle, forked from PaddlePaddle / Paddle
Commit bd0a9fb7
Authored Sep 27, 2018 by Dang Qingqing
Parent: f7bd1761

Update code, since one merged PR hidden the API.
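The renames in this commit are mechanical: a previously merged PR made the helpers this transpiler relies on private, so every call site switches to the underscore-prefixed names (op.rename_input becomes op._rename_input, fluid.framework.get_var becomes fluid.framework._get_var). A minimal, hedged sketch of the new call shapes; the stub classes below are illustrative stand-ins, not Paddle APIs, used only so the snippet runs on its own:

    # Illustrative stubs standing in for the Paddle operator and framework module.
    class FakeOp(object):
        def _rename_input(self, old_name, new_name):
            # After this commit, call sites use the underscore-prefixed helper
            # (previously op.rename_input).
            print("rename input %s -> %s" % (old_name, new_name))

    class FakeFramework(object):
        def _get_var(self, name, program):
            # Previously fluid.framework.get_var.
            return "var:%s" % name

    op, framework = FakeOp(), FakeFramework()
    op._rename_input('conv2d_1.tmp_0', 'conv2d_1.tmp_0.dequantized')
    print(framework._get_var('conv2d_1.w_0.quantized', program=None))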
Showing 2 changed files with 23 additions and 22 deletions (+23 -22)
python/paddle/fluid/contrib/quantize/quantize_transpiler.py       +15 -6
python/paddle/fluid/contrib/tests/test_quantize_transpiler.py     +8 -16
python/paddle/fluid/contrib/quantize/quantize_transpiler.py
@@ -183,7 +183,7 @@ class QuantizeTranspiler(object):
                         block, idx + 1, quant_var, scale_var, quant_bits)
                     dequanted_vars[block_id][name] = dequant_var
                     # rename the forward op inputs
-                    op.rename_input(name, dequant_var.name)
+                    op._rename_input(name, dequant_var.name)

         def _transpile_backward(block, op):
             block_id = block.idx
@@ -191,7 +191,7 @@ class QuantizeTranspiler(object):
             for name in op.input_arg_names:
                 if name in dequanted_vars[block_id]:
                     dequant_var = dequanted_vars[block_id][name]
-                    op.rename_input(name, dequant_var.name)
+                    op._rename_input(name, dequant_var.name)
                     no_dequanted_input_vars = False
             if no_dequanted_input_vars:
                 raise ValueError("There is no dequanted inputs for op %s." %
@@ -262,7 +262,7 @@ class QuantizeTranspiler(object):
             scale_var = None
             for name in op.input_arg_names:
                 if name in op_in_rename_map[block_id]:
-                    op.rename_input(name, op_in_rename_map[block_id][name])
+                    op._rename_input(name, op_in_rename_map[block_id][name])

                 scale_v = var_scale_map[block_id][_original_var_name(name)]
                 if _original_var_name(name) in persistable_vars:
@@ -312,7 +312,8 @@ class QuantizeTranspiler(object):
                 # input of the followed ops
                 for name in op.input_arg_names:
                     if name in op_out_rename_map[block_id]:
-                        op.rename_input(name, op_out_rename_map[block_id][name])
+                        op._rename_input(name,
+                                         op_out_rename_map[block_id][name])

                 if op_type in self.fake_quant_op_types:
                     in_arg_name = op.input('X')[0]
@@ -378,10 +379,11 @@ class QuantizeTranspiler(object):
                     if name not in input_map:
                         int8_var = convert_to_int8(var)
                         input_map[name] = int8_var.name
-                    op.rename_input(name, input_map[name])
+                    op._rename_input(name, input_map[name])
         self._remove_unused_var(program)

     def _remove_unused_var(self, program):
+        all_remove_vars = []
         for block in program.blocks:
             args = []
             for op in block.ops:
@@ -389,9 +391,16 @@ class QuantizeTranspiler(object):
                 args += op.output_arg_names
             args = list(set(args))
             var_names = block.vars.keys()
+            sub_block_remove_vars = []
             for var in var_names:
                 if var not in args:
-                    block._remove_var(var)
+                    sub_block_remove_vars.append(var)
+            all_remove_vars.append(sub_block_remove_vars)
+
+        remove_vars = [list(set(v)) for v in all_remove_vars]
+        for i, block in enumerate(program.blocks):
+            for v in remove_vars[i]:
+                block._remove_var(v)

     def _insert_quant_abs_max_op(self, block, idx, var, quant_bits):
         """Insert fake_quantize_abs_max op.
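Besides the renames, _remove_unused_var in the last hunk above switches from deleting variables while walking block.vars.keys() to a collect-then-remove pass over all blocks. The commit does not state the motivation; a common reason for this pattern is to avoid mutating a container while iterating over its keys. A self-contained sketch of the same two-phase pattern, with plain dicts standing in for program blocks (names are illustrative, not Paddle APIs):

    def remove_unused(blocks, used_names_per_block):
        # Phase 1: record unused variable names per block without touching the blocks.
        all_remove_vars = []
        for block, used in zip(blocks, used_names_per_block):
            sub_block_remove_vars = [var for var in block.keys() if var not in used]
            all_remove_vars.append(sub_block_remove_vars)

        # Phase 2: remove them once iteration over the keys is finished.
        remove_vars = [list(set(v)) for v in all_remove_vars]
        for i, block in enumerate(blocks):
            for v in remove_vars[i]:
                del block[v]

    blocks = [{'conv2d_1.w_0': 0.5, 'conv2d_1.w_0.quantized': 3}]
    remove_unused(blocks, [{'conv2d_1.w_0'}])
    print(blocks)  # [{'conv2d_1.w_0': 0.5}]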
python/paddle/fluid/contrib/tests/test_quantize_transpiler.py
@@ -226,27 +226,19 @@ class TestQuantizeTranspiler(unittest.TestCase):
             with fluid.program_guard(test_program):
                 test_data = next(test_reader())
-                f_var = fluid.framework.get_var('conv2d_1.tmp_0', test_program)
-                w_var = fluid.framework.get_var('conv2d_1.w_0.quantized',
-                                                test_program)
+                w_var = fluid.framework._get_var('conv2d_1.w_0.quantized',
+                                                 test_program)
                 # Testing during training
-                test_loss1, f_v1, w_quant = exe.run(
-                    program=test_program,
-                    feed=feeder.feed(test_data),
-                    fetch_list=[loss, f_var, w_var])
+                test_loss1, w_quant = exe.run(program=test_program,
+                                              feed=feeder.feed(test_data),
+                                              fetch_list=[loss, w_var])

                 # Freeze program for inference, but the weight of fc/conv is still float type.
                 quant_transpiler.freeze_program(test_program, place)
-                fv2 = fluid.framework.get_var('conv2d_1.tmp_0.dequantized',
-                                              test_program)
-                test_loss2, f_v2 = exe.run(program=test_program,
-                                           feed=feeder.feed(test_data),
-                                           fetch_list=[loss, fv2])
+                test_loss2, = exe.run(program=test_program,
+                                      feed=feeder.feed(test_data),
+                                      fetch_list=[loss])

                 self.assertAlmostEqual(test_loss1, test_loss2, delta=1e-3)
-                self.assertTrue(
-                    np.allclose(
-                        f_v1, f_v2, rtol=1e-03, atol=1e-03),
-                    "There is diff: " + str(f_v1) + "\n" + str(f_v2))
                 w_freeze = np.array(fluid.global_scope().find_var('conv2d_1.w_0')
                                     .get_tensor())
                 self.assertEqual(np.sum(w_freeze), np.sum(w_quant))
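After the trim, the test fetches only the quantized weight (through the now-private fluid.framework._get_var), freezes the program, and keeps two checks: the losses before and after freezing stay within 1e-3, and the sum of the frozen weight equals the sum of the quantized weight fetched at run time. A hedged, self-contained restatement of just those two checks, with made-up values in place of the fetched tensors:

    import numpy as np

    test_loss1, test_loss2 = 0.2501, 0.2507                  # stand-ins for the two exe.run losses
    w_quant = np.array([[-3, 1], [2, 0]], dtype=np.int8)     # stand-in for 'conv2d_1.w_0.quantized'
    w_freeze = np.array([[-3., 1.], [2., 0.]])               # stand-in for 'conv2d_1.w_0' after freeze_program

    assert abs(test_loss1 - test_loss2) < 1e-3               # assertAlmostEqual(..., delta=1e-3)
    assert np.sum(w_freeze) == np.sum(w_quant)               # assertEqual(np.sum(w_freeze), np.sum(w_quant))
    print("checks hold")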