Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
2fa3d59e
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
2fa3d59e
编写于
7月 22, 2021
作者:
Z
Zhen Wang
提交者:
GitHub
7月 22, 2021
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Fix the save logic for the qat save unit test. (#34273)
上级
24c7087f
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
30 additions
and
42 deletions
+30
-42
python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
...on/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
+30
-42
未找到文件。
python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
浏览文件 @
2fa3d59e
...
...
@@ -17,8 +17,8 @@ from __future__ import print_function
import
os
import
numpy
as
np
import
random
import
shutil
import
time
import
tempfile
import
unittest
import
logging
...
...
@@ -50,19 +50,6 @@ class TestImperativeQat(unittest.TestCase):
QAT = quantization-aware training
"""
@classmethod
def setUpClass(cls):
    """Compute a unique, timestamped working directory for the saved model.

    Only the paths are prepared here; the directory itself is created
    later by the save call.
    """
    # A wall-clock timestamp keeps concurrent/repeated test runs from
    # colliding on the same directory name.
    stamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
    base_dir = os.getcwd()
    cls.root_path = os.path.join(base_dir, "imperative_qat_" + stamp)
    cls.save_path = os.path.join(cls.root_path, "lenet")
@classmethod
def tearDownClass(cls):
    """Best-effort removal of the timestamped model directory.

    Cleanup failure must not fail the test run, so filesystem errors are
    reported and swallowed.
    """
    try:
        shutil.rmtree(cls.root_path)
    except OSError as e:
        # Narrowed from the original blanket `except Exception`: rmtree
        # only raises OSError subclasses for filesystem problems, and a
        # broad catch would hide genuine programming errors.
        print("Failed to delete {} due to {}".format(cls.root_path, str(e)))
def set_vars(self):
    """Select the quantization schemes exercised by this test case."""
    # Weights: plain abs_max; activations: moving average of abs_max.
    self.weight_quantize_type, self.activation_quantize_type = (
        'abs_max',
        'moving_average_abs_max',
    )
...
...
@@ -170,34 +157,35 @@ class TestImperativeQat(unittest.TestCase):
lenet
.
eval
()
before_save
=
lenet
(
test_img
)
# save inference quantized model
imperative_qat
.
save_quantized_model
(
layer
=
lenet
,
path
=
self
.
save_path
,
input_spec
=
[
paddle
.
static
.
InputSpec
(
shape
=
[
None
,
1
,
28
,
28
],
dtype
=
'float32'
)
])
print
(
'Quantized model saved in {%s}'
%
self
.
save_path
)
if
core
.
is_compiled_with_cuda
():
place
=
core
.
CUDAPlace
(
0
)
else
:
place
=
core
.
CPUPlace
()
exe
=
fluid
.
Executor
(
place
)
[
inference_program
,
feed_target_names
,
fetch_targets
]
=
fluid
.
io
.
load_inference_model
(
dirname
=
self
.
root_path
,
executor
=
exe
,
model_filename
=
"lenet"
+
INFER_MODEL_SUFFIX
,
params_filename
=
"lenet"
+
INFER_PARAMS_SUFFIX
)
after_save
,
=
exe
.
run
(
inference_program
,
feed
=
{
feed_target_names
[
0
]:
test_data
},
fetch_list
=
fetch_targets
)
# check
self
.
assertTrue
(
np
.
allclose
(
after_save
,
before_save
.
numpy
()),
msg
=
'Failed to save the inference quantized model.'
)
with
tempfile
.
TemporaryDirectory
(
prefix
=
"qat_save_path_"
)
as
tmpdir
:
# save inference quantized model
imperative_qat
.
save_quantized_model
(
layer
=
lenet
,
path
=
os
.
path
.
join
(
tmpdir
,
"lenet"
),
input_spec
=
[
paddle
.
static
.
InputSpec
(
shape
=
[
None
,
1
,
28
,
28
],
dtype
=
'float32'
)
])
print
(
'Quantized model saved in %s'
%
tmpdir
)
if
core
.
is_compiled_with_cuda
():
place
=
core
.
CUDAPlace
(
0
)
else
:
place
=
core
.
CPUPlace
()
exe
=
fluid
.
Executor
(
place
)
[
inference_program
,
feed_target_names
,
fetch_targets
]
=
fluid
.
io
.
load_inference_model
(
dirname
=
tmpdir
,
executor
=
exe
,
model_filename
=
"lenet"
+
INFER_MODEL_SUFFIX
,
params_filename
=
"lenet"
+
INFER_PARAMS_SUFFIX
)
after_save
,
=
exe
.
run
(
inference_program
,
feed
=
{
feed_target_names
[
0
]:
test_data
},
fetch_list
=
fetch_targets
)
# check
self
.
assertTrue
(
np
.
allclose
(
after_save
,
before_save
.
numpy
()),
msg
=
'Failed to save the inference quantized model.'
)
if
__name__
==
'__main__'
:
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录