PaddlePaddle / Paddle
Commit f4e09397 (unverified)
Authored by Guanghua Yu on Jun 09, 2022; committed via GitHub on Jun 09, 2022.
Modify quantization use tempfile to place the temporary files (#43281)
Parent commit: 36980306

Showing 10 changed files with 186 additions and 193 deletions.
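The pattern this commit applies across all ten test files: each test previously composed a scratch directory from a timestamp under os.getcwd() (or a fixed relative path) and deleted it by hand with shutil.rmtree or os.system("rm -rf ..."), while the new code delegates creation and deletion to tempfile.TemporaryDirectory. A minimal sketch of the setUp/tearDown form most of the files adopt (the class and path names here are illustrative, not taken from the diff):

    import os
    import tempfile
    import unittest


    class TestWithTempDir(unittest.TestCase):
        def setUp(self):
            # tempfile picks a unique directory under the system temp root,
            # so no timestamp bookkeeping or "mkdir -p" is needed.
            self.root_path = tempfile.TemporaryDirectory()
            self.save_path = os.path.join(self.root_path.name, "model")

        def tearDown(self):
            # Removes the directory and its contents; replaces the manual
            # os.system("rm -rf ...") / shutil.rmtree error handling.
            self.root_path.cleanup()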
python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py (+17, -35)
python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py (+73, -80)
python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py (+9, -12)
python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_lstm_model.py (+5, -8)
python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mnist.py (+13, -17)
python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mobilenetv1.py (+14, -15)
python/paddle/fluid/contrib/slim/tests/test_quantization_scale_pass.py (+8, -2)
python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py (+10, -6)
python/paddle/fluid/tests/unittests/test_inference_model_io.py (+21, -8)
python/paddle/fluid/tests/unittests/test_save_inference_model_conditional_op.py (+16, -10)
python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py

@@ -20,6 +20,7 @@ import random
 import unittest
 import logging
 import warnings
+import tempfile
 import paddle
 import paddle.fluid as fluid

@@ -122,6 +123,16 @@ class ImperativeLenet(fluid.dygraph.Layer):
 class TestImperativeOutSclae(unittest.TestCase):
+    def setUp(self):
+        self.root_path = tempfile.TemporaryDirectory()
+        self.param_save_path = os.path.join(self.root_path.name,
+                                            "lenet.pdparams")
+        self.save_path = os.path.join(
+            self.root_path.name, "lenet_dynamic_outscale_infer_model")
+
+    def tearDown(self):
+        self.root_path.cleanup()
+
     def func_out_scale_acc(self):
         seed = 1000
         lr = 0.001

@@ -148,46 +159,17 @@ class TestImperativeOutSclae(unittest.TestCase):
         loss_list = train_lenet(lenet, reader, adam)
         lenet.eval()

-        param_save_path = "test_save_quantized_model/lenet.pdparams"
         save_dict = lenet.state_dict()
-        paddle.save(save_dict, param_save_path)
-
-        save_path = "./dynamic_outscale_infer_model/lenet"
-        imperative_out_scale.save_quantized_model(
-            layer=lenet,
-            path=save_path,
-            input_spec=[
-                paddle.static.InputSpec(
-                    shape=[None, 1, 28, 28], dtype='float32')
-            ])
+        paddle.save(save_dict, self.param_save_path)

         for i in range(len(loss_list) - 1):
             self.assertTrue(
                 loss_list[i] > loss_list[i + 1],
                 msg='Failed to do the imperative qat.')

-    def test_out_scale_acc(self):
-        with _test_eager_guard():
-            self.func_out_scale_acc()
-        self.func_out_scale_acc()
-
-
-class TestSaveQuanztizedModelFromCheckPoint(unittest.TestCase):
-    def func_save_quantized_model(self):
-        lr = 0.001
-
-        load_param_path = "test_save_quantized_model/lenet.pdparams"
-        save_path = "./dynamic_outscale_infer_model_from_checkpoint/lenet"
-
         weight_quantize_type = 'abs_max'
         activation_quantize_type = 'moving_average_abs_max'
         imperative_out_scale = ImperativeQuantAware(
             weight_quantize_type=weight_quantize_type,
             activation_quantize_type=activation_quantize_type)

         with fluid.dygraph.guard():
             lenet = ImperativeLenet()
-            load_dict = paddle.load(load_param_path)
+            load_dict = paddle.load(self.param_save_path)
             imperative_out_scale.quantize(lenet)
             lenet.set_dict(load_dict)

@@ -200,7 +182,7 @@ class TestSaveQuanztizedModelFromCheckPoint(unittest.TestCase):
         imperative_out_scale.save_quantized_model(
             layer=lenet,
-            path=save_path,
+            path=self.save_path,
             input_spec=[
                 paddle.static.InputSpec(
                     shape=[None, 1, 28, 28], dtype='float32')

@@ -211,10 +193,10 @@ class TestSaveQuanztizedModelFromCheckPoint(unittest.TestCase):
                 loss_list[i] > loss_list[i + 1],
                 msg='Failed to do the imperative qat.')

-    def test_save_quantized_model(self):
+    def test_out_scale_acc(self):
         with _test_eager_guard():
-            self.func_save_quantized_model()
-        self.func_save_quantized_model()
+            self.func_out_scale_acc()
+        self.func_out_scale_acc()


 if __name__ == '__main__':
python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py

@@ -22,6 +22,7 @@ import time
 import unittest
 import copy
 import logging
+import tempfile

 import paddle.nn as nn
 import paddle

@@ -72,10 +73,6 @@ class TestImperativePTQ(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
-        timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
-        cls.root_path = os.path.join(os.getcwd(),
-                                     "imperative_ptq_" + timestamp)
-        cls.save_path = os.path.join(cls.root_path, "model")
         cls.download_path = 'dygraph_int8/download'
         cls.cache_folder = os.path.expanduser('~/.cache/paddle/dataset/' +
                                               cls.download_path)

@@ -88,14 +85,6 @@ class TestImperativePTQ(unittest.TestCase):
         paddle.static.default_main_program().random_seed = seed
         paddle.static.default_startup_program().random_seed = seed

-    @classmethod
-    def tearDownClass(cls):
-        try:
-            pass
-            # shutil.rmtree(cls.root_path)
-        except Exception as e:
-            print("Failed to delete {} due to {}".format(cls.root_path,
-                                                         str(e)))
-
     def cache_unzipping(self, target_folder, zip_path):
         if not os.path.exists(target_folder):
             cmd = 'mkdir {0} && tar xf {1} -C {0}'.format(target_folder,

@@ -126,8 +115,8 @@ class TestImperativePTQ(unittest.TestCase):
             'batch_norm2d_0': [[0.37673383951187134], [0.44249194860458374]],
             're_lu_0': [[0.44249194860458374], [0.25804123282432556]],
             'max_pool2d_0': [[0.25804123282432556], [0.25804123282432556]],
             'linear_0': [[1.7058950662612915], [14.405526161193848],
                          [0.4373355209827423]],
             'add_0': [[1.7058950662612915, 0.0], [1.7058950662612915]],
         }

@@ -141,8 +130,8 @@ class TestImperativePTQ(unittest.TestCase):
         for batch_id, data in enumerate(test_reader()):
             x_data = np.array([x[0].reshape(1, 28, 28)
                                for x in data]).astype('float32')
             y_data = np.array([x[1] for x in data]).astype('int64').reshape(-1, 1)

             img = paddle.to_tensor(x_data)
             label = paddle.to_tensor(y_data)

@@ -165,8 +154,8 @@ class TestImperativePTQ(unittest.TestCase):
     def program_test(self, program_path, batch_num=-1, batch_size=8):
         exe = paddle.static.Executor(paddle.CPUPlace())
         [inference_program, feed_target_names, fetch_targets] = (
             paddle.static.load_inference_model(program_path, exe))

         test_reader = paddle.batch(paddle.dataset.mnist.test(),
                                    batch_size=batch_size)

@@ -217,15 +206,17 @@ class TestImperativePTQ(unittest.TestCase):
             paddle.static.InputSpec(shape=[None, 1, 28, 28], dtype='float32')
         ]
-        self.ptq.save_quantized_model(model=quant_model,
-                                      path=self.save_path,
-                                      input_spec=input_spec)
-        print('Quantized model saved in {%s}' % self.save_path)
-
-        after_acc_top1 = self.model_test(quant_model, self.batch_num,
-                                         self.batch_size)
-
-        paddle.enable_static()
-        infer_acc_top1 = self.program_test(self.save_path, self.batch_num,
-                                           self.batch_size)
-        paddle.disable_static()
+        with tempfile.TemporaryDirectory(prefix="imperative_ptq_") as tmpdir:
+            save_path = os.path.join(tmpdir, "model")
+            self.ptq.save_quantized_model(model=quant_model,
+                                          path=save_path,
+                                          input_spec=input_spec)
+            print('Quantized model saved in {%s}' % save_path)
+
+            after_acc_top1 = self.model_test(quant_model, self.batch_num,
+                                             self.batch_size)
+
+            paddle.enable_static()
+            infer_acc_top1 = self.program_test(save_path, self.batch_num,
+                                               self.batch_size)
+            paddle.disable_static()

@@ -279,15 +270,17 @@ class TestImperativePTQfuse(TestImperativePTQ):
             paddle.static.InputSpec(shape=[None, 1, 28, 28], dtype='float32')
         ]
-        self.ptq.save_quantized_model(model=quant_model,
-                                      path=self.save_path,
-                                      input_spec=input_spec)
-        print('Quantized model saved in {%s}' % self.save_path)
-
-        after_acc_top1 = self.model_test(quant_model, self.batch_num,
-                                         self.batch_size)
-
-        paddle.enable_static()
-        infer_acc_top1 = self.program_test(self.save_path, self.batch_num,
-                                           self.batch_size)
-        paddle.disable_static()
+        with tempfile.TemporaryDirectory(prefix="imperative_ptq_") as tmpdir:
+            save_path = os.path.join(tmpdir, "model")
+            self.ptq.save_quantized_model(model=quant_model,
+                                          path=save_path,
+                                          input_spec=input_spec)
+            print('Quantized model saved in {%s}' % save_path)
+
+            after_acc_top1 = self.model_test(quant_model, self.batch_num,
+                                             self.batch_size)
+
+            paddle.enable_static()
+            infer_acc_top1 = self.program_test(save_path, self.batch_num,
+                                               self.batch_size)
+            paddle.disable_static()

@@ -327,13 +320,13 @@ class TestImperativePTQHist(TestImperativePTQ):
         self.eval_acc_top1 = 0.98

         self.gt_thresholds = {
             'conv2d_0': [[0.99853515625], [0.35732391771364225],
                          [0.10933732241392136]],
             'batch_norm2d_0': [[0.35732391771364225], [0.4291427868761275]],
             're_lu_0': [[0.4291427868761275], [0.2359918110742001]],
             'max_pool2d_0': [[0.2359918110742001], [0.25665526917146053]],
             'linear_0': [[1.7037603475152991], [14.395224522473026],
                          [0.4373355209827423]],
             'add_0': [[1.7037603475152991, 0.0], [1.7037603475152991]],
         }
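Unlike the setUp/tearDown form used elsewhere in this commit, test_imperative_ptq.py wraps the save/reload step in TemporaryDirectory used as a context manager, so the directory is removed as soon as the block exits, even if an assertion inside it fails. A sketch of that variant (names illustrative, not from the diff):

    import os
    import tempfile

    with tempfile.TemporaryDirectory(prefix="imperative_ptq_") as tmpdir:
        save_path = os.path.join(tmpdir, "model")
        # ... save the quantized model under save_path and reload it ...
    # tmpdir no longer exists here, whether or not the block raised.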
python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py

@@ -21,6 +21,7 @@ import shutil
 import time
 import unittest
 import logging
+import tempfile

 import paddle
 import paddle.fluid as fluid

@@ -45,10 +46,9 @@ class TestImperativeQatAmp(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
-        timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
-        cls.root_path = os.path.join(os.getcwd(),
-                                     "imperative_qat_amp_" + timestamp)
-        cls.save_path = os.path.join(cls.root_path, "model")
+        cls.root_path = tempfile.TemporaryDirectory(
+            prefix="imperative_qat_amp_")
+        cls.save_path = os.path.join(cls.root_path.name, "model")

         cls.download_path = 'dygraph_int8/download'
         cls.cache_folder = os.path.expanduser('~/.cache/paddle/dataset/' +

@@ -64,10 +64,7 @@ class TestImperativeQatAmp(unittest.TestCase):
     @classmethod
     def tearDownClass(cls):
-        try:
-            shutil.rmtree(cls.root_path)
-        except Exception as e:
-            print("Failed to delete {} due to {}".format(cls.root_path,
-                                                         str(e)))
+        cls.root_path.cleanup()

     def cache_unzipping(self, target_folder, zip_path):
         if not os.path.exists(target_folder):

@@ -106,8 +103,8 @@ class TestImperativeQatAmp(unittest.TestCase):
         for batch_id, data in enumerate(train_reader()):
             x_data = np.array([x[0].reshape(1, 28, 28)
                                for x in data]).astype('float32')
             y_data = np.array([x[1] for x in data]).astype('int64').reshape(-1, 1)

             img = paddle.to_tensor(x_data)
             label = paddle.to_tensor(y_data)

@@ -150,8 +147,8 @@ class TestImperativeQatAmp(unittest.TestCase):
         for batch_id, data in enumerate(test_reader()):
             x_data = np.array([x[0].reshape(1, 28, 28)
                                for x in data]).astype('float32')
             y_data = np.array([x[1] for x in data]).astype('int64').reshape(-1, 1)

             img = paddle.to_tensor(x_data)
             label = paddle.to_tensor(y_data)
python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_lstm_model.py

@@ -20,6 +20,7 @@ import math
 import functools
 import contextlib
 import struct
+import tempfile
 import numpy as np
 import paddle
 import paddle.fluid as fluid

@@ -37,9 +38,9 @@ class TestPostTrainingQuantization(unittest.TestCase):
         self.download_path = 'int8/download'
         self.cache_folder = os.path.expanduser('~/.cache/paddle/dataset/' +
                                                self.download_path)
-        self.timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
-        self.int8_model_path = os.path.join(os.getcwd(),
-                                            "post_training_" + self.timestamp)
+        self.root_path = tempfile.TemporaryDirectory()
+        self.int8_model_path = os.path.join(self.root_path.name,
+                                            "post_training_quantization")
         try:
             os.system("mkdir -p " + self.int8_model_path)
         except Exception as e:

@@ -48,11 +49,7 @@ class TestPostTrainingQuantization(unittest.TestCase):
             sys.exit(-1)

     def tearDown(self):
-        try:
-            os.system("rm -rf {}".format(self.int8_model_path))
-        except Exception as e:
-            print("Failed to delete {} due to {}".format(self.int8_model_path,
-                                                         str(e)))
+        self.root_path.cleanup()

     def cache_unzipping(self, target_folder, zip_path):
         if not os.path.exists(target_folder):
python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mnist.py

@@ -18,6 +18,7 @@ import sys
 import random
 import math
 import functools
+import tempfile
 import contextlib
 import numpy as np
 import paddle

@@ -33,12 +34,12 @@ np.random.seed(0)
 class TestPostTrainingQuantization(unittest.TestCase):
     def setUp(self):
+        self.root_path = tempfile.TemporaryDirectory()
+        self.int8_model_path = os.path.join(self.root_path.name,
+                                            "post_training_quantization")
         self.download_path = 'int8/download'
         self.cache_folder = os.path.expanduser('~/.cache/paddle/dataset/' +
                                                self.download_path)
-        self.timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
-        self.int8_model_path = os.path.join(os.getcwd(),
-                                            "post_training_" + self.timestamp)
         try:
             os.system("mkdir -p " + self.int8_model_path)
         except Exception as e:

@@ -47,11 +48,7 @@ class TestPostTrainingQuantization(unittest.TestCase):
             sys.exit(-1)

     def tearDown(self):
-        try:
-            os.system("rm -rf {}".format(self.int8_model_path))
-        except Exception as e:
-            print("Failed to delete {} due to {}".format(self.int8_model_path,
-                                                         str(e)))
+        self.root_path.cleanup()

     def cache_unzipping(self, target_folder, zip_path):
         if not os.path.exists(target_folder):

@@ -82,8 +79,8 @@ class TestPostTrainingQuantization(unittest.TestCase):
         cnt = 0
         periods = []
         for batch_id, data in enumerate(val_reader()):
             image = np.array([x[0].reshape(img_shape)
                               for x in data]).astype("float32")
             input_label = np.array([x[1] for x in data]).astype("int64")
             t1 = time.time()

@@ -121,7 +118,6 @@ class TestPostTrainingQuantization(unittest.TestCase):
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         scope = fluid.global_scope()
         val_reader = paddle.dataset.mnist.train()
-
         ptq = PostTrainingQuantization(

@@ -178,12 +174,12 @@ class TestPostTrainingQuantization(unittest.TestCase):
         print("---Post training quantization of {} method---".format(algo))
         print(
             "FP32 {0}: batch_size {1}, throughput {2} img/s, latency {3} s, acc1 {4}."
             .format(model_name, batch_size, fp32_throughput, fp32_latency,
                     fp32_acc1))
         print(
             "INT8 {0}: batch_size {1}, throughput {2} img/s, latency {3} s, acc1 {4}.\n"
             .format(model_name, batch_size, int8_throughput, int8_latency,
                     int8_acc1))
         sys.stdout.flush()
python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mobilenetv1.py

@@ -19,6 +19,7 @@ import random
 import math
 import functools
 import contextlib
+import tempfile
 import numpy as np
 from PIL import Image, ImageEnhance
 import paddle

@@ -146,16 +147,12 @@ class TestPostTrainingQuantization(unittest.TestCase):
         self.infer_iterations = 50000 if os.environ.get(
             'DATASET') == 'full' else 2

-        self.timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
-        self.int8_model = os.path.join(os.getcwd(),
-                                       "post_training_" + self.timestamp)
+        self.root_path = tempfile.TemporaryDirectory()
+        self.int8_model = os.path.join(self.root_path.name,
+                                       "post_training_quantization")

     def tearDown(self):
-        try:
-            os.system("rm -rf {}".format(self.int8_model))
-        except Exception as e:
-            print("Failed to delete {} due to {}".format(self.int8_model,
-                                                         str(e)))
+        self.root_path.cleanup()

     def cache_unzipping(self, target_folder, zip_path):
         if not os.path.exists(target_folder):

@@ -207,8 +204,8 @@ class TestPostTrainingQuantization(unittest.TestCase):
         cnt = 0
         periods = []
         for batch_id, data in enumerate(val_reader()):
             image = np.array([x[0].reshape(image_shape)
                               for x in data]).astype("float32")
             label = np.array([x[1] for x in data]).astype("int64")
             label = label.reshape([-1, 1])

@@ -308,11 +305,13 @@ class TestPostTrainingQuantization(unittest.TestCase):
         print("---Post training quantization of {} method---".format(algo))
         print(
             "FP32 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}."
             .format(model, batch_size, fp32_throughput, fp32_latency,
                     fp32_acc1))
         print(
             "INT8 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.\n"
             .format(model, batch_size, int8_throughput, int8_latency,
                     int8_acc1))
         sys.stdout.flush()

         delta_value = fp32_acc1 - int8_acc1

@@ -405,7 +404,7 @@ class TestPostTrainingAbsMaxForMobilenetv1(TestPostTrainingQuantization):
         is_full_quantize = False
         is_use_cache_file = False
         is_optimize_model = False
-        # The accuracy diff of post-traing quantization (abs_max) maybe bigger
+        # The accuracy diff of post-training quantization (abs_max) maybe bigger
         diff_threshold = 0.05
         self.run_test(model, algo, round_type, data_urls, data_md5s,
                       quantizable_op_type, is_full_quantize, is_use_cache_file,
python/paddle/fluid/contrib/slim/tests/test_quantization_scale_pass.py

@@ -17,6 +17,7 @@ import unittest
 import random
 import numpy as np
 import six
+import tempfile
 import paddle.fluid as fluid
 import paddle
 from paddle.fluid.framework import IrGraph

@@ -165,15 +166,20 @@ class TestQuantizationScalePass(unittest.TestCase):
                 marked_nodes.add(op)
             test_graph.draw('.', 'quant_scale' + dev_name, marked_nodes)

-        with open('quant_scale_model' + dev_name + '.txt', 'w') as f:
+        tempdir = tempfile.TemporaryDirectory()
+        mapping_table_path = os.path.join(
+            tempdir.name, 'quant_scale_model' + dev_name + '.txt')
+        save_path = os.path.join(tempdir.name, 'quant_scale_model' + dev_name)
+        with open(mapping_table_path, 'w') as f:
             f.write(str(server_program))

         with fluid.scope_guard(scope):
-            fluid.io.save_inference_model('quant_scale_model' + dev_name,
-                                          ['image', 'label'], [loss],
+            fluid.io.save_inference_model(save_path, ['image', 'label'],
+                                          [loss],
                                           exe,
                                           server_program,
                                           clip_extra=True)
+        tempdir.cleanup()

     def test_quant_scale_cuda(self):
         if fluid.core.is_compiled_with_cuda():
python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py

@@ -18,6 +18,7 @@ import json
 import random
 import numpy as np
 import six
+import tempfile
 import paddle.fluid as fluid
 import paddle
 from paddle.fluid.framework import IrGraph

@@ -108,18 +109,20 @@ class TestUserDefinedQuantization(unittest.TestCase):
         def get_optimizer():
             return fluid.optimizer.MomentumOptimizer(0.0001, 0.9)

-        def load_dict():
-            with open('mapping_table_for_saving_inference_model', 'r') as file:
+        def load_dict(mapping_table_path):
+            with open(mapping_table_path, 'r') as file:
                 data = file.read()
                 data = json.loads(data)
                 return data

-        def save_dict(Dict):
-            with open('mapping_table_for_saving_inference_model', 'w') as file:
+        def save_dict(Dict, mapping_table_path):
+            with open(mapping_table_path, 'w') as file:
                 file.write(json.dumps(Dict))

         random.seed(0)
         np.random.seed(0)

+        tempdir = tempfile.TemporaryDirectory()
+        mapping_table_path = os.path.join(tempdir.name, 'inference')
+
         main = fluid.Program()
         startup = fluid.Program()

@@ -160,7 +163,7 @@ class TestUserDefinedQuantization(unittest.TestCase):
             executor=exe)
         test_transform_pass.apply(test_graph)

-        save_dict(test_graph.out_node_mapping_table)
+        save_dict(test_graph.out_node_mapping_table, mapping_table_path)

         add_quant_dequant_pass = AddQuantDequantPass(scope=scope, place=place)
         add_quant_dequant_pass.apply(main_graph)

@@ -202,10 +205,11 @@ class TestUserDefinedQuantization(unittest.TestCase):
             activation_bits=8,
             weight_quantize_type=weight_quant_type)

-        mapping_table = load_dict()
+        mapping_table = load_dict(mapping_table_path)
         test_graph.out_node_mapping_table = mapping_table
         if act_quantize_func == None and weight_quantize_func == None:
             freeze_pass.apply(test_graph)
+        tempdir.cleanup()

     def test_act_preprocess_cuda(self):
         if fluid.core.is_compiled_with_cuda():
python/paddle/fluid/tests/unittests/test_inference_model_io.py

@@ -18,6 +18,7 @@ import unittest
 import os
 import six
+import tempfile
 import numpy as np
 import paddle.fluid.core as core
 import paddle.fluid as fluid

@@ -31,6 +32,7 @@ from paddle.fluid.compiler import CompiledProgram
 from paddle.fluid.framework import Program, program_guard
 from paddle.fluid.io import save_inference_model, load_inference_model, save_persistables
 from paddle.fluid.transpiler import memory_optimize
+
 paddle.enable_static()

@@ -43,8 +45,9 @@ class InferModel(object):
 class TestBook(unittest.TestCase):
     def test_fit_line_inference_model(self):
-        MODEL_DIR = "./tmp/inference_model"
-        UNI_MODEL_DIR = "./tmp/inference_model1"
+        root_path = tempfile.TemporaryDirectory()
+        MODEL_DIR = os.path.join(root_path.name, "inference_model")
+        UNI_MODEL_DIR = os.path.join(root_path.name, "inference_model1")

         init_program = Program()
         program = Program()

@@ -67,8 +70,8 @@ class TestBook(unittest.TestCase):
         exe.run(init_program, feed={}, fetch_list=[])

         for i in six.moves.xrange(100):
             tensor_x = np.array([[1, 1], [1, 2], [3, 4],
                                  [5, 2]]).astype("float32")
             tensor_y = np.array([[-2], [-3], [-7], [-7]]).astype("float32")

             exe.run(program,

@@ -111,13 +114,16 @@ class TestBook(unittest.TestCase):
         print("fetch %s" % str(model.fetch_vars[0]))
         self.assertEqual(expected, actual)

+        root_path.cleanup()
         self.assertRaises(ValueError, fluid.io.load_inference_model, None,
                           exe, model_str, None)


 class TestSaveInferenceModel(unittest.TestCase):
     def test_save_inference_model(self):
-        MODEL_DIR = "./tmp/inference_model2"
+        root_path = tempfile.TemporaryDirectory()
+        MODEL_DIR = os.path.join(root_path.name, "inference_model2")

         init_program = Program()
         program = Program()

@@ -136,9 +142,11 @@ class TestSaveInferenceModel(unittest.TestCase):
         exe.run(init_program, feed={}, fetch_list=[])

         save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, program)
+        root_path.cleanup()

     def test_save_inference_model_with_auc(self):
-        MODEL_DIR = "./tmp/inference_model4"
+        root_path = tempfile.TemporaryDirectory()
+        MODEL_DIR = os.path.join(root_path.name, "inference_model4")

         init_program = Program()
         program = Program()

@@ -160,6 +168,7 @@ class TestSaveInferenceModel(unittest.TestCase):
             warnings.simplefilter("always")
             save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe,
                                  program)
+            root_path.cleanup()
             expected_warn = "please ensure that you have set the auc states to zeros before saving inference model"
             self.assertTrue(len(w) > 0)
             self.assertTrue(expected_warn == str(w[0].message))

@@ -167,7 +176,8 @@ class TestSaveInferenceModel(unittest.TestCase):
 class TestInstance(unittest.TestCase):
     def test_save_inference_model(self):
-        MODEL_DIR = "./tmp/inference_model3"
+        root_path = tempfile.TemporaryDirectory()
+        MODEL_DIR = os.path.join(root_path.name, "inference_model3")

         init_program = Program()
         program = Program()

@@ -193,11 +203,13 @@ class TestInstance(unittest.TestCase):
         save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, cp_prog)
         self.assertRaises(TypeError, save_inference_model,
                           [MODEL_DIR, ["x", "y"], [avg_cost], [], cp_prog])
+        root_path.cleanup()


 class TestSaveInferenceModelNew(unittest.TestCase):
     def test_save_and_load_inference_model(self):
-        MODEL_DIR = "./tmp/inference_model5"
+        root_path = tempfile.TemporaryDirectory()
+        MODEL_DIR = os.path.join(root_path.name, "inference_model5")

         init_program = fluid.default_startup_program()
         program = fluid.default_main_program()

@@ -292,6 +304,7 @@ class TestSaveInferenceModelNew(unittest.TestCase):
         model = InferModel(paddle.static.io.load_inference_model(MODEL_DIR, exe))
+        root_path.cleanup()

         outs = exe.run(model.program,
                        feed={
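test_inference_model_io.py creates the directory inline in each test and calls root_path.cleanup() partway through, so a failing assertion before that call leaves removal to the TemporaryDirectory finalizer. A more defensive variant (an alternative sketch, not what this diff does) registers the cleanup up front with unittest's addCleanup:

    import os
    import tempfile
    import unittest


    class TestSaveInferenceModelSketch(unittest.TestCase):
        def test_save_inference_model(self):
            root_path = tempfile.TemporaryDirectory()
            # Registered immediately, so it runs even if an assert fails later.
            self.addCleanup(root_path.cleanup)
            model_dir = os.path.join(root_path.name, "inference_model")
            # ... build the program and save it under model_dir ...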
python/paddle/fluid/tests/unittests/test_save_inference_model_conditional_op.py

@@ -17,6 +17,7 @@ from __future__ import print_function
 import os
 import unittest
 import numpy as np
+import tempfile
 import paddle
 import paddle.fluid as fluid

@@ -31,7 +32,6 @@ def getModelOp(model_path):
     result = set()
     for i in range(0, size):
-        #print(main_block.op(i).type())
         result.add(main_block.op(i).type())
     return result

@@ -90,18 +90,20 @@ class TestConditionalOp(unittest.TestCase):
             paddle.static.InputSpec(shape=[1, 3, 8, 8], dtype='float32')
         ])
-        paddle.jit.save(net, './while_net')
+        root_path = tempfile.TemporaryDirectory()
+        model_file = os.path.join(root_path.name, "while_net")
+        paddle.jit.save(net, model_file)

         right_pdmodel = set([
             "uniform_random", "shape", "slice", "not_equal", "while",
             "elementwise_add"
         ])
         paddle.enable_static()
-        pdmodel = getModelOp("while_net.pdmodel")
-        #print(len(right_pdmodel.difference(pdmodel)))
+        pdmodel = getModelOp(model_file + ".pdmodel")
         self.assertTrue(
             len(right_pdmodel.difference(pdmodel)) == 0,
             "The while op is pruned by mistake.")
+        root_path.cleanup()

     def test_for_op(self):
         paddle.disable_static()

@@ -110,18 +112,20 @@ class TestConditionalOp(unittest.TestCase):
             net,
             input_spec=[paddle.static.InputSpec(shape=[1], dtype='int32')])
-        paddle.jit.save(net, './for_net')
+        root_path = tempfile.TemporaryDirectory()
+        model_file = os.path.join(root_path.name, "for_net")
+        paddle.jit.save(net, model_file)

         right_pdmodel = set([
             "randint", "fill_constant", "cast", "less_than", "while",
             "elementwise_add"
         ])
         paddle.enable_static()
-        pdmodel = getModelOp("for_net.pdmodel")
-        #print(len(right_pdmodel.difference(pdmodel)))
+        pdmodel = getModelOp(model_file + ".pdmodel")
         self.assertTrue(
             len(right_pdmodel.difference(pdmodel)) == 0,
             "The for op is pruned by mistake.")
+        root_path.cleanup()

     def test_if_op(self):
         paddle.disable_static()

@@ -130,18 +134,20 @@ class TestConditionalOp(unittest.TestCase):
             net,
             input_spec=[paddle.static.InputSpec(shape=[1], dtype='int32')])
-        paddle.jit.save(net, './if_net')
+        root_path = tempfile.TemporaryDirectory()
+        model_file = os.path.join(root_path.name, "if_net")
+        paddle.jit.save(net, model_file)

         right_pdmodel = set([
             "assign_value", "greater_than", "cast", "conditional_block",
             "logical_not", "select_input"
         ])
         paddle.enable_static()
-        pdmodel = getModelOp("if_net.pdmodel")
-        #print(len(right_pdmodel.difference(pdmodel)))
+        pdmodel = getModelOp(model_file + ".pdmodel")
         self.assertTrue(
             len(right_pdmodel.difference(pdmodel)) == 0,
             "The if op is pruned by mistake.")
+        root_path.cleanup()


 if __name__ == '__main__':