Commit 9176a01a (unverified)
Authored on Nov 26, 2021 by cuicheng01; committed via GitHub on Nov 26, 2021

Merge pull request #1483 from RainFrost1/whole_chain
Add TIPC support for the rec model

Parents: 0f3e3141, 12ed6e28

Changes: 4 changed files with 117 additions and 6 deletions (+117, -6)
deploy/python/predict_rec.py  (+36, -3)
test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_infer_python.txt  (+52, -0)
test_tipc/prepare.sh  (+18, -0)
test_tipc/test_train_inference_python.sh  (+11, -3)
deploy/python/predict_rec.py (view file @ 9176a01a)

@@ -35,6 +35,27 @@ class RecPredictor(Predictor):
         self.preprocess_ops = create_operators(config["RecPreProcess"][
             "transform_ops"])
         self.postprocess = build_postprocess(config["RecPostProcess"])
+        self.benchmark = config["Global"].get("benchmark", False)
+
+        if self.benchmark:
+            import auto_log
+            pid = os.getpid()
+            self.auto_logger = auto_log.AutoLogger(
+                model_name=config["Global"].get("model_name", "rec"),
+                model_precision='fp16'
+                if config["Global"]["use_fp16"] else 'fp32',
+                batch_size=config["Global"].get("batch_size", 1),
+                data_shape=[3, 224, 224],
+                save_path=config["Global"].get("save_log_path",
+                                               "./auto_log.log"),
+                inference_config=self.config,
+                pids=pid,
+                process_name=None,
+                gpu_ids=None,
+                time_keys=[
+                    'preprocess_time', 'inference_time', 'postprocess_time'
+                ],
+                warmup=2)

     def predict(self, images, feature_normalize=True):
         input_names = self.paddle_predictor.get_input_names()

@@ -44,16 +65,22 @@ class RecPredictor(Predictor):
         output_tensor = self.paddle_predictor.get_output_handle(output_names[
             0])

+        if self.benchmark:
+            self.auto_logger.times.start()
         if not isinstance(images, (list, )):
             images = [images]
         for idx in range(len(images)):
             for ops in self.preprocess_ops:
                 images[idx] = ops(images[idx])
         image = np.array(images)
+        if self.benchmark:
+            self.auto_logger.times.stamp()

         input_tensor.copy_from_cpu(image)
         self.paddle_predictor.run()
         batch_output = output_tensor.copy_to_cpu()
+        if self.benchmark:
+            self.auto_logger.times.stamp()

         if feature_normalize:
             feas_norm = np.sqrt(

@@ -62,6 +89,9 @@ class RecPredictor(Predictor):
         if self.postprocess is not None:
             batch_output = self.postprocess(batch_output)
+        if self.benchmark:
+            self.auto_logger.times.end(stamp=True)
+
         return batch_output

@@ -85,16 +115,19 @@ def main(config):
             batch_names.append(img_name)
             cnt += 1

-        if cnt % config["Global"]["batch_size"] == 0 or (idx + 1) == len(image_list):
+        if cnt % config["Global"]["batch_size"] == 0 or (idx + 1
+                                                         ) == len(image_list):
             if len(batch_imgs) == 0:
                 continue
             batch_results = rec_predictor.predict(batch_imgs)
             for number, result_dict in enumerate(batch_results):
                 filename = batch_names[number]
                 print("{}:\t{}".format(filename, result_dict))
             batch_imgs = []
             batch_names = []

+    if rec_predictor.benchmark:
+        rec_predictor.auto_logger.report()
     return
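The benchmark branch added above follows a fixed timing protocol: times.start() before preprocessing, times.stamp() after preprocessing and again after inference, times.end(stamp=True) after postprocessing, and a final auto_logger.report() in main(). The sketch below only illustrates that call order with a hypothetical SimpleTimer stand-in; it is not the real auto_log.AutoLogger API, and the sleeps are placeholders for the actual pipeline stages.

# Hypothetical stand-in for auto_log.AutoLogger, shown only to illustrate the
# start/stamp/stamp/end + report call order used in RecPredictor.predict above.
import time


class SimpleTimer:
    def __init__(self):
        # timestamps: start, after preprocess, after inference, end
        self.marks = []

    def start(self):
        self.marks = [time.time()]

    def stamp(self):
        self.marks.append(time.time())

    def end(self, stamp=True):
        if stamp:
            self.marks.append(time.time())

    def report(self):
        # durations between consecutive marks, mirroring time_keys above
        keys = ["preprocess_time", "inference_time", "postprocess_time"]
        for key, (t0, t1) in zip(keys, zip(self.marks, self.marks[1:])):
            print("{}: {:.4f}s".format(key, t1 - t0))


if __name__ == "__main__":
    timer = SimpleTimer()
    timer.start()          # before preprocessing
    time.sleep(0.01)       # ... preprocess ops would run here ...
    timer.stamp()          # after preprocessing
    time.sleep(0.02)       # ... predictor.run() and copy_to_cpu() ...
    timer.stamp()          # after inference
    time.sleep(0.01)       # ... postprocess ...
    timer.end(stamp=True)  # after postprocessing
    timer.report()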
test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_infer_python.txt (new file, mode 0 → 100644; view file @ 9176a01a)
===========================train_params===========================
model_name:GeneralRecognition_PPLCNet_x2_5
python:python3.7
gpu_list:0|0,1
-o Global.device:gpu
-o Global.auto_cast:null
-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
-o Global.output_dir:./output/
-o DataLoader.Train.sampler.batch_size:8
-o Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./dataset/ILSVRC2012/val
null:null
##
trainer:norm_train
norm_train:tools/train.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:tools/eval.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml
null:null
##
===========================infer_params==========================
-o Global.save_inference_dir:./inference
-o Global.pretrained_model:
norm_export:tools/export_model.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml
quant_export:null
fpgm_export:null
distill_export:null
kl_quant:null
export2:null
pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/general_PPLCNet_x2_5_pretrained_v1.0.pdparams
infer_model:../inference/
infer_export:True
infer_quant:False
inference:python/predict_rec.py -c configs/inference_rec.yaml
-o Global.use_gpu:True|False
-o Global.enable_mkldnn:True|False
-o Global.cpu_num_threads:1|6
-o Global.batch_size:1|16
-o Global.use_tensorrt:True|False
-o Global.use_fp16:True|False
-o Global.rec_inference_model_dir:../inference
-o Global.infer_imgs:../dataset/Aliproduct/demo_test/
-o Global.save_log_path:null
-o Global.benchmark:True
null:null
null:null
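The prepare.sh change in the next diff picks the pretrained-weights URL out of this file by position: func_parser_key and func_parser_value are applied to "${lines[35]}", i.e. line 36 of this config, the pretrained_model_url entry (the same convention puts model_name at "${lines[1]}"). Those helpers are shell functions defined elsewhere in the test_tipc tooling and are not part of this diff; the following is only a rough Python sketch, assuming each line splits on its first colon, meant to show the line-index convention.

# Illustration only: how a TIPC config like the one above maps to the
# zero-based line indices that prepare.sh reads (e.g. "${lines[35]}").
# The real parsing is done by the shell helpers func_parser_key /
# func_parser_value; splitting on the first colon is an assumption here.

def parse_tipc_config(path):
    with open(path) as f:
        lines = [line.rstrip("\n") for line in f]
    entries = []
    for line in lines:
        key, _, value = line.partition(":")
        entries.append((key, value))
    return entries


if __name__ == "__main__":
    cfg = ("test_tipc/config/GeneralRecognition/"
           "GeneralRecognition_PPLCNet_x2_5_train_infer_python.txt")
    entries = parse_tipc_config(cfg)
    key, value = entries[35]  # corresponds to "${lines[35]}" in prepare.sh
    print(key)    # expected: pretrained_model_url
    print(value)  # expected: the BOS download URL of the pretrained weights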
test_tipc/prepare.sh (view file @ 9176a01a)

@@ -37,6 +37,24 @@ model_name=$(func_parser_value "${lines[1]}")
 model_url_value=$(func_parser_value "${lines[35]}")
 model_url_key=$(func_parser_key "${lines[35]}")
 
+if [[ $FILENAME == *GeneralRecognition* ]]; then
+    cd dataset
+    rm -rf Aliproduct
+    rm -rf train_reg_all_data.txt
+    rm -rf demo_train
+    wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/tipc_shitu_demo_data.tar
+    tar -xf tipc_shitu_demo_data.tar
+    ln -s tipc_shitu_demo_data Aliproduct
+    ln -s tipc_shitu_demo_data/demo_train.txt train_reg_all_data.txt
+    ln -s tipc_shitu_demo_data/demo_train demo_train
+    cd tipc_shitu_demo_data
+    ln -s demo_test.txt val_list.txt
+    cd ../../
+    eval "wget -nc $model_url_value"
+    mv general_PPLCNet_x2_5_pretrained_v1.0.pdparams GeneralRecognition_PPLCNet_x2_5_pretrained.pdparams
+    exit 0
+fi
+
 if [ ${MODE} = "lite_train_lite_infer" ] || [ ${MODE} = "lite_train_whole_infer" ];then
     # pretrain lite train data
     cd dataset
test_tipc/test_train_inference_python.sh (view file @ 9176a01a)

@@ -291,8 +291,12 @@ else
                 export FLAGS_cudnn_deterministic=True
                 eval $cmd
                 status_check $? "${cmd}" "${status_log}"
-                set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${model_name}/${train_model_name}")
+                if [[ $FILENAME == *GeneralRecognition* ]]; then
+                    set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/RecModel/${train_model_name}")
+                else
+                    set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${model_name}/${train_model_name}")
+                fi
 
                 # save norm trained models to set pretrain for pact training and fpgm training
                 if [ ${trainer} = ${trainer_norm} ]; then
                     load_norm_train_model=${set_eval_pretrain}

@@ -308,7 +312,11 @@ else
                 if [ ${run_export} != "null" ]; then
                     # run export model
                     save_infer_path="${save_log}"
-                    set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${model_name}/${train_model_name}")
+                    if [[ $FILENAME == *GeneralRecognition* ]]; then
+                        set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/RecModel/${train_model_name}")
+                    else
+                        set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${model_name}/${train_model_name}")
+                    fi
                     set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}")
                     export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}"
                     eval $export_cmd