weixin_41840029 / PaddleOCR
Forked from PaddlePaddle / PaddleOCR (in sync with the fork source)
Commit f3efa9be
Authored June 09, 2021 by LDOUBLEV
fix test.sh bug
Parent: b5aa9bde
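This commit touches the train-then-infer loop of test/test.sh in two ways: the exported inference model directory is renamed from ${save_log}/export_inference/ to ${save_log}_infer, and the CPU and GPU inference commands now take --det_model_dir from the per-model ${det_model_dir} variable (empty for recognition models) instead of hard-coding ${save_log}/export_inference/.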
Showing 1 changed file with 8 additions and 8 deletions:

test/test.sh (+8, -8)
test/test.sh @ f3efa9be
@@ -138,18 +138,18 @@ for train_model in ${train_model_list[*]}; do
         ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2
         status_check $? "${trainer}" "${command}" "${status_log}"

-        command="${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}"
-        ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}
+        command="${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}_infer/ Global.save_model_dir=${save_log}"
+        ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}_infer/ Global.save_model_dir=${save_log}
         status_check $? "${trainer}" "${command}" "${status_log}"

         if [ "${model_name}" = "det" ]; then
             export rec_batch_size_list=("1")
             inference="tools/infer/predict_det.py"
-            det_model_dir=${save_log}/export_inference/
+            det_model_dir=${save_log}_infer
             rec_model_dir=""
         elif [ "${model_name}" = "rec" ]; then
             inference="tools/infer/predict_rec.py"
-            rec_model_dir=${save_log}/export_inference/
+            rec_model_dir=${save_log}_infer
             det_model_dir=""
         fi
         # inference
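Taken together, the hunk above tightens the directory convention: training checkpoints stay under ${save_log}, the exported inference model now lands in ${save_log}_infer rather than ${save_log}/export_inference/, and det_model_dir/rec_model_dir point at that same location. A minimal sketch of how the pieces line up after the change; the save_log value and config path below are hypothetical, not taken from this diff:

    # Sketch only: the export/inference directory convention after this commit.
    save_log=./output/det_mv3_db          # hypothetical training output dir
    yml_file=configs/det/det_mv3_db.yml   # hypothetical config path

    # Export reads the latest checkpoint from ${save_log} and writes the
    # inference model to ${save_log}_infer/ ...
    python3 tools/export_model.py -c ${yml_file} \
        -o Global.pretrained_model=${save_log}/latest \
           Global.save_inference_dir=${save_log}_infer/

    # ... which is exactly where the detection branch now points:
    det_model_dir=${save_log}_infer       # i.e. ./output/det_mv3_db_infer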
@@ -159,8 +159,8 @@ for train_model in ${train_model_list[*]}; do
         for threads in ${cpu_threads_list[*]}; do
             for rec_batch_size in ${rec_batch_size_list[*]}; do
                 save_log_path="${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log"
-                command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}"
-                ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}
+                command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}"
+                ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}
                 status_check $? "${inference}" "${command}" "${status_log}"
             done
         done
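Each command in these loops is followed by a call to status_check, a helper that this diff uses but does not define. As a loose sketch of its shape, assuming the usual pass/fail-logging pattern (the real definition lives elsewhere in test/test.sh and may differ):

    # Hypothetical sketch of status_check; not part of this diff.
    function status_check() {
        local last_status=$1   # exit code of the command that just ran
        local run_name=$2      # trainer or inference script, e.g. "${inference}"
        local run_command=$3   # the full command line, echoed into the log
        local run_log=$4       # "${status_log}": file collecting pass/fail lines
        if [ "${last_status}" -eq 0 ]; then
            echo "${run_name} ran successfully: ${run_command}" | tee -a "${run_log}"
        else
            echo "${run_name} failed: ${run_command}" | tee -a "${run_log}"
        fi
    }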
@@ -173,8 +173,8 @@ for train_model in ${train_model_list[*]}; do
                 fi
                 for rec_batch_size in ${rec_batch_size_list[*]}; do
                     save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log"
-                    command="${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}"
-                    ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}
+                    command="${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}"
+                    ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}
                     status_check $? "${inference}" "${command}" "${status_log}"
                 done
             done
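For reference, here is what the fixed GPU branch expands to for a detection model once the loops substitute concrete values; every path and value below is hypothetical. For a recognition model the same template would now pass an empty --det_model_dir, which is what the old hard-coded ${save_log}/export_inference/ got wrong:

    # Hypothetical expansion of the fixed GPU inference command (det model);
    # all paths and values are illustrative, not taken from this diff.
    python3 tools/infer/predict_det.py \
        --use_gpu=True \
        --use_tensorrt=False \
        --precision=fp32 \
        --benchmark=True \
        --det_model_dir=./output/det_mv3_db_infer \
        --rec_batch_num=1 \
        --rec_model_dir="" \
        --image_dir=./doc/imgs/ \
        --save_log_path=./output/logs/det_gpu_infer.log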