PaddlePaddle / PaddleOCR

Commit 0fbcb520
Authored Dec 22, 2020 by LDOUBLEV

Merge branch 'dygraph' of https://github.com/PaddlePaddle/PaddleOCR into dyg_db

Parents: dbd27878, 9df55aa5

Showing 7 changed files with 49 additions and 69 deletions (+49 / -69)

ppocr/data/imaug/operators.py    +2   -2
ppocr/losses/det_sast_loss.py    +22  -22
tools/infer/predict_cls.py       +5   -10
tools/infer/predict_det.py       +5   -9
tools/infer/predict_rec.py       +4   -10
tools/infer/utility.py           +10  -15
tools/program.py                 +1   -1

ppocr/data/imaug/operators.py
@@ -119,10 +119,10 @@ class DetResizeForTest(object):
         if 'image_shape' in kwargs:
             self.image_shape = kwargs['image_shape']
             self.resize_type = 1
-        if 'limit_side_len' in kwargs:
+        elif 'limit_side_len' in kwargs:
             self.limit_side_len = kwargs['limit_side_len']
             self.limit_type = kwargs.get('limit_type', 'min')
-        if 'resize_long' in kwargs:
+        elif 'resize_long' in kwargs:
             self.resize_type = 2
             self.resize_long = kwargs.get('resize_long', 960)
         else:
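
In the hunk above, the second and third `if` become `elif`, so only one resize strategy is selected per operator instance. A minimal sketch of the corrected dispatch (a standalone toy class, not the full PaddleOCR operator; the final `else` body is omitted here):

# Minimal sketch of the corrected kwargs dispatch in DetResizeForTest.
# With `elif`, passing 'image_shape' together with 'resize_long' no longer
# lets a later branch overwrite the settings chosen by an earlier one.
class DetResizeForTestSketch(object):
    def __init__(self, **kwargs):
        self.resize_type = 0
        if 'image_shape' in kwargs:
            self.image_shape = kwargs['image_shape']
            self.resize_type = 1
        elif 'limit_side_len' in kwargs:
            self.limit_side_len = kwargs['limit_side_len']
            self.limit_type = kwargs.get('limit_type', 'min')
        elif 'resize_long' in kwargs:
            self.resize_type = 2
            self.resize_long = kwargs.get('resize_long', 960)
        else:
            pass  # default limit-based resize; body omitted in this sketch

op = DetResizeForTestSketch(image_shape=[736, 1280], resize_long=960)
print(op.resize_type)  # 1 -- previously the 'resize_long' branch would reset it to 2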

ppocr/losses/det_sast_loss.py
@@ -19,7 +19,6 @@ from __future__ import print_function
 import paddle
 from paddle import nn
 from .det_basic_loss import DiceLoss
-import paddle.fluid as fluid
 import numpy as np
@@ -27,9 +26,7 @@ class SASTLoss(nn.Layer):
     """
     """
 
-    def __init__(self,
-                 eps=1e-6,
-                 **kwargs):
+    def __init__(self, eps=1e-6, **kwargs):
         super(SASTLoss, self).__init__()
         self.dice_loss = DiceLoss(eps=eps)
@@ -53,10 +50,12 @@ class SASTLoss(nn.Layer):
         score_loss = 1.0 - 2 * intersection / (union + 1e-5)
 
         #border loss
-        l_border_split, l_border_norm = paddle.split(l_border, num_or_sections=[4, 1], axis=1)
+        l_border_split, l_border_norm = paddle.split(
+            l_border, num_or_sections=[4, 1], axis=1)
         f_border_split = f_border
         border_ex_shape = l_border_norm.shape * np.array([1, 4, 1, 1])
-        l_border_norm_split = paddle.expand(x=l_border_norm, shape=border_ex_shape)
+        l_border_norm_split = paddle.expand(
+            x=l_border_norm, shape=border_ex_shape)
         l_border_score = paddle.expand(x=l_score, shape=border_ex_shape)
         l_border_mask = paddle.expand(x=l_mask, shape=border_ex_shape)
@@ -72,7 +71,8 @@ class SASTLoss(nn.Layer):
             (paddle.sum(l_border_score * l_border_mask) + 1e-5)
 
         #tvo_loss
-        l_tvo_split, l_tvo_norm = paddle.split(l_tvo, num_or_sections=[8, 1], axis=1)
+        l_tvo_split, l_tvo_norm = paddle.split(
+            l_tvo, num_or_sections=[8, 1], axis=1)
         f_tvo_split = f_tvo
         tvo_ex_shape = l_tvo_norm.shape * np.array([1, 8, 1, 1])
         l_tvo_norm_split = paddle.expand(x=l_tvo_norm, shape=tvo_ex_shape)
@@ -91,7 +91,8 @@ class SASTLoss(nn.Layer):
             (paddle.sum(l_tvo_score * l_tvo_mask) + 1e-5)
 
         #tco_loss
-        l_tco_split, l_tco_norm = paddle.split(l_tco, num_or_sections=[2, 1], axis=1)
+        l_tco_split, l_tco_norm = paddle.split(
+            l_tco, num_or_sections=[2, 1], axis=1)
         f_tco_split = f_tco
         tco_ex_shape = l_tco_norm.shape * np.array([1, 2, 1, 1])
         l_tco_norm_split = paddle.expand(x=l_tco_norm, shape=tco_ex_shape)
@@ -109,7 +110,6 @@ class SASTLoss(nn.Layer):
         tco_loss = paddle.sum(tco_out_loss * l_tco_score * l_tco_mask) / \
                    (paddle.sum(l_tco_score * l_tco_mask) + 1e-5)
 
         # total loss
         tvo_lw, tco_lw = 1.5, 1.5
         score_lw, border_lw = 1.0, 1.0

tools/infer/predict_cls.py
@@ -24,7 +24,6 @@ import numpy as np
 import math
 import time
 import traceback
-import paddle.fluid as fluid
 
 import tools.infer.utility as utility
 from ppocr.postprocess import build_post_process
@@ -39,7 +38,6 @@ class TextClassifier(object):
         self.cls_image_shape = [int(v) for v in args.cls_image_shape.split(",")]
         self.cls_batch_num = args.cls_batch_num
         self.cls_thresh = args.cls_thresh
-        self.use_zero_copy_run = args.use_zero_copy_run
         postprocess_params = {
             'name': 'ClsPostProcess',
             "label_list": args.label_list,
@@ -99,12 +97,8 @@ class TextClassifier(object):
             norm_img_batch = norm_img_batch.copy()
             starttime = time.time()
-            if self.use_zero_copy_run:
-                self.input_tensor.copy_from_cpu(norm_img_batch)
-                self.predictor.zero_copy_run()
-            else:
-                norm_img_batch = fluid.core.PaddleTensor(norm_img_batch)
-                self.predictor.run([norm_img_batch])
+            self.input_tensor.copy_from_cpu(norm_img_batch)
+            self.predictor.run()
             prob_out = self.output_tensors[0].copy_to_cpu()
             cls_result = self.postprocess_op(prob_out)
             elapse += time.time() - starttime
@@ -143,10 +137,11 @@ def main(args):
             "Please set --rec_image_shape='3,32,100' and --rec_char_type='en' ")
         exit()
     for ino in range(len(img_list)):
-        logger.info("Predicts of {}:{}".format(valid_image_file_list[ino], cls_res[ino]))
+        logger.info("Predicts of {}:{}".format(valid_image_file_list[ino],
+                                               cls_res[ino]))
     logger.info("Total predict time for {} images, cost: {:.3f}".format(
         len(img_list), predict_time))
 
 
 if __name__ == "__main__":
     main(utility.parse_args())
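
Here, and in predict_det.py and predict_rec.py below, the `use_zero_copy_run` branch is replaced by a single zero-copy path: feed the input handle, call run(), read the output handles. A minimal sketch of that call pattern, assuming `predictor`, `input_tensor`, and `output_tensors` come from create_predictor() in tools/infer/utility.py and `norm_img_batch` is the prepared NCHW batch:

import numpy as np

# Sketch of the batch-inference call the scripts switch to; the predictor
# and tensor handles are assumed to come from utility.create_predictor().
def run_batch(predictor, input_tensor, output_tensors, norm_img_batch):
    input_tensor.copy_from_cpu(norm_img_batch.astype(np.float32))  # feed input handle
    predictor.run()                                                # no fluid PaddleTensor wrapping
    return [t.copy_to_cpu() for t in output_tensors]               # outputs as numpy arrays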

tools/infer/predict_det.py
@@ -22,7 +22,6 @@ import cv2
 import numpy as np
 import time
 import sys
-import paddle
 
 import tools.infer.utility as utility
 from ppocr.utils.logging import get_logger
@@ -37,7 +36,6 @@ class TextDetector(object):
     def __init__(self, args):
         self.args = args
         self.det_algorithm = args.det_algorithm
-        self.use_zero_copy_run = args.use_zero_copy_run
         pre_process_list = [{
             'DetResizeForTest': {
                 'limit_side_len': args.det_limit_side_len,
@@ -72,7 +70,9 @@ class TextDetector(object):
             postprocess_params["nms_thresh"] = args.det_east_nms_thresh
         elif self.det_algorithm == "SAST":
-            pre_process_list[0] = {
-                'DetResizeForTest': {'resize_long': args.det_limit_side_len}
-            }
+            pre_process_list[0] = {
+                'DetResizeForTest': {
+                    'resize_long': args.det_limit_side_len
+                }
+            }
             postprocess_params['name'] = 'SASTPostProcess'
             postprocess_params["score_thresh"] = args.det_sast_score_thresh
@@ -161,12 +161,8 @@ class TextDetector(object):
         img = img.copy()
         starttime = time.time()
-        if self.use_zero_copy_run:
-            self.input_tensor.copy_from_cpu(img)
-            self.predictor.zero_copy_run()
-        else:
-            im = paddle.fluid.core.PaddleTensor(img)
-            self.predictor.run([im])
+        self.input_tensor.copy_from_cpu(img)
+        self.predictor.run()
         outputs = []
         for output_tensor in self.output_tensors:
             output = output_tensor.copy_to_cpu()

tools/infer/predict_rec.py
@@ -23,7 +23,6 @@ import numpy as np
 import math
 import time
 import traceback
-import paddle.fluid as fluid
 
 import tools.infer.utility as utility
 from ppocr.postprocess import build_post_process
@@ -39,7 +38,6 @@ class TextRecognizer(object):
         self.character_type = args.rec_char_type
         self.rec_batch_num = args.rec_batch_num
         self.rec_algorithm = args.rec_algorithm
-        self.use_zero_copy_run = args.use_zero_copy_run
         postprocess_params = {
             'name': 'CTCLabelDecode',
             "character_type": args.rec_char_type,
@@ -101,12 +99,8 @@ class TextRecognizer(object):
             norm_img_batch = np.concatenate(norm_img_batch)
             norm_img_batch = norm_img_batch.copy()
             starttime = time.time()
-            if self.use_zero_copy_run:
-                self.input_tensor.copy_from_cpu(norm_img_batch)
-                self.predictor.zero_copy_run()
-            else:
-                norm_img_batch = fluid.core.PaddleTensor(norm_img_batch)
-                self.predictor.run([norm_img_batch])
+            self.input_tensor.copy_from_cpu(norm_img_batch)
+            self.predictor.run()
             outputs = []
             for output_tensor in self.output_tensors:
                 output = output_tensor.copy_to_cpu()
@@ -145,8 +139,8 @@ def main(args):
             "Please set --rec_image_shape='3,32,100' and --rec_char_type='en' ")
         exit()
     for ino in range(len(img_list)):
-        logger.info("Predicts of {}:{}".format(valid_image_file_list[ino], rec_res[ino]))
+        logger.info("Predicts of {}:{}".format(valid_image_file_list[ino],
+                                               rec_res[ino]))
     logger.info("Total predict time for {} images, cost: {:.3f}".format(
         len(img_list), predict_time))

tools/infer/utility.py
@@ -20,8 +20,7 @@ import numpy as np
 import json
 from PIL import Image, ImageDraw, ImageFont
 import math
-from paddle.fluid.core import AnalysisConfig
-from paddle.fluid.core import create_paddle_predictor
+from paddle import inference
 
 
 def parse_args():
@@ -83,8 +82,6 @@ def parse_args():
     parser.add_argument("--cls_thresh", type=float, default=0.9)
 
     parser.add_argument("--enable_mkldnn", type=str2bool, default=False)
-    parser.add_argument("--use_zero_copy_run", type=str2bool, default=False)
     parser.add_argument("--use_pdserving", type=str2bool, default=False)
 
     return parser.parse_args()
@@ -110,14 +107,14 @@ def create_predictor(args, mode, logger):
         logger.info("not find params file path {}".format(params_file_path))
         sys.exit(0)
 
-    config = AnalysisConfig(model_file_path, params_file_path)
+    config = inference.Config(model_file_path, params_file_path)
 
     if args.use_gpu:
         config.enable_use_gpu(args.gpu_mem, 0)
         if args.use_tensorrt:
             config.enable_tensorrt_engine(
-                precision_mode=AnalysisConfig.Precision.Half
-                if args.use_fp16 else AnalysisConfig.Precision.Float32,
+                precision_mode=inference.PrecisionType.Half
+                if args.use_fp16 else inference.PrecisionType.Float32,
                 max_batch_size=args.max_batch_size)
     else:
         config.disable_gpu()
@@ -130,20 +127,18 @@ def create_predictor(args, mode, logger):
     # config.enable_memory_optim()
     config.disable_glog_info()
 
-    if args.use_zero_copy_run:
-        config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
-        config.switch_use_feed_fetch_ops(False)
-    else:
-        config.switch_use_feed_fetch_ops(True)
+    config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
+    config.switch_use_feed_fetch_ops(False)
 
-    predictor = create_paddle_predictor(config)
+    # create predictor
+    predictor = inference.create_predictor(config)
     input_names = predictor.get_input_names()
     for name in input_names:
-        input_tensor = predictor.get_input_tensor(name)
+        input_tensor = predictor.get_input_handle(name)
     output_names = predictor.get_output_names()
     output_tensors = []
     for output_name in output_names:
-        output_tensor = predictor.get_output_tensor(output_name)
+        output_tensor = predictor.get_output_handle(output_name)
         output_tensors.append(output_tensor)
     return predictor, input_tensor, output_tensors
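
Taken together, the utility.py changes move predictor creation from the fluid-era AnalysisConfig / create_paddle_predictor API to paddle.inference with handle-based I/O. A minimal standalone sketch of the resulting setup, assuming Paddle 2.0+; the model and params file names are placeholders, not files referenced by this commit:

import numpy as np
from paddle import inference

# Placeholder model files; point these at a real exported inference model.
config = inference.Config("det_model/inference.pdmodel",
                          "det_model/inference.pdiparams")
config.disable_gpu()                     # or config.enable_use_gpu(mem_mb, 0) on GPU
config.disable_glog_info()
config.switch_use_feed_fetch_ops(False)  # handle-based (zero-copy) I/O, as set in this commit

predictor = inference.create_predictor(config)
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
output_handle = predictor.get_output_handle(predictor.get_output_names()[0])

input_handle.copy_from_cpu(np.random.rand(1, 3, 640, 640).astype("float32"))  # dummy NCHW input
predictor.run()
print(output_handle.copy_to_cpu().shape)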

tools/program.py
@@ -131,7 +131,7 @@ def check_gpu(use_gpu):
           "model on CPU"
     try:
-        if use_gpu and not paddle.fluid.is_compiled_with_cuda():
+        if use_gpu and not paddle.is_compiled_with_cuda():
             print(err)
             sys.exit(1)
     except Exception as e: