Commit ade18e13
Authored Jun 02, 2020 by dyning
add score in rec_infer
Parent: 78d90511
Showing 4 changed files with 38 additions and 18 deletions (+38 −18)
ppocr/data/rec/dataset_traversal.py         +1  −0
ppocr/modeling/architectures/rec_model.py   +6  −2
ppocr/modeling/heads/rec_attention_head.py  +9  −4
tools/infer_rec.py                          +22 −12
ppocr/data/rec/dataset_traversal.py
@@ -48,6 +48,7 @@ class LMDBReader(object):
         elif params['mode'] == "test":
             self.batch_size = 1
+            self.infer_img = params["infer_img"]

     def load_hierarchical_lmdb_dataset(self):
         lmdb_sets = {}
         dataset_idx = 0
ppocr/modeling/architectures/rec_model.py
@@ -110,7 +110,11 @@ class RecModel(object):
             return loader, outputs
         elif mode == "export":
             predict = predicts['predict']
-            predict = fluid.layers.softmax(predict)
+            if self.loss_type == "ctc":
+                predict = fluid.layers.softmax(predict)
             return [image, {'decoded_out': decoded_out, 'predicts': predict}]
         else:
-            return loader, {'decoded_out': decoded_out}
+            predict = predicts['predict']
+            if self.loss_type == "ctc":
+                predict = fluid.layers.softmax(predict)
+            return loader, {'decoded_out': decoded_out, 'predicts': predict}
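The rec_model.py change gates the softmax on the loss type and also returns the probability tensor ('predicts') next to the decoded ids in the non-export branch, so tools/infer_rec.py can fetch it and compute a confidence score. A minimal NumPy sketch, not PaddleOCR code (shapes and names are illustrative), of why the softmax matters for the CTC branch: the head emits raw logits, and softmax turns each timestep into a probability row whose maximum can be read as a per-step confidence.

# Minimal sketch, not PaddleOCR code: why the CTC branch needs a softmax.
# A CTC head emits raw logits of shape [T, C + 1] (T timesteps, C characters
# plus one trailing blank class); softmax turns each row into probabilities
# so a per-step confidence can be read off the argmax class.
import numpy as np

def softmax(logits, axis=-1):
    shifted = logits - logits.max(axis=axis, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=axis, keepdims=True)

logits = np.random.randn(5, 7)      # hypothetical: 5 timesteps, 6 chars + blank
probs = softmax(logits, axis=1)     # what the returned 'predicts' tensor would hold
step_conf = probs.max(axis=1)       # top-1 probability per timestep
print(step_conf)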
ppocr/modeling/heads/rec_attention_head.py
@@ -123,6 +123,8 @@ class AttentionPredict(object):
         full_ids = fluid.layers.fill_constant_batch_size_like(
             input=init_state, shape=[-1, 1], dtype='int64', value=1)
+        full_scores = fluid.layers.fill_constant_batch_size_like(
+            input=init_state, shape=[-1, 1], dtype='float32', value=1)
         cond = layers.less_than(x=counter, y=array_len)
         while_op = layers.While(cond=cond)
@@ -171,6 +173,9 @@ class AttentionPredict(object):
             new_ids = fluid.layers.concat([full_ids, topk_indices], axis=1)
             fluid.layers.assign(new_ids, full_ids)
+            new_scores = fluid.layers.concat(
+                [full_scores, topk_scores], axis=1)
+            fluid.layers.assign(new_scores, full_scores)
             layers.increment(x=counter, value=1, in_place=True)
             # update the memories
@@ -184,7 +189,7 @@ class AttentionPredict(object):
             length_cond = layers.less_than(x=counter, y=array_len)
             finish_cond = layers.logical_not(layers.is_empty(x=topk_indices))
             layers.logical_and(x=length_cond, y=finish_cond, out=cond)
-        return full_ids
+        return full_ids, full_scores

     def __call__(self, inputs, labels=None, mode=None):
         encoder_features = self.encoder(inputs)
@@ -223,10 +228,10 @@ class AttentionPredict(object):
                 decoder_size, char_num)
             _, decoded_out = layers.topk(input=predict, k=1)
             decoded_out = layers.lod_reset(decoded_out, y=label_out)
-            predicts = {'predict': predict, 'decoded_out': decoded_out}
+            predicts = {'predict': predict, 'decoded_out': decoded_out}
         else:
-            ids = self.gru_attention_infer(
+            ids, predict = self.gru_attention_infer(
                 decoder_boot, self.max_length, char_num, word_vector_dim,
                 encoded_vector, encoded_proj, decoder_size)
-            predicts = {'decoded_out': ids}
+            predicts = {'predict': predict, 'decoded_out': ids}
         return predicts
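The rec_attention_head.py change mirrors the existing full_ids bookkeeping with a full_scores tensor: at every decode step the top-1 score returned by topk is concatenated next to the chosen id, and both tensors are returned from gru_attention_infer. A plain-NumPy sketch of that pattern (illustrative only; greedy_decode and step_probs are made-up names, not the fluid while-loop above):

# Sketch, assuming per-step probability rows are available as a [T, C] array.
import numpy as np

def greedy_decode(step_probs):
    full_ids = []                        # analogue of the full_ids tensor
    full_scores = []                     # analogue of the new full_scores tensor
    for probs in step_probs:             # one decoder step per row
        top1 = int(np.argmax(probs))     # analogue of layers.topk(input=..., k=1)
        full_ids.append(top1)            # concat([full_ids, topk_indices], axis=1)
        full_scores.append(probs[top1])  # concat([full_scores, topk_scores], axis=1)
    return np.array(full_ids), np.array(full_scores)

ids, scores = greedy_decode(np.random.dirichlet(np.ones(6), size=4))
print(ids, scores)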
tools/infer_rec.py
@@ -79,34 +79,44 @@ def main():
     blobs = reader_main(config, 'test')()
     infer_img = config['TestReader']['infer_img']
+    loss_type = config['Global']['loss_type']
     infer_list = get_image_file_list(infer_img)
     max_img_num = len(infer_list)
     if len(infer_list) == 0:
         logger.info("Can not find img in infer_img dir.")
     for i in range(max_img_num):
-        print("infer_img:", infer_list[i])
+        logger.info("infer_img:%s" % infer_list[i])
         img = next(blobs)
         predict = exe.run(program=eval_prog,
                           feed={"image": img},
                           fetch_list=fetch_varname_list,
                           return_numpy=False)
-        preds = np.array(predict[0])
-        if preds.shape[1] == 1:
+        if loss_type == "ctc":
+            preds = np.array(predict[0])
             preds = preds.reshape(-1)
             preds_lod = predict[0].lod()[0]
             preds_text = char_ops.decode(preds)
-        else:
+            probs = np.array(predict[1])
+            ind = np.argmax(probs, axis=1)
+            blank = probs.shape[1]
+            valid_ind = np.where(ind != (blank - 1))[0]
+            score = np.mean(probs[valid_ind, ind[valid_ind]])
+        elif loss_type == "attention":
+            preds = np.array(predict[0])
+            probs = np.array(predict[1])
             end_pos = np.where(preds[0, :] == 1)[0]
             if len(end_pos) <= 1:
-                preds_text = preds[0, 1:]
+                preds = preds[0, 1:]
+                score = np.mean(probs[0, 1:])
             else:
-                preds_text = preds[0, 1:end_pos[1]]
-            preds_text = preds_text.reshape(-1)
-            preds_text = char_ops.decode(preds_text)
-        print("\tindex:", preds)
-        print("\tword :", preds_text)
+                preds = preds[0, 1:end_pos[1]]
+                score = np.mean(probs[0, 1:end_pos[1]])
+            preds = preds.reshape(-1)
+            preds_text = char_ops.decode(preds)
+        print("\tindex:", preds)
+        print("\tword :", preds_text)
+        print("\tscore :", score)

     # save for inference model
     target_var = []
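The infer_rec.py branches turn the fetched probabilities into one score per image: for CTC, take the argmax at each timestep, drop the timesteps that predict the blank class (the last column), and average the winning probabilities; for attention, average the probabilities of the tokens between the start token and the first end token (both encoded as id 1). A self-contained NumPy sketch of the same arithmetic, with made-up inputs:

# Hedged sketch of the scoring added above; inputs are synthetic.
import numpy as np

def ctc_score(probs):
    # probs: [T, C] softmax rows, last column = blank class
    ind = np.argmax(probs, axis=1)
    blank = probs.shape[1]
    valid_ind = np.where(ind != blank - 1)[0]           # non-blank timesteps
    return float(np.mean(probs[valid_ind, ind[valid_ind]]))

def attention_score(probs, preds):
    # preds: [1, T] decoded ids, id 1 marks the start/end token
    end_pos = np.where(preds[0, :] == 1)[0]
    if len(end_pos) <= 1:                               # no explicit end token
        return float(np.mean(probs[0, 1:]))
    return float(np.mean(probs[0, 1:end_pos[1]]))

ctc_probs = np.random.dirichlet(np.ones(5), size=8)     # 8 steps, 4 chars + blank
print("ctc score:", ctc_score(ctc_probs))

att_preds = np.array([[1, 3, 2, 4, 1]])                 # start, 3 chars, end
att_probs = np.random.rand(1, 5)
print("attention score:", attention_score(att_probs, att_preds))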