weixin_41840029 / PaddleOCR (forked from PaddlePaddle / PaddleOCR, in sync with the fork source)
Unverified commit 1b3cf0da
Authored on Dec 19, 2021 by Double_V, committed via GitHub on Dec 19, 2021
Merge pull request #4969 from WenmuZhou/fix_vqa
add encoding='utf-8'
Parents: 68deaab1, dc51469b
Showing 9 changed files with 27 additions and 13 deletions (+27, -13)
ppstructure/vqa/eval_ser.py                         +6  -2
ppstructure/vqa/helper/eval_with_label_end2end.py   +1  -1
ppstructure/vqa/helper/trans_xfun_data.py           +2  -2
ppstructure/vqa/infer_re.py                         +1  -1
ppstructure/vqa/infer_ser.py                        +7  -3
ppstructure/vqa/infer_ser_e2e.py                    +4  -1
ppstructure/vqa/infer_ser_re_e2e.py                 +4  -1
ppstructure/vqa/utils.py                            +1  -1
ppstructure/vqa/xfun.py                             +1  -1
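Why the change matters: Python 3's built-in open() falls back to the platform's locale encoding (locale.getpreferredencoding()) when no encoding is given, so UTF-8 label and result files can raise UnicodeDecodeError or be written incorrectly on systems whose default is not UTF-8 (for example GBK on a Chinese-locale Windows). The nine diffs below all apply the same one-argument fix. A minimal sketch of the before/after pattern, with a placeholder file name that is not from this commit:

    # Before: decoded with locale.getpreferredencoding(), which may not be UTF-8.
    with open("train_label.txt", "r") as fin:
        lines = fin.readlines()

    # After: always read and write with an explicit UTF-8 encoding.
    with open("train_label.txt", "r", encoding='utf-8') as fin:
        lines = fin.readlines()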
ppstructure/vqa/eval_ser.py
@@ -128,12 +128,16 @@ def evaluate(args,
         "f1": f1_score(out_label_list, preds_list),
     }
-    with open(os.path.join(args.output_dir, "test_gt.txt"), "w") as fout:
+    with open(os.path.join(args.output_dir, "test_gt.txt"), "w", encoding='utf-8') as fout:
         for lbl in out_label_list:
             for l in lbl:
                 fout.write(l + "\t")
             fout.write("\n")
-    with open(os.path.join(args.output_dir, "test_pred.txt"), "w") as fout:
+    with open(os.path.join(args.output_dir, "test_pred.txt"), "w", encoding='utf-8') as fout:
         for lbl in preds_list:
             for l in lbl:
                 fout.write(l + "\t")
ppstructure/vqa/helper/eval_with_label_end2end.py
@@ -37,7 +37,7 @@ def parse_ser_results_fp(fp, fp_type="gt", ignore_background=True):
     assert fp_type in ["gt", "pred"]
     key = "label" if fp_type == "gt" else "pred"
     res_dict = dict()
-    with open(fp, "r") as fin:
+    with open(fp, "r", encoding='utf-8') as fin:
         lines = fin.readlines()
     for _, line in enumerate(lines):
ppstructure/vqa/helper/trans_xfun_data.py
@@ -16,13 +16,13 @@ import json
 def transfer_xfun_data(json_path=None, output_file=None):
-    with open(json_path, "r") as fin:
+    with open(json_path, "r", encoding='utf-8') as fin:
         lines = fin.readlines()
     json_info = json.loads(lines[0])
     documents = json_info["documents"]
     label_info = {}
-    with open(output_file, "w") as fout:
+    with open(output_file, "w", encoding='utf-8') as fout:
         for idx, document in enumerate(documents):
             img_info = document["img"]
             document = document["document"]
ppstructure/vqa/infer_re.py
@@ -92,7 +92,7 @@ def infer(args):
 def load_ocr(img_folder, json_path):
     import json
     d = []
-    with open(json_path, "r") as fin:
+    with open(json_path, "r", encoding='utf-8') as fin:
         lines = fin.readlines()
         for line in lines:
             image_name, info_str = line.split("\t")
ppstructure/vqa/infer_ser.py
@@ -59,7 +59,8 @@ def pad_sentences(tokenizer,
             encoded_inputs["bbox"] = encoded_inputs["bbox"] + [[0, 0, 0, 0]
                                                                ] * difference
         else:
-            assert False, f"padding_side of tokenizer just supports [\"right\"] but got {tokenizer.padding_side}"
+            assert False, "padding_side of tokenizer just supports [\"right\"] but got {}".format(tokenizer.padding_side)
     else:
         if return_attention_mask:
             encoded_inputs["attention_mask"] = [1] * len(encoded_inputs[
@@ -224,7 +225,7 @@ def infer(args):
     # load ocr results json
     ocr_results = dict()
-    with open(args.ocr_json_path, "r") as fin:
+    with open(args.ocr_json_path, "r", encoding='utf-8') as fin:
         lines = fin.readlines()
         for line in lines:
             img_name, json_info = line.split("\t")
@@ -234,7 +235,10 @@ def infer(args):
     infer_imgs = get_image_file_list(args.infer_imgs)
     # loop for infer
-    with open(os.path.join(args.output_dir, "infer_results.txt"), "w") as fout:
+    with open(os.path.join(args.output_dir, "infer_results.txt"), "w", encoding='utf-8') as fout:
         for idx, img_path in enumerate(infer_imgs):
             print("process: [{}/{}]".format(idx, len(infer_imgs), img_path))
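Note that the first hunk above changes more than the file encoding: it also rewrites an f-string as str.format. The commit message gives no reason; a plausible one (an assumption, not stated in the source) is compatibility with Python versions older than 3.6, where f-strings are a syntax error. A small sketch of the equivalence, using a hypothetical padding_side value:

    padding_side = "left"  # hypothetical value, for illustration only

    # f-string form (requires Python >= 3.6)
    msg_f = f"padding_side of tokenizer just supports [\"right\"] but got {padding_side}"

    # str.format form, accepted by older Python 3 interpreters as well
    msg_fmt = ("padding_side of tokenizer just supports [\"right\"] but got {}"
               .format(padding_side))

    assert msg_f == msg_fmt  # both render the identical message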
ppstructure/vqa/infer_ser_e2e.py
@@ -113,7 +113,10 @@ if __name__ == "__main__":
     # loop for infer
     ser_engine = SerPredictor(args)
-    with open(os.path.join(args.output_dir, "infer_results.txt"), "w") as fout:
+    with open(os.path.join(args.output_dir, "infer_results.txt"), "w", encoding='utf-8') as fout:
         for idx, img_path in enumerate(infer_imgs):
             print("process: [{}/{}], {}".format(idx, len(infer_imgs), img_path))
ppstructure/vqa/infer_ser_re_e2e.py
@@ -112,7 +112,10 @@ if __name__ == "__main__":
     # loop for infer
     ser_re_engine = SerReSystem(args)
-    with open(os.path.join(args.output_dir, "infer_results.txt"), "w") as fout:
+    with open(os.path.join(args.output_dir, "infer_results.txt"), "w", encoding='utf-8') as fout:
         for idx, img_path in enumerate(infer_imgs):
             print("process: [{}/{}], {}".format(idx, len(infer_imgs), img_path))
ppstructure/vqa/utils.py
@@ -32,7 +32,7 @@ def set_seed(seed):
 def get_bio_label_maps(label_map_path):
-    with open(label_map_path, "r") as fin:
+    with open(label_map_path, "r", encoding='utf-8') as fin:
         lines = fin.readlines()
     lines = [line.strip() for line in lines]
     if "O" not in lines:
ppstructure/vqa/xfun.py
@@ -162,7 +162,7 @@ class XFUNDataset(Dataset):
         return encoded_inputs

     def read_all_lines(self, ):
-        with open(self.label_path, "r") as fin:
+        with open(self.label_path, "r", encoding='utf-8') as fin:
             lines = fin.readlines()
         return lines
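All nine files apply the same pattern, so any remaining open() call that still relies on the platform default is easy to miss by eye. As a hypothetical aid (not part of this commit), a short script built on Python's standard ast module can flag such call sites; the script name and usage are assumptions for illustration:

    import ast
    import sys

    def find_open_without_encoding(path):
        """Print locations of open(...) calls that pass no explicit encoding= keyword."""
        with open(path, "r", encoding="utf-8") as fin:
            tree = ast.parse(fin.read(), filename=path)
        for node in ast.walk(tree):
            if (isinstance(node, ast.Call) and isinstance(node.func, ast.Name) and
                    node.func.id == "open" and
                    not any(kw.arg == "encoding" for kw in node.keywords)):
                print("{}:{}: open() without explicit encoding".format(path, node.lineno))

    if __name__ == "__main__":
        # Usage (hypothetical): python check_encoding.py ppstructure/vqa/*.py
        for p in sys.argv[1:]:
            find_open_without_encoding(p)

Binary-mode open() calls (e.g. "rb") take no encoding and would be flagged as well, so this is only a rough heuristic, not a substitute for review.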