weixin_41840029 / PaddleOCR (forked from PaddlePaddle / PaddleOCR, in sync with the upstream project)
Unverified commit c4fcd143
Authored on Nov 26, 2020 by littletomatodonkey; committed via GitHub on Nov 26, 2020

refine dynamic sampling (#1256)

Parent: 169b629b
Showing 1 changed file with 17 additions and 49 deletions (+17, -49).
ppocr/data/simple_dataset.py (view file @ c4fcd143)

@@ -32,12 +32,10 @@ class SimpleDataSet(Dataset):
         self.delimiter = dataset_config.get('delimiter', '\t')
         label_file_list = dataset_config.pop('label_file_list')
         data_source_num = len(label_file_list)
-        if data_source_num == 1:
-            ratio_list = [1.0]
-        else:
-            ratio_list = dataset_config.pop('ratio_list')
+        ratio_list = dataset_config.get("ratio_list", [1.0])
+        if isinstance(ratio_list, (float, int)):
+            ratio_list = [float(ratio_list)] * len(data_source_num)

-        assert sum(ratio_list) == 1, "The sum of the ratio_list should be 1."
         assert len(
             ratio_list
         ) == data_source_num, "The length of ratio_list should be the same as the file_list."
@@ -45,62 +43,32 @@ class SimpleDataSet(Dataset):
         self.do_shuffle = loader_config['shuffle']

         logger.info("Initialize indexs of datasets:%s" % label_file_list)
-        self.data_lines_list, data_num_list = self.get_image_info_list(
-            label_file_list)
-        self.data_idx_order_list = self.dataset_traversal(
-            data_num_list, ratio_list, batch_size)
-        self.shuffle_data_random()
+        self.data_lines = self.get_image_info_list(label_file_list, ratio_list)
+        self.data_idx_order_list = list(range(len(self.data_lines)))
+        if mode.lower() == "train":
+            self.shuffle_data_random()

         self.ops = create_operators(dataset_config['transforms'],
                                     global_config)

-    def get_image_info_list(self, file_list):
+    def get_image_info_list(self, file_list, ratio_list):
         if isinstance(file_list, str):
             file_list = [file_list]
-        data_lines_list = []
-        data_num_list = []
-        for file in file_list:
+        data_lines = []
+        for idx, file in enumerate(file_list):
             with open(file, "rb") as f:
                 lines = f.readlines()
-                data_lines_list.append(lines)
-                data_num_list.append(len(lines))
-        return data_lines_list, data_num_list
-
-    def dataset_traversal(self, data_num_list, ratio_list, batch_size):
-        select_num_list = []
-        dataset_num = len(data_num_list)
-        for dno in range(dataset_num):
-            select_num = round(batch_size * ratio_list[dno])
-            select_num = max(select_num, 1)
-            select_num_list.append(select_num)
-        data_idx_order_list = []
-        cur_index_sets = [0] * dataset_num
-        while True:
-            finish_read_num = 0
-            for dataset_idx in range(dataset_num):
-                cur_index = cur_index_sets[dataset_idx]
-                if cur_index >= data_num_list[dataset_idx]:
-                    finish_read_num += 1
-                else:
-                    select_num = select_num_list[dataset_idx]
-                    for sno in range(select_num):
-                        cur_index = cur_index_sets[dataset_idx]
-                        if cur_index >= data_num_list[dataset_idx]:
-                            break
-                        data_idx_order_list.append((dataset_idx, cur_index))
-                        cur_index_sets[dataset_idx] += 1
-            if finish_read_num == dataset_num:
-                break
-        return data_idx_order_list
+                lines = random.sample(lines,
+                                      round(len(lines) * ratio_list[idx]))
+                data_lines.extend(lines)
+        return data_lines

     def shuffle_data_random(self):
         if self.do_shuffle:
-            for dno in range(len(self.data_lines_list)):
-                random.shuffle(self.data_lines_list[dno])
+            random.shuffle(self.data_lines)
         return

     def __getitem__(self, idx):
-        dataset_idx, file_idx = self.data_idx_order_list[idx]
-        data_line = self.data_lines_list[dataset_idx][file_idx]
+        file_idx = self.data_idx_order_list[idx]
+        data_line = self.data_lines[file_idx]
         try:
             data_line = data_line.decode('utf-8')
             substr = data_line.strip("\n").split(self.delimiter)
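This hunk removes the dataset_traversal scheme, which kept one line list per label file and built a (dataset_idx, cur_index) visiting order from batch_size and ratio_list, and instead subsamples each label file once with random.sample, flattening everything into a single data_lines list. Below is a self-contained sketch of that sampling step; the helper name, file paths, and usage are illustrative assumptions, not part of PaddleOCR.

import random

def sample_label_lines(file_list, ratio_list):
    """Read each label file and keep a random fraction of its lines,
    mirroring the refined get_image_info_list logic (illustrative sketch)."""
    if isinstance(file_list, str):
        file_list = [file_list]
    data_lines = []
    for idx, file in enumerate(file_list):
        with open(file, "rb") as f:
            lines = f.readlines()
            # Keep round(len(lines) * ratio) randomly chosen lines per file.
            lines = random.sample(lines, round(len(lines) * ratio_list[idx]))
            data_lines.extend(lines)
    return data_lines

# Hypothetical usage: keep every real label line but only 20% of the synthetic ones.
# data_lines = sample_label_lines(
#     ["train_data/real_label.txt", "train_data/synth_label.txt"], [1.0, 0.2])
# random.shuffle(data_lines)  # done by shuffle_data_random() in train mode only

The trade-off visible in the diff: ratio_list no longer controls per-batch mixing and no longer has to sum to 1; it now sets what fraction of each file enters the epoch, and ordinary shuffling of the flat list (applied only in train mode) mixes the sources.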