PaddlePaddle / PaddleClas

Commit a41a5bcb
Authored Mar 10, 2023 by gaotingquan; committed by Wei Shengyu, Mar 10, 2023

Commit message: debug

Parent: ab29eaa8
Showing 2 changed files with 78 additions and 75 deletions (+78 −75):

    ppcls/data/__init__.py      +77 −75
    ppcls/metric/__init__.py    +1  −0
ppcls/data/__init__.py

@@ -106,89 +106,91 @@ def build_dataloader(config, *mode, seed=None):
     # build dataset
     if use_dali:
         from ppcls.data.dataloader.dali import dali_dataloader
-        return dali_dataloader(
+        data_loader = dali_dataloader(
             dataloader_config,
             mode[-1],
             paddle.device.get_device(),
             num_threads=num_workers,
             seed=seed,
             enable_fuse=True)
-
-    config_dataset = dataloader_config['dataset']
-    config_dataset = copy.deepcopy(config_dataset)
-    dataset_name = config_dataset.pop('name')
-    if 'batch_transform_ops' in config_dataset:
-        batch_transform = config_dataset['batch_transform_ops']
-    else:
-        batch_transform = None
-
-    dataset = eval(dataset_name)(**config_dataset)
-    logger.debug("build dataset({}) success...".format(dataset))
-
-    # build sampler
-    config_sampler = dataloader_config['sampler']
-    if config_sampler and "name" not in config_sampler:
-        batch_sampler = None
-        batch_size = config_sampler["batch_size"]
-        drop_last = config_sampler["drop_last"]
-        shuffle = config_sampler["shuffle"]
-    else:
-        sampler_name = config_sampler.pop("name")
-        sampler_argspec = inspect.getargspec(eval(sampler_name).__init__).args
-        if "total_epochs" in sampler_argspec:
-            config_sampler.update({"total_epochs": epochs})
-        batch_sampler = eval(sampler_name)(dataset, **config_sampler)
-        logger.debug("build batch_sampler({}) success...".format(batch_sampler))
-
-    # build batch operator
-    def mix_collate_fn(batch):
-        batch = transform(batch, batch_ops)
-        # batch each field
-        slots = []
-        for items in batch:
-            for i, item in enumerate(items):
-                if len(slots) < len(items):
-                    slots.append([item])
-                else:
-                    slots[i].append(item)
-        return [np.stack(slot, axis=0) for slot in slots]
-
-    if isinstance(batch_transform, list):
-        batch_ops = create_operators(batch_transform, class_num)
-        batch_collate_fn = mix_collate_fn
-    else:
-        batch_collate_fn = None
-
-    init_fn = partial(
-        worker_init_fn,
-        num_workers=num_workers,
-        rank=dist.get_rank(),
-        seed=seed) if seed is not None else None
-
-    if batch_sampler is None:
-        data_loader = DataLoader(
-            dataset=dataset,
-            places=paddle.device.get_device(),
-            num_workers=num_workers,
-            return_list=True,
-            use_shared_memory=use_shared_memory,
-            batch_size=batch_size,
-            shuffle=shuffle,
-            drop_last=drop_last,
-            collate_fn=batch_collate_fn,
-            worker_init_fn=init_fn)
-    else:
-        data_loader = DataLoader(
-            dataset=dataset,
-            places=paddle.device.get_device(),
-            num_workers=num_workers,
-            return_list=True,
-            use_shared_memory=use_shared_memory,
-            batch_sampler=batch_sampler,
-            collate_fn=batch_collate_fn,
-            worker_init_fn=init_fn)
+    else:
+        config_dataset = dataloader_config['dataset']
+        config_dataset = copy.deepcopy(config_dataset)
+        dataset_name = config_dataset.pop('name')
+        if 'batch_transform_ops' in config_dataset:
+            batch_transform = config_dataset['batch_transform_ops']
+        else:
+            batch_transform = None
+
+        dataset = eval(dataset_name)(**config_dataset)
+        logger.debug("build dataset({}) success...".format(dataset))
+
+        # build sampler
+        config_sampler = dataloader_config['sampler']
+        if config_sampler and "name" not in config_sampler:
+            batch_sampler = None
+            batch_size = config_sampler["batch_size"]
+            drop_last = config_sampler["drop_last"]
+            shuffle = config_sampler["shuffle"]
+        else:
+            sampler_name = config_sampler.pop("name")
+            sampler_argspec = inspect.getargspec(
+                eval(sampler_name).__init__).args
+            if "total_epochs" in sampler_argspec:
+                config_sampler.update({"total_epochs": epochs})
+            batch_sampler = eval(sampler_name)(dataset, **config_sampler)
+            logger.debug("build batch_sampler({}) success...".format(
+                batch_sampler))
+
+        # build batch operator
+        def mix_collate_fn(batch):
+            batch = transform(batch, batch_ops)
+            # batch each field
+            slots = []
+            for items in batch:
+                for i, item in enumerate(items):
+                    if len(slots) < len(items):
+                        slots.append([item])
+                    else:
+                        slots[i].append(item)
+            return [np.stack(slot, axis=0) for slot in slots]
+
+        if isinstance(batch_transform, list):
+            batch_ops = create_operators(batch_transform, class_num)
+            batch_collate_fn = mix_collate_fn
+        else:
+            batch_collate_fn = None
+
+        init_fn = partial(
+            worker_init_fn,
+            num_workers=num_workers,
+            rank=dist.get_rank(),
+            seed=seed) if seed is not None else None
+
+        if batch_sampler is None:
+            data_loader = DataLoader(
+                dataset=dataset,
+                places=paddle.device.get_device(),
+                num_workers=num_workers,
+                return_list=True,
+                use_shared_memory=use_shared_memory,
+                batch_size=batch_size,
+                shuffle=shuffle,
+                drop_last=drop_last,
+                collate_fn=batch_collate_fn,
+                worker_init_fn=init_fn)
+        else:
+            data_loader = DataLoader(
+                dataset=dataset,
+                places=paddle.device.get_device(),
+                num_workers=num_workers,
+                return_list=True,
+                use_shared_memory=use_shared_memory,
+                batch_sampler=batch_sampler,
+                collate_fn=batch_collate_fn,
+                worker_init_fn=init_fn)
 
     total_samples = len(data_loader.dataset) if not use_dali else data_loader.size
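A note on the collate logic in the hunk above: mix_collate_fn groups the i-th field of every sample into its own slot and then stacks each slot along a new batch axis. The following is a minimal, self-contained sketch of that slot-stacking step (the batch transforms are omitted and the toy image/label data is invented for illustration); it is not part of the commit.

import numpy as np


def stack_fields(batch):
    # Same per-field slot logic as mix_collate_fn in the diff above:
    # slots[i] collects the i-th field of every sample, then each slot is
    # stacked along a new leading batch axis.
    slots = []
    for items in batch:
        for i, item in enumerate(items):
            if len(slots) < len(items):
                slots.append([item])
            else:
                slots[i].append(item)
    return [np.stack(slot, axis=0) for slot in slots]


# Toy batch of two (image, label) samples.
batch = [
    (np.zeros((3, 224, 224), dtype="float32"), np.array(0)),
    (np.ones((3, 224, 224), dtype="float32"), np.array(1)),
]
images, labels = stack_fields(batch)
print(images.shape, labels.shape)  # (2, 3, 224, 224) (2,)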
ppcls/metric/__init__.py

@@ -15,6 +15,7 @@
 import copy
 from collections import OrderedDict
 
+from ..utils import logger
 from .avg_metrics import AvgMetrics
 from .metrics import TopkAcc, mAP, mINP, Recallk, Precisionk
 from .metrics import DistillationTopkAcc
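The only change to this file is the new logger import. The diff does not show where it is used, but it presumably lets the metric-building code in this module emit messages in the same style as the logger.debug calls in the data-loader hunk above. A minimal, hypothetical usage sketch (the metric name below is a placeholder, not a value taken from this commit):

from ppcls.utils import logger

metric_name = "TopkAcc"  # placeholder metric class name
logger.debug("build metric({}) success...".format(metric_name))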