PaddlePaddle / PaddleClas
Commit a38e42f6
Authored Feb 22, 2023 by gaotingquan
Committed by Wei Shengyu, Mar 10, 2023
refactor: iter_per_epoch -> max_iter
Parent: 284e2a67
Showing 6 changed files with 21 additions and 22 deletions (+21 -22)
ppcls/data/__init__.py                       +10 -11
ppcls/engine/engine.py                        +1  -0
ppcls/engine/evaluation/classification.py     +2  -3
ppcls/engine/train/regular_train_epoch.py     +2  -2
ppcls/engine/train/utils.py                   +2  -2
ppcls/optimizer/__init__.py                   +4  -4
ppcls/data/__init__.py
@@ -204,6 +204,8 @@ class DataIterator(object):
         self.dataloader = dataloader
         self.use_dali = use_dali
         self.iterator = iter(dataloader)
+        self.max_iter = dataloader.max_iter
+        self.total_samples = dataloader.total_samples

     def get_batch(self):
         # fetch data batch from dataloader
@@ -234,7 +236,7 @@ def build_dataloader(engine):
             "epochs": engine.config["Global"]["epochs"]
         })
-    use_dali = engine.config['Global'].get("use_dali", False)
+    use_dali = engine.use_dali
     dataloader_dict = {
         "Train": None,
         "UnLabelTrain": None,
@@ -246,18 +248,15 @@ def build_dataloader(engine):
     if engine.mode == 'train':
         train_dataloader = build(
             engine.config["DataLoader"], "Train", use_dali, seed=None)
-        iter_per_epoch = len(train_dataloader) - 1 if platform.system(
-        ) == "Windows" else len(train_dataloader)
-        if engine.config["Global"].get("iter_per_epoch", None):
-            # TODO(gaotingquan): iter_per_epoch should be set in Dataloader.Train, not Global
+        if engine.config["DataLoader"]["Train"].get("max_iter", None):
             # set max iteration per epoch mannualy, when training by iteration(s), such as XBM, FixMatch.
-            iter_per_epoch = engine.config["Global"].get("iter_per_epoch")
-        iter_per_epoch = iter_per_epoch // engine.update_freq * engine.update_freq
-        # engine.iter_per_epoch = iter_per_epoch
-        train_dataloader.iter_per_epoch = iter_per_epoch
+            max_iter = engine.config["Train"].get("max_iter")
+        max_iter = train_dataloader.max_iter // engine.update_freq * engine.update_freq
+        train_dataloader.max_iter = max_iter
+        if engine.config["DataLoader"]["Train"].get("convert_iterator", True):
+            train_dataloader = DataIterator(train_dataloader, use_dali)
         dataloader_dict["Train"] = train_dataloader
-        # TODO(gaotingquan): set the iterator field in config, such as Dataloader.Train.convert_iterator=True
-        dataloader_dict["TrainIter"] = DataIterator(train_dataloader, use_dali)
     if engine.config["DataLoader"].get('UnLabelTrain', None) is not None:
         dataloader_dict["UnLabelTrain"] = build(
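The build_dataloader hunk above does two things: it rounds the per-epoch iteration bound down to a whole number of gradient-accumulation steps (max_iter // engine.update_freq * engine.update_freq), and it optionally wraps the raw dataloader in DataIterator so training code can pull batches via get_batch(). A minimal sketch of both ideas; SimpleDataIterator and align_max_iter are hypothetical stand-ins, not PaddleClas APIs, and the dataloader attributes they read are assumed to match the diff.

# Sketch only -- not the PaddleClas implementation.
class SimpleDataIterator:
    """Wraps a dataloader, mirroring the attributes the trainer reads."""

    def __init__(self, dataloader, use_dali=False):
        self.dataloader = dataloader
        self.use_dali = use_dali
        self.iterator = iter(dataloader)
        self.max_iter = dataloader.max_iter            # assumed attribute
        self.total_samples = dataloader.total_samples  # assumed attribute

    def get_batch(self):
        # fetch the next batch, restarting the iterator when the epoch ends
        try:
            return next(self.iterator)
        except StopIteration:
            self.iterator = iter(self.dataloader)
            return next(self.iterator)


def align_max_iter(max_iter, update_freq):
    # drop trailing iterations that cannot form a full accumulation step
    return max_iter // update_freq * update_freq


print(align_max_iter(103, 4))  # 100

With update_freq=4, a dataloader of 103 batches yields 100 usable iterations per epoch; the last three would never contribute to an optimizer step.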
ppcls/engine/engine.py
@@ -72,6 +72,7 @@ class Engine(object):
         self.update_freq = self.config["Global"].get("update_freq", 1)

         # build dataloader
+        self.use_dali = self.config["Global"].get("use_dali", False)
         self.dataloader_dict = build_dataloader(self)

         # build loss
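For reference, a minimal sketch of the pattern this one-line addition follows: read the flag once in the constructor and let downstream code test engine.use_dali instead of re-parsing the config dict. MiniEngine is a hypothetical stand-in, not the real Engine class.

# Hypothetical minimal stand-in for ppcls.engine.engine.Engine.
class MiniEngine:
    def __init__(self, config):
        self.config = config
        self.update_freq = config["Global"].get("update_freq", 1)
        # cached once; dataloader and evaluation code read this attribute
        self.use_dali = config["Global"].get("use_dali", False)


engine = MiniEngine({"Global": {"epochs": 10}})
assert engine.use_dali is False and engine.update_freq == 1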
ppcls/engine/evaluation/classification.py
@@ -92,8 +92,7 @@ def classification_eval(engine, epoch_id=0):
                 paddle.distributed.all_gather(pred_list, out)
                 preds = paddle.concat(pred_list, 0)

-                if accum_samples > total_samples and not engine.config[
-                        "Global"].get("use_dali", False):
+                if accum_samples > total_samples and not engine.use_dali:
                     if isinstance(preds, list):
                         preds = [
                             pred[:total_samples + current_samples - accum_samples]
@@ -152,7 +151,7 @@ def classification_eval(engine, epoch_id=0):
                     epoch_id, iter_id, max_iter, metric_msg, time_msg,
                     ips_msg))
             tic = time.time()
-    if engine.config["Global"].get("use_dali", False):
+    if engine.use_dali:
         engine.dataloader_dict["Eval"].reset()

     if "ATTRMetric" in engine.config["Metric"]["Eval"][0]:
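The condition being simplified here guards the usual trimming of distributed evaluation results: when the sampler pads the last batch so every rank does equal work, paddle.distributed.all_gather returns more rows than the dataset contains, and the surplus is sliced off. A standalone sketch of that arithmetic; trim_gathered and the numbers are illustrative, not PaddleClas code.

import numpy as np

def trim_gathered(preds, total_samples, current_samples, accum_samples):
    # keep only the rows of this step's gathered batch that belong to real
    # samples: total_samples + current_samples - accum_samples of them
    if accum_samples > total_samples:
        return preds[:total_samples + current_samples - accum_samples]
    return preds

gathered = np.zeros((32, 10))  # logits gathered from all ranks this step
trimmed = trim_gathered(gathered, total_samples=100,
                        current_samples=32, accum_samples=108)
print(trimmed.shape)  # (24, 10): the 8 padded rows are dropped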
ppcls/engine/train/regular_train_epoch.py
@@ -22,8 +22,8 @@ from ppcls.utils import profiler

 def regular_train_epoch(engine, epoch_id, print_batch_step):
     tic = time.time()

-    for iter_id in range(engine.dataloader_dict["Train"].iter_per_epoch):
-        batch = engine.dataloader_dict["TrainIter"].get_batch()
+    for iter_id in range(engine.dataloader_dict["Train"].max_iter):
+        batch = engine.dataloader_dict["Train"].get_batch()

         profiler.add_profiler_step(engine.config["profiler_options"])
         if iter_id == 5:
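After the rename, the epoch loop reads both its bound and its batches from the single "Train" entry instead of a separate "TrainIter" wrapper. A condensed sketch of the loop shape; mini_train_epoch is illustrative, and engine.dataloader_dict is assumed to look as in the diff.

def mini_train_epoch(engine, epoch_id):
    train_loader = engine.dataloader_dict["Train"]  # a DataIterator-style object
    for iter_id in range(train_loader.max_iter):
        batch = train_loader.get_batch()
        # forward pass, loss, backward and optimizer step omitted in this sketch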
ppcls/engine/train/utils.py
@@ -55,13 +55,13 @@ def log_info(trainer, batch_size, epoch_id, iter_id):
         batch_size / trainer.time_info["batch_cost"].avg)

     eta_sec = ((trainer.config["Global"]["epochs"] - epoch_id + 1
-                ) * trainer.dataloader_dict["Train"].iter_per_epoch - iter_id
+                ) * trainer.dataloader_dict["Train"].max_iter - iter_id
                ) * trainer.time_info["batch_cost"].avg
     eta_msg = "eta: {:s}".format(str(datetime.timedelta(seconds=int(eta_sec))))
     logger.info(
         "[Train][Epoch {}/{}][Iter: {}/{}]{}, {}, {}, {}, {}".format(
             epoch_id, trainer.config["Global"]["epochs"], iter_id,
             trainer.dataloader_dict["Train"]
-            .iter_per_epoch, lr_msg, metric_msg, time_msg, ips_msg, eta_msg))
+            .max_iter, lr_msg, metric_msg, time_msg, ips_msg, eta_msg))
     for i, lr in enumerate(trainer.lr_sch):
         logger.scaler(
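The ETA estimate multiplies the remaining iteration count by the running average batch cost. A self-contained sketch with made-up numbers; eta_message is illustrative, not the log_info helper itself.

import datetime

def eta_message(epochs, epoch_id, max_iter, iter_id, avg_batch_cost):
    # iterations left in this and all remaining epochs, times seconds per iteration
    eta_sec = ((epochs - epoch_id + 1) * max_iter - iter_id) * avg_batch_cost
    return "eta: {:s}".format(str(datetime.timedelta(seconds=int(eta_sec))))

print(eta_message(epochs=100, epoch_id=1, max_iter=500, iter_id=20,
                  avg_batch_cost=0.25))  # eta: 3:28:15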
ppcls/optimizer/__init__.py
@@ -48,12 +48,12 @@ def build_lr_scheduler(lr_config, epochs, step_each_epoch):
 def build_optimizer(engine):
     if engine.mode != "train":
         return None, None
-    config, iter_per_epoch, model_list = engine.config, engine.dataloader_dict[
-        "Train"].iter_per_epoch, [engine.mode, engine.train_loss_func]
+    config, max_iter, model_list = engine.config, engine.dataloader_dict[
+        "Train"].max_iter, [engine.model, engine.train_loss_func]
     optim_config = copy.deepcopy(config["Optimizer"])
     epochs = config["Global"]["epochs"]
-    update_freq = config["Global"].get("update_freq", 1)
-    step_each_epoch = iter_per_epoch // update_freq
+    update_freq = engine.update_freq
+    step_each_epoch = max_iter // update_freq
     if isinstance(optim_config, dict):
         # convert {'name': xxx, **optim_cfg} to [{name: {scope: xxx, **optim_cfg}}]
         optim_name = optim_config.pop("name")
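With gradient accumulation the optimizer steps once every update_freq iterations, so the LR scheduler must be built over max_iter // update_freq steps per epoch rather than the raw dataloader length. A sketch under that assumption; the numbers are invented, and while paddle.optimizer.lr.CosineAnnealingDecay is standard Paddle API, this is not the PaddleClas build_lr_scheduler.

import paddle

max_iter, update_freq, epochs = 500, 4, 10
step_each_epoch = max_iter // update_freq      # 125 optimizer steps per epoch
lr_sched = paddle.optimizer.lr.CosineAnnealingDecay(
    learning_rate=0.1,
    T_max=step_each_epoch * epochs)            # anneal over all optimizer steps
print(step_each_epoch)  # 125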