PaddlePaddle / PaddleClas
Commit 45b8b569 (unverified), authored Apr 12, 2022 by gaotingquan

fix: fix bug about calc loss in dist

Parent: 255d7c3e
Showing 1 changed file with 50 additions and 46 deletions.

ppcls/engine/evaluation/classification.py  (+50, -46)
@@ -66,31 +66,14 @@ def classification_eval(engine, epoch_id=0):
                     },
                     level=amp_level):
                 out = engine.model(batch[0])
-                # calc loss
-                if engine.eval_loss_func is not None:
-                    loss_dict = engine.eval_loss_func(out, batch[1])
-                    for key in loss_dict:
-                        if key not in output_info:
-                            output_info[key] = AverageMeter(key, '7.5f')
-                        output_info[key].update(loss_dict[key].numpy()[0],
-                                                batch_size)
         else:
             out = engine.model(batch[0])
-            # calc loss
-            if engine.eval_loss_func is not None:
-                loss_dict = engine.eval_loss_func(out, batch[1])
-                for key in loss_dict:
-                    if key not in output_info:
-                        output_info[key] = AverageMeter(key, '7.5f')
-                    output_info[key].update(loss_dict[key].numpy()[0],
-                                            batch_size)
 
         # just for DistributedBatchSampler issue: repeat sampling
         current_samples = batch_size * paddle.distributed.get_world_size()
         accum_samples += current_samples
 
-        # calc metric
-        if engine.eval_metric_func is not None:
         # gather Tensor when distributed
         if paddle.distributed.get_world_size() > 1:
             label_list = []
             paddle.distributed.all_gather(label_list, batch[1])
@@ -99,33 +82,54 @@ def classification_eval(engine, epoch_id=0):
             if isinstance(out, dict):
                 if "Student" in out:
                     out = out["Student"]
                     if isinstance(out, dict):
                         out = out["logits"]
                 elif "logits" in out:
                     out = out["logits"]
                 else:
                     msg = "Error: Wrong key in out!"
                     raise Exception(msg)
             if isinstance(out, list):
-                pred = []
+                preds = []
                 for x in out:
                     pred_list = []
                     paddle.distributed.all_gather(pred_list, x)
                     pred_x = paddle.concat(pred_list, 0)
-                    pred.append(pred_x)
+                    preds.append(pred_x)
             else:
                 pred_list = []
                 paddle.distributed.all_gather(pred_list, out)
-                pred = paddle.concat(pred_list, 0)
+                preds = paddle.concat(pred_list, 0)
 
             if accum_samples > total_samples and not engine.use_dali:
-                pred = pred[:total_samples + current_samples - accum_samples]
+                preds = preds[:total_samples + current_samples - accum_samples]
                 labels = labels[:total_samples + current_samples - accum_samples]
                 current_samples = total_samples + current_samples - accum_samples
-            metric_dict = engine.eval_metric_func(pred, labels)
         else:
-            metric_dict = engine.eval_metric_func(out, batch[1])
+            labels = batch[1]
+            preds = out
+
+        # calc loss
+        if engine.eval_loss_func is not None:
+            if engine.amp and engine.config["AMP"].get("use_fp16_test", False):
+                amp_level = engine.config['AMP'].get("level", "O1").upper()
+                with paddle.amp.auto_cast(
+                        custom_black_list={
+                            "flatten_contiguous_range", "greater_than"
+                        },
+                        level=amp_level):
+                    loss_dict = engine.eval_loss_func(preds, labels)
+            else:
+                loss_dict = engine.eval_loss_func(preds, labels)
+            for key in loss_dict:
+                if key not in output_info:
+                    output_info[key] = AverageMeter(key, '7.5f')
+                output_info[key].update(loss_dict[key].numpy()[0], batch_size)
+
+        # calc metric
+        if engine.eval_metric_func is not None:
+            metric_dict = engine.eval_metric_func(preds, labels)
         for key in metric_dict:
             if metric_key is None:
                 metric_key = key
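The substance of the fix: the evaluation loss is no longer computed per rank on the raw model output and `batch[1]`, but on the same gathered (and, when needed, trimmed) `preds` and `labels` that the metric computation uses. The following is a minimal stand-alone sketch of that gather-then-compute pattern, not the PaddleClas engine itself; the Linear model and CrossEntropyLoss are stand-ins for `engine.model` and `engine.eval_loss_func`, and the script is assumed to be launched with `paddle.distributed.launch` when more than one rank is used.

# Sketch of the gather-then-compute pattern this commit moves to.
# Stand-ins: a toy Linear model and CrossEntropyLoss replace engine.model
# and engine.eval_loss_func. Launch with e.g.
#   python -m paddle.distributed.launch --gpus=0,1 sketch.py
import paddle
import paddle.distributed as dist

if dist.get_world_size() > 1:
    dist.init_parallel_env()

model = paddle.nn.Linear(8, 4)
loss_fn = paddle.nn.CrossEntropyLoss()

x = paddle.randn([3, 8])              # per-rank eval batch
y = paddle.randint(0, 4, [3])         # per-rank labels

out = model(x)

if dist.get_world_size() > 1:
    # gather predictions and labels from every rank, as the diff does
    pred_list, label_list = [], []
    dist.all_gather(pred_list, out)
    dist.all_gather(label_list, y)
    preds = paddle.concat(pred_list, 0)
    labels = paddle.concat(label_list, 0)
else:
    preds, labels = out, y

# the loss now sees exactly the tensors the metrics see
loss = loss_fn(preds, labels)
print(dist.get_rank(), float(loss))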
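DistributedBatchSampler pads the dataset so every rank runs the same number of batches, which means the last iteration can repeat a few samples; `current_samples`, `accum_samples`, and the `[:total_samples + current_samples - accum_samples]` slices exist to drop those duplicates after gathering. A plain-Python sketch of that bookkeeping, with made-up sizes:

# Hypothetical sizes: 22 real samples, 4 ranks, per-rank batch size 3,
# so two eval steps gather 24 samples and the last 2 are padded repeats.
total_samples = 22
world_size = 4
batch_size = 3

accum_samples = 0
for step in range(2):
    current_samples = batch_size * world_size   # gathered this step
    accum_samples += current_samples
    if accum_samples > total_samples:
        # keep only the real samples of this gathered batch; this is the
        # same slice bound used in the diff
        keep = total_samples + current_samples - accum_samples
    else:
        keep = current_samples
    print(step, current_samples, keep)   # step 0: keep 12, step 1: keep 10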
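The new loss branch also respects `AMP.use_fp16_test`: when it is set, the loss call runs under `paddle.amp.auto_cast` with the same `custom_black_list` as the forward pass. Below is a minimal single-process sketch of that branch with a toy model and loss; `use_fp16_test` here is a plain local flag, not the engine config.

import paddle

model = paddle.nn.Linear(8, 4)                # stand-in for the eval model
loss_fn = paddle.nn.CrossEntropyLoss()        # stand-in for eval_loss_func

x = paddle.randn([2, 8])
y = paddle.to_tensor([1, 3])

use_fp16_test = True                          # mirrors AMP.use_fp16_test
if use_fp16_test:
    # same auto_cast arguments as in the diff; "O1" keeps blacklisted
    # ops in fp32 while the rest may run in fp16
    with paddle.amp.auto_cast(
            custom_black_list={"flatten_contiguous_range", "greater_than"},
            level="O1"):
        out = model(x)
        loss = loss_fn(out, y)
else:
    out = model(x)
    loss = loss_fn(out, y)
print(float(loss))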