PaddlePaddle / PaddleClas · Commit f45f9ee4
Commit f45f9ee4 (unverified)
Authored on Dec 21, 2021 by Wei Shengyu; committed via GitHub on Dec 21, 2021

Merge pull request #1585 from zhangbo9674/dev/resnet50_optimize

Accelerate dynamic graph amp training

Parents: 08da1d44, 558f03d6
Changes: 3 changed files with 20 additions and 6 deletions (+20 −6). Together, the three files add an optional pure-fp16 (O2) AMP path and a multi-tensor Momentum update to accelerate dynamic-graph AMP training:

ppcls/engine/engine.py (+2 −0)
ppcls/engine/train/train.py (+7 −5)
ppcls/optimizer/optimizer.py (+11 −1)
ppcls/engine/engine.py

@@ -250,6 +250,8 @@ class Engine(object):
             self.scaler = paddle.amp.GradScaler(
                 init_loss_scaling=self.scale_loss,
                 use_dynamic_loss_scaling=self.use_dynamic_loss_scaling)
+            if self.config['AMP']['use_pure_fp16'] is True:
+                self.model = paddle.amp.decorate(models=self.model, level='O2')
 
         self.max_iter = len(self.train_dataloader) - 1 if platform.system(
         ) == "Windows" else len(self.train_dataloader)
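The two added lines opt the model into pure-fp16 training: when the AMP config requests it, the model is wrapped with paddle.amp.decorate at level 'O2'. A minimal sketch of the same setup pattern, with placeholder hyperparameters standing in for PaddleClas's config values:

    import paddle

    # Sketch of the setup this hunk enables (values are placeholders; the
    # Engine reads them from its AMP config section).
    model = paddle.vision.models.resnet50()

    scaler = paddle.amp.GradScaler(
        init_loss_scaling=1024.0,
        use_dynamic_loss_scaling=True)

    use_pure_fp16 = True  # stands in for config['AMP']['use_pure_fp16']
    if use_pure_fp16:
        # Level 'O2' casts parameters to fp16 up front (norm layers stay
        # fp32), so forward/backward run almost entirely in half precision.
        model = paddle.amp.decorate(models=model, level='O2')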
ppcls/engine/train/train.py

@@ -21,6 +21,7 @@ from ppcls.utils import profiler
 def train_epoch(engine, epoch_id, print_batch_step):
     tic = time.time()
+    v_current = [int(i) for i in paddle.__version__.split(".")]
     for iter_id, batch in enumerate(engine.train_dataloader):
         if iter_id >= engine.max_iter:
             break
@@ -41,14 +42,15 @@ def train_epoch(engine, epoch_id, print_batch_step):
         # image input
         if engine.amp:
-            with paddle.amp.auto_cast(custom_black_list={
-                    "flatten_contiguous_range", "greater_than"
-            }):
+            amp_level = 'O1'
+            if engine.config['AMP']['use_pure_fp16'] is True:
+                amp_level = 'O2'
+            with paddle.amp.auto_cast(
+                    custom_black_list={
+                        "flatten_contiguous_range", "greater_than"
+                    },
+                    level=amp_level):
                 out = forward(engine, batch)
                 loss_dict = engine.train_loss_func(out, batch[1])
         else:
             out = forward(engine, batch)
             loss_dict = engine.train_loss_func(out, batch[1])
 
         # step opt and lr
         if engine.amp:
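With the level argument threaded through auto_cast, one AMP training step looks roughly like the sketch below. The model, loss, data, and hyperparameters are stand-ins, not PaddleClas APIs; the scaler corresponds to the GradScaler created in engine.py:

    import paddle

    model = paddle.nn.Linear(16, 4)
    loss_fn = paddle.nn.CrossEntropyLoss()
    optimizer = paddle.optimizer.Momentum(
        learning_rate=0.1, parameters=model.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024.0)

    x = paddle.randn([8, 16])
    y = paddle.randint(0, 4, [8])

    amp_level = 'O1'  # 'O2' when config['AMP']['use_pure_fp16'] is True

    # Ops in custom_black_list always execute in fp32, even inside auto_cast.
    with paddle.amp.auto_cast(
            custom_black_list={"flatten_contiguous_range", "greater_than"},
            level=amp_level):
        out = model(x)
        loss = loss_fn(out, y)

    # Scale the loss before backward so fp16 gradients do not underflow,
    # then let the scaler unscale the grads and apply the update.
    scaled = scaler.scale(loss)
    scaled.backward()
    scaler.minimize(optimizer, scaled)
    optimizer.clear_grad()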
ppcls/optimizer/optimizer.py

@@ -17,6 +17,7 @@ from __future__ import division
 from __future__ import print_function
 
 from paddle import optimizer as optim
+import paddle
 
 from ppcls.utils import logger
@@ -36,7 +37,7 @@ class Momentum(object):
                  momentum,
                  weight_decay=None,
                  grad_clip=None,
-                 multi_precision=False):
+                 multi_precision=True):
         super().__init__()
         self.learning_rate = learning_rate
         self.momentum = momentum
@@ -55,6 +56,15 @@ class Momentum(object):
             grad_clip=self.grad_clip,
             multi_precision=self.multi_precision,
             parameters=parameters)
+        if hasattr(opt, '_use_multi_tensor'):
+            opt = optim.Momentum(
+                learning_rate=self.learning_rate,
+                momentum=self.momentum,
+                weight_decay=self.weight_decay,
+                grad_clip=self.grad_clip,
+                multi_precision=self.multi_precision,
+                parameters=parameters,
+                use_multi_tensor=True)
         return opt
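The optimizer change flips multi_precision on by default (fp32 master weights alongside fp16 parameters) and, when the installed Paddle exposes it, rebuilds the optimizer with use_multi_tensor=True. A sketch of the same probe-and-rebuild pattern, with a placeholder model:

    import paddle
    from paddle import optimizer as optim

    model = paddle.nn.Linear(16, 4)

    opt = optim.Momentum(
        learning_rate=0.1,
        momentum=0.9,
        multi_precision=True,  # keep fp32 master copies of fp16 params
        parameters=model.parameters())

    # Older Paddle releases lack the multi-tensor path; probing the private
    # _use_multi_tensor attribute keeps the code backward compatible.
    if hasattr(opt, '_use_multi_tensor'):
        # Multi-tensor mode batches per-parameter updates into fewer kernel
        # launches, which is where the AMP training speedup comes from.
        opt = optim.Momentum(
            learning_rate=0.1,
            momentum=0.9,
            multi_precision=True,
            parameters=model.parameters(),
            use_multi_tensor=True)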