BaiXuePrincess / Paddle · Commit 419fee27
Forked from PaddlePaddle / Paddle (in sync with the fork source)
Commit 419fee27
Authored Sep 23, 2020 by mapingshuo
revert mix_precision
Parent: f35c8ce6
Showing 1 changed file with 14 additions and 18 deletions (+14 −18)
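Context for the diff below: the revert strips the framework.name_scope('mixed_precision') wrappers that an earlier change had added around the AMP logic. In Paddle's fluid API, name_scope is a context manager that only groups the operators created inside it under a readable name prefix, for debugging and graph visualization, so dropping it should not change the computation. A minimal sketch of the pattern, assuming the Paddle 1.x fluid static-graph API (illustrative only, not code from this commit):

import paddle.fluid as fluid

main_prog = fluid.Program()
with fluid.program_guard(main_prog):
    x = fluid.data(name="x", shape=[None, 8], dtype="float32")
    # name_scope only prefixes the names of operators created inside it,
    # which helps when inspecting or visualizing the program graph;
    # the result computed by the fc layer is unchanged.
    with fluid.name_scope("mixed_precision"):
        y = fluid.layers.fc(input=x, size=4)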
python/paddle/fluid/contrib/mixed_precision/decorator.py (+14 −18)
@@ -16,7 +16,6 @@ from ... import default_main_program
 from ... import default_startup_program
 from ... import layers
 from ... import unique_name
-from ... import framework
 from . import fp16_utils
 from .fp16_utils import rewrite_program
 from .fp16_utils import update_role_var_grad
@@ -133,8 +132,7 @@ class OptimizerWithMixedPrecision(object):
             gradient respectively, and the scaled loss.
         """
         rewrite_program(self._train_program, self._amp_lists)
-        with framework.name_scope('mixed_precision'):
-            self._scaled_loss = loss * self._loss_scaling
+        self._scaled_loss = loss * self._loss_scaling
         self._params_grads = self._optimizer.backward(
             self._scaled_loss, startup_program, parameter_list, no_grad_set,
             callbacks)
@@ -158,24 +156,22 @@ class OptimizerWithMixedPrecision(object):
 
         grads = [g for _, g in params_grads]
         with self._train_program._optimized_guard(grads):
-            with framework.name_scope('mixed_precision'):
-                grads, found_inf = check_finite_and_unscale(
-                    grads, self._loss_scaling, name="find_infinite_scale")
+            grads, found_inf = check_finite_and_unscale(
+                grads, self._loss_scaling, name="find_infinite_scale")
 
         if self._use_dynamic_loss_scaling:
             with self._train_program._optimized_guard(grads):
-                with framework.name_scope('mixed_precision'):
-                    grads = update_loss_scaling(
-                        grads,
-                        found_inf,
-                        self._loss_scaling,
-                        self._num_good_steps,
-                        self._num_bad_steps,
-                        self._incr_every_n_steps,
-                        self._decr_every_n_nan_or_inf,
-                        self._incr_ratio,
-                        self._decr_ratio,
-                        name="update_loss_scaling")
+                grads = update_loss_scaling(
+                    grads,
+                    found_inf,
+                    self._loss_scaling,
+                    self._num_good_steps,
+                    self._num_bad_steps,
+                    self._incr_every_n_steps,
+                    self._decr_every_n_nan_or_inf,
+                    self._incr_ratio,
+                    self._decr_ratio,
+                    name="update_loss_scaling")
 
         params_unscaled_grads = []
         for pg, new_g in zip(params_grads, grads):
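The hunks above are the core of AMP loss scaling: the loss is multiplied by self._loss_scaling before backward so that small fp16 gradients do not underflow, and check_finite_and_unscale then tests the gradients for inf/nan and divides the scale back out. A small numpy demonstration of the underflow problem this addresses (illustrative numbers, not from the commit):

import numpy as np

# fp16 cannot represent values below roughly 6e-8, so a small but
# legitimate gradient is flushed to zero unless the loss (and hence
# every gradient, by the chain rule) is scaled up first.
true_grad = np.float32(1e-8)
print(np.float16(true_grad))            # 0.0 -- underflows in fp16

scale = np.float32(32768.0)             # a typical loss_scaling value
scaled = np.float16(true_grad * scale)  # ~3.28e-4, survives in fp16
print(np.float32(scaled) / scale)       # ~1e-8, recovered after unscaling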
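The update_loss_scaling call in the last hunk adjusts the scale from the found_inf flag produced by check_finite_and_unscale. A framework-agnostic sketch of the usual dynamic loss-scaling policy behind those arguments (num_good_steps, num_bad_steps, incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio); this illustrates the technique only and is not Paddle's internal operator:

def update_loss_scaling(found_inf, state):
    """Dynamic loss-scaling policy (conceptual sketch).

    state holds: loss_scaling, num_good_steps, num_bad_steps,
    incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio.
    """
    if found_inf:
        # Overflow step: count it, and shrink the scale once enough
        # consecutive bad steps have accumulated.
        state["num_good_steps"] = 0
        state["num_bad_steps"] += 1
        if state["num_bad_steps"] >= state["decr_every_n_nan_or_inf"]:
            # Clamping at 1.0 is an assumption of this sketch.
            state["loss_scaling"] = max(
                1.0, state["loss_scaling"] * state["decr_ratio"])
            state["num_bad_steps"] = 0
    else:
        # Clean step: after incr_every_n_steps of them in a row,
        # grow the scale to reclaim fp16 dynamic range.
        state["num_bad_steps"] = 0
        state["num_good_steps"] += 1
        if state["num_good_steps"] >= state["incr_every_n_steps"]:
            state["loss_scaling"] *= state["incr_ratio"]
            state["num_good_steps"] = 0
    return state

On an overflow step the scaled gradients are unusable, so implementations of this scheme typically also skip the parameter update for that iteration.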