Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
37bb3342
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
37bb3342
编写于
4月 21, 2021
作者:
Y
Yuang Liu
提交者:
GitHub
4月 21, 2021
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add get_loss_scaling to fleet (#32401)
上级
2b68d20b
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
24 additions
and
15 deletions
+24
-15
python/paddle/distributed/fleet/base/fleet_base.py
python/paddle/distributed/fleet/base/fleet_base.py
+21
-15
python/paddle/fluid/contrib/mixed_precision/decorator.py
python/paddle/fluid/contrib/mixed_precision/decorator.py
+1
-0
python/paddle/fluid/tests/unittests/test_fleet_amp_init.py
python/paddle/fluid/tests/unittests/test_fleet_amp_init.py
+2
-0
未找到文件。
python/paddle/distributed/fleet/base/fleet_base.py
浏览文件 @
37bb3342
...
@@ -1041,6 +1041,26 @@ class Fleet(object):
...
@@ -1041,6 +1041,26 @@ class Fleet(object):
# imitate target optimizer retrieval
# imitate target optimizer retrieval
return
self
.
user_defined_optimizer
.
clear_grad
()
return
self
.
user_defined_optimizer
.
clear_grad
()
def
_get_amp_optimizer
(
self
):
# imitate target optimizer retrieval
amp_optimizer
=
None
for
optimizer
in
self
.
strategy_compiler
.
_get_applied_meta_optimizer
():
if
hasattr
(
optimizer
,
'amp_init'
):
amp_optimizer
=
optimizer
break
if
amp_optimizer
is
None
:
if
hasattr
(
self
.
user_defined_optimizer
,
'amp_init'
):
amp_optimizer
=
self
.
user_defined_optimizer
assert
amp_optimizer
is
not
None
,
\
"amp_init can only be used when the amp(auto mixed precision) strategy is turned on."
return
amp_optimizer
def get_loss_scaling(self):
    """Return the real-time loss scaling factor.

    Delegates to the AMP optimizer located by ``_get_amp_optimizer``;
    raises (via that helper) when the amp strategy is not turned on.
    """
    return self._get_amp_optimizer().get_loss_scaling()
def
amp_init
(
self
,
def
amp_init
(
self
,
place
,
place
,
scope
=
None
,
scope
=
None
,
...
@@ -1101,21 +1121,7 @@ class Fleet(object):
...
@@ -1101,21 +1121,7 @@ class Fleet(object):
if paddle.is_compiled_with_cuda() and len(paddle.static.cuda_places()) > 0:
if paddle.is_compiled_with_cuda() and len(paddle.static.cuda_places()) > 0:
run_example_code()
run_example_code()
"""
"""
amp_optimizer
=
self
.
_get_amp_optimizer
()
# imitate target optimizer retrieval
amp_optimizer
=
None
for
optimizer
in
self
.
strategy_compiler
.
_get_applied_meta_optimizer
():
if
hasattr
(
optimizer
,
'amp_init'
):
amp_optimizer
=
optimizer
break
if
amp_optimizer
is
None
:
if
hasattr
(
self
.
user_defined_optimizer
,
'amp_init'
):
amp_optimizer
=
self
.
user_defined_optimizer
assert
amp_optimizer
is
not
None
,
\
"amp_init can only be used when the amp(auto mixed precision) strategy is turned on."
return
amp_optimizer
.
amp_init
(
place
,
scope
,
test_program
,
use_fp16_test
)
return
amp_optimizer
.
amp_init
(
place
,
scope
,
test_program
,
use_fp16_test
)
def
_final_strategy
(
self
):
def
_final_strategy
(
self
):
...
...
python/paddle/fluid/contrib/mixed_precision/decorator.py
浏览文件 @
37bb3342
...
@@ -98,6 +98,7 @@ class OptimizerWithMixedPrecision(object):
...
@@ -98,6 +98,7 @@ class OptimizerWithMixedPrecision(object):
def get_loss_scaling(self):
    """Return the real-time loss scaling factor.

    Raises:
        AssertionError: if ``minimize()`` has not been called yet, so
            no loss scaling variable exists.
    """
    scaling = self._loss_scaling
    assert scaling is not None, \
        'Call minimize() before calling get_loss_scaling()'
    return scaling
def
get_scaled_loss
(
self
):
def
get_scaled_loss
(
self
):
...
...
python/paddle/fluid/tests/unittests/test_fleet_amp_init.py
浏览文件 @
37bb3342
...
@@ -70,6 +70,8 @@ class TestFleetAMPInit(unittest.TestCase):
...
@@ -70,6 +70,8 @@ class TestFleetAMPInit(unittest.TestCase):
optimizer
=
fleet
.
distributed_optimizer
(
optimizer
)
optimizer
=
fleet
.
distributed_optimizer
(
optimizer
)
optimizer
.
minimize
(
cost
)
optimizer
.
minimize
(
cost
)
loss_scale
=
optimizer
.
get_loss_scaling
()
place
=
paddle
.
CUDAPlace
(
0
)
place
=
paddle
.
CUDAPlace
(
0
)
exe
=
paddle
.
static
.
Executor
(
place
)
exe
=
paddle
.
static
.
Executor
(
place
)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录