Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)

Commit 4e8f18ab (unverified)
Authored Sep 28, 2020 by Dong Daxiang; committed via GitHub on Sep 28, 2020
Parent: d01f6269

Get final strategy (#27602)

* add get final strategy for user to print final strategy
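For context, a minimal usage sketch of the new API. The model setup mirrors the unit tests updated in this commit; the cluster environment variables read by PaddleCloudRoleMaker are assumed to be set by the launcher.

    import paddle
    import paddle.distributed.fleet as fleet
    import paddle.distributed.fleet.base.role_maker as role_maker

    paddle.enable_static()
    fleet.init(role_maker.PaddleCloudRoleMaker())

    # Tiny network, as in the tests touched by this commit.
    input_x = paddle.fluid.layers.data(name="x", shape=[32], dtype='float32')
    input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64')
    fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh')
    prediction = paddle.fluid.layers.fc(input=fc_1, size=2, act='softmax')
    cost = paddle.fluid.layers.cross_entropy(input=prediction, label=input_y)
    avg_cost = paddle.fluid.layers.mean(x=cost)

    strategy = fleet.DistributedStrategy()
    strategy.auto = True
    optimizer = paddle.optimizer.SGD(learning_rate=0.1)
    optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
    optimizer.minimize(avg_cost)

    # New in this commit: print the strategy that was actually applied.
    print(fleet._final_strategy())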
Showing 11 changed files with 50 additions and 19 deletions (+50 -19).
python/paddle/distributed/fleet/__init__.py                                          +1  -0
python/paddle/distributed/fleet/base/distributed_strategy.py                         +1  -2
python/paddle/distributed/fleet/base/fleet_base.py                                   +30 -11
python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto.py         +2  -2
python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_async.py   +2  -2
python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_geo.py     +2  -2
python/paddle/fluid/tests/unittests/test_fleet_amp_meta_optimizer.py                 +4  -0
python/paddle/fluid/tests/unittests/test_fleet_auto.py                               +2  -0
python/paddle/fluid/tests/unittests/test_fleet_base.py                               +2  -0
python/paddle/fluid/tests/unittests/test_fleet_lamb_meta_optimizer.py                +2  -0
python/paddle/fluid/tests/unittests/test_fleet_lars_meta_optimizer.py                +2  -0
python/paddle/distributed/fleet/__init__.py

@@ -30,6 +30,7 @@ __all__ = [
 ]

 fleet = Fleet()
+_final_strategy = fleet._final_strategy
 init = fleet.init
 is_first_worker = fleet.is_first_worker
 worker_index = fleet.worker_index
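The added alias re-exports the (private) Fleet method at module level, so after a successful minimize() a caller can query it directly from the package, e.g.:

    import paddle.distributed.fleet as fleet
    # ... fleet.init(...), fleet.distributed_optimizer(...), optimizer.minimize(...) ...
    final = fleet._final_strategy()  # same object the updated tests below assert on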
python/paddle/distributed/fleet/base/distributed_strategy.py

@@ -1244,8 +1244,7 @@ class DistributedStrategy(object):
             if getattr(self.strategy, f.name):
                 draws += border + "\n"
                 draws += h1_format.format(
-                    "{} = True, please check {}_configs".format(
-                        f.name, f.name))
+                    "{}=True <-> {}_configs".format(f.name, f.name))
                 draws += line + "\n"
                 my_configs = getattr(self.strategy, f.name + "_configs")
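The only behavioral effect here is a shorter banner in DistributedStrategy's printable summary. A rough before/after sketch of the header text; h1_format is defined elsewhere in this file, and the centered 78-column layout below is an assumption purely for illustration:

    h1_format = "|{:^78s}|"  # assumed stand-in for the real definition
    name = "a_sync"
    print(h1_format.format("{} = True, please check {}_configs".format(name, name)))  # old
    print(h1_format.format("{}=True <-> {}_configs".format(name, name)))              # new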
python/paddle/distributed/fleet/base/fleet_base.py

@@ -119,6 +119,8 @@ class Fleet(object):
         self.strategy_compiler = None
         self._is_collective = False
         self._runtime_handle = None
         self._util = None
+        self._context = {}
+
     def init(self, role_maker=None, is_collective=False):
         """
@@ -233,7 +235,7 @@ class Fleet(object):
         Returns:
             int: worker numbers

         Examples:

             .. code-block:: python
@@ -569,8 +571,9 @@ class Fleet(object):
         if strategy == None:
             strategy = DistributedStrategy()
-        self.user_defined_strategy = strategy
-        self.valid_strategy = None
+        self._user_defined_strategy = copy.deepcopy(strategy)
+        self._context = {}
+
         return self

     @dygraph_only
@@ -909,6 +912,15 @@ class Fleet(object):
             # imitate target optimizer retrieval
             return self.user_defined_optimizer.clear_grad()

+    def _final_strategy(self):
+        if "valid_strategy" not in self._context:
+            print(
+                "WARNING: You may need to call minimize function before this function is called"
+            )
+            return {}
+        else:
+            return self._context["valid_strategy"]
+
     def minimize(self,
                  loss,
                  startup_program=None,
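Read straight off the hunk above: _final_strategy() only has something to return once minimize() has stored context["valid_strategy"]; called too early, it warns and returns an empty dict. A sketch of that guard path, assuming a configured launcher environment as in the first example:

    import paddle.distributed.fleet as fleet
    import paddle.distributed.fleet.base.role_maker as role_maker

    fleet.init(role_maker.PaddleCloudRoleMaker())
    result = fleet._final_strategy()  # minimize() not called yet
    # Prints "WARNING: You may need to call minimize function before
    # this function is called" and returns {}.
    assert result == {}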
@@ -958,12 +970,15 @@ class Fleet(object):
             # for more examples, please reference https://github.com/PaddlePaddle/FleetX
         """
+        context = {}
+        context["user_defined_strategy"] = copy.deepcopy(
+            self._user_defined_strategy)
         if paddle.fluid.framework.in_dygraph_mode():
             # imitate target optimizer retrieval
             target_opt = self.user_defined_optimizer
+            self._context = context
             return target_opt.minimize(loss)
-        context = {}
+
         # cache original feed forward program
         self.origin_main_program = loss.block.program
         context["origin_main_program"] = self.origin_main_program
@@ -984,17 +999,19 @@ class Fleet(object):
             MetaOptimizerFactory()._get_valid_meta_optimizers(
                 self.user_defined_optimizer)

-        context["user_defined_strategy"] = copy.copy(self.user_defined_strategy)
+        context["user_defined_strategy"] = copy.deepcopy(
+            self._user_defined_strategy)
+        copy_user_defined_strategy = copy.deepcopy(self._user_defined_strategy)

         # trigger the auto-parallel in very strict condition
         # strategy = DistributedStrategy()
         # strategy.auto = True
         # optimizer = paddle.optimizer.SGD(learning_rate=0.1)
         # optimizer = fleet.distributed_optimizer(optimizer, strategy)
-        if self.user_defined_strategy._is_strict_auto():
+        if copy_user_defined_strategy._is_strict_auto():
             # turn on all the strategy for each optimizer
             for opt in distributed_optimizer_list:
-                opt._enable_strategy(self.user_defined_strategy, context)
+                opt._enable_strategy(copy_user_defined_strategy, context)

         valid_optimizer_list = []
         valid_graph_optimizer_list = []
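A plausible reading of why the code now operates on copy_user_defined_strategy (an inference from the deepcopy calls above, not stated in the commit message): when strict auto mode lets each meta optimizer call _enable_strategy, those toggles land on the copy, so the user's own DistributedStrategy object is left untouched. Sketch, reusing optimizer and avg_cost from the first example:

    strategy = fleet.DistributedStrategy()
    strategy.auto = True
    optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
    optimizer.minimize(avg_cost)

    # The auto-enabled knobs are visible on the applied strategy only;
    # the user's object still reflects exactly what was set above.
    final = fleet._final_strategy()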
@@ -1003,7 +1020,7 @@ class Fleet(object):
         for opt in distributed_optimizer_list:
             opt._set_basic_info(loss, self._role_maker,
                                 self.user_defined_optimizer,
-                                self.user_defined_strategy)
+                                copy_user_defined_strategy)
             if opt._can_apply() and not opt._is_graph_out():
                 valid_optimizer_list.append(opt)
             elif opt._can_apply() and opt._is_graph_out():
@@ -1014,13 +1031,15 @@ class Fleet(object):
         meta_optimizer, graph_optimizer = \
             self.strategy_compiler.generate_optimizer(
                 loss, self._role_maker, self.user_defined_optimizer,
-                self.user_defined_strategy, valid_optimizer_list,
+                copy_user_defined_strategy, valid_optimizer_list,
                 valid_graph_optimizer_list)

         valid_strategy = self.strategy_compiler._get_valid_strategy(
-            self.user_defined_strategy, can_not_apply_optimizer_list)
+            copy_user_defined_strategy, can_not_apply_optimizer_list)

-        context["valid_strategy"] = valid_strategy
+        context["valid_strategy"] = copy.deepcopy(valid_strategy)
+
+        self._context = context

         self.valid_strategy = valid_strategy
         self.valid_strategy._enable_env()
python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto.py

@@ -60,8 +60,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
         optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
         optimizer.minimize(avg_cost)

-        self.assertTrue(optimizer.user_defined_strategy.a_sync)
-        a_sync_configs = optimizer.user_defined_strategy.a_sync_configs
+        self.assertTrue(fleet._final_strategy().a_sync)
+        a_sync_configs = fleet._final_strategy().a_sync_configs
         self.assertTrue(a_sync_configs['k_steps'] == 0)
python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_async.py

@@ -72,8 +72,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
         optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
         optimizer.minimize(avg_cost)

-        self.assertTrue(optimizer.user_defined_strategy.a_sync)
-        a_sync_configs = optimizer.user_defined_strategy.a_sync_configs
+        self.assertTrue(fleet._final_strategy().a_sync)
+        a_sync_configs = fleet._final_strategy().a_sync_configs
         self.assertTrue(a_sync_configs['k_steps'] == 0)
python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_geo.py

@@ -60,8 +60,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
         optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
         optimizer.minimize(avg_cost)

-        self.assertTrue(optimizer.user_defined_strategy.a_sync)
-        a_sync_configs = optimizer.user_defined_strategy.a_sync_configs
+        self.assertTrue(fleet._final_strategy().a_sync)
+        a_sync_configs = fleet._final_strategy().a_sync_configs
         self.assertTrue(a_sync_configs['k_steps'] == 800)
python/paddle/fluid/tests/unittests/test_fleet_amp_meta_optimizer.py

@@ -18,6 +18,8 @@ import unittest
 import paddle
 import os

+paddle.enable_static()
+

 class TestFleetAMPOptimizer(unittest.TestCase):
     def setUp(self):

@@ -55,6 +57,8 @@ class TestFleetAMPOptimizer(unittest.TestCase):
         optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
         optimizer.minimize(avg_cost)

+        strategy = fleet._final_strategy()
+
         ops = [op.type for op in avg_cost.block.ops]
         self.assertIn('cast', ops)
         self.assertIn('check_finite_and_unscale', ops)
python/paddle/fluid/tests/unittests/test_fleet_auto.py

@@ -18,6 +18,8 @@ import os
 import paddle.distributed.fleet as fleet
 import paddle.distributed.fleet.base.role_maker as role_maker

+paddle.enable_static()
+

 class TestDistributedStrategyAuto(unittest.TestCase):
     def setUp(self):
python/paddle/fluid/tests/unittests/test_fleet_base.py

@@ -167,6 +167,8 @@ class TestFleetDygraph(unittest.TestCase):
         state_dict = adam.state_dict()
         adam.set_state_dict(state_dict)

+        final_strategy = fleet._final_strategy()
+

 class TestFleetBaseSingleRunCollective(unittest.TestCase):
     def setUp(self):
python/paddle/fluid/tests/unittests/test_fleet_lamb_meta_optimizer.py

@@ -19,6 +19,8 @@ import os
 import paddle.distributed.fleet as fleet
 import paddle.distributed.fleet.base.role_maker as role_maker

+paddle.enable_static()
+

 class TestFleetLambMetaOptimizer(unittest.TestCase):
     def setUp(self):
python/paddle/fluid/tests/unittests/test_fleet_lars_meta_optimizer.py

@@ -19,6 +19,8 @@ import os
 import paddle.distributed.fleet as fleet
 import paddle.distributed.fleet.base.role_maker as role_maker

+paddle.enable_static()
+

 class TestFleetLarsMetaOptimizer(unittest.TestCase):
     def setUp(self):