机器未来 / Paddle, forked from PaddlePaddle / Paddle
Commit 7dbab103
Authored on Feb 07, 2021 by sandyhouse

update, test=develop

Parent: d7c5e849

Showing 4 changed files with 65 additions and 4 deletions (+65, -4):
paddle/fluid/framework/section_worker.cc (+0, -1)
python/paddle/distributed/fleet/base/distributed_strategy.py (+54, -0)
python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py (+10, -1)
python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py (+1, -2)
paddle/fluid/framework/section_worker.cc

@@ -13,7 +13,6 @@ limitations under the License. */
#include <float.h>
#include "paddle/fluid/framework/device_worker.h"
#include "paddle/fluid/framework/executor_gc_helper.h"
#include "paddle/fluid/platform/device_context.h"

namespace paddle {
python/paddle/distributed/fleet/base/distributed_strategy.py

@@ -736,6 +736,60 @@ class DistributedStrategy(object):
                          "sharding_configs")
        assign_configs_value(self.strategy.sharding_configs, configs)

    @property
    def model_parallel(self):
        """
        Indicating whether we are using model parallel parallelism for distributed training.

        Examples:
          .. code-block:: python

            import paddle.distributed.fleet as fleet
            strategy = fleet.DistributedStrategy()
            strategy.model_parallel = True
        """
        return self.strategy.model_parallel

    @model_parallel.setter
    @is_strict_auto
    def model_parallel(self, flag):
        if isinstance(flag, bool):
            self.strategy.model_parallel = flag
        else:
            print("WARNING: model_parallel should have value of bool type")

    @property
    def model_parallel_configs(self):
        """
        Set model_parallel parallelism configurations.

        **Notes**:
            **Detailed arguments for model_parallel_configs**
            **parallelism**: degree of model parallel

        Examples:
          .. code-block:: python

            import paddle.distributed.fleet as fleet
            strategy = fleet.DistributedStrategy()
            strategy.model_parallel = True
            strategy.model_parallel_configs = {"parallelism": 12}
        """
        return get_msg_dict(self.strategy.model_parallel_configs)

    @model_parallel_configs.setter
    @is_strict_auto
    def model_parallel_configs(self, configs):
        check_configs_key(self.strategy.model_parallel_configs, configs,
                          "model_parallel_configs")
        assign_configs_value(self.strategy.model_parallel_configs, configs)

    @property
    def pipeline(self):
        """
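The new model_parallel switch follows the same pattern as the existing sharding and pipeline properties, so a training script would set it on the strategy before wrapping the optimizer. Below is a minimal sketch, assuming collective fleet initialization; the model, optimizer, and parallelism degree are placeholders, and only model_parallel and model_parallel_configs come from this commit.

import paddle
import paddle.distributed.fleet as fleet

fleet.init(is_collective=True)

strategy = fleet.DistributedStrategy()
strategy.model_parallel = True                        # property added in this commit
strategy.model_parallel_configs = {"parallelism": 4}  # config key added in this commit

# Placeholder model and optimizer; any Paddle Layer/Optimizer would do here.
model = paddle.nn.Linear(1024, 1024)
opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
opt = fleet.distributed_optimizer(opt, strategy=strategy)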
python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py

@@ -50,7 +50,8 @@ class AMPOptimizer(MetaOptimizerBase):
            self.inner_opt, amp_lists, config['init_loss_scaling'],
            config['incr_every_n_steps'], config['decr_every_n_nan_or_inf'],
            config['incr_ratio'], config['decr_ratio'],
-           config['use_dynamic_loss_scaling'])
+           config['use_dynamic_loss_scaling'], config['use_pure_fp16'],
+           config['use_fp16_guard'])
        # if worker_num > 1, all cards will communication with each other,
        # add is_distributed to optimize amp, overlap communication and

@@ -113,3 +114,11 @@ class AMPOptimizer(MetaOptimizerBase):
            self.wrapped_opt.minimize(loss, startup_program,
                                      parameter_list, no_grad_set)
        return optimize_ops, params_grads

    def amp_init(self,
                 place,
                 scope=None,
                 test_program=None,
                 use_fp16_test=False):
        return self.wrapped_opt.amp_init(place, scope, test_program,
                                         use_fp16_test)
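The two extra arguments forwarded to the decorated optimizer correspond to the pure-fp16 mode of Paddle's mixed-precision support. A hedged sketch of how these options would be switched on through the strategy, assuming the standard amp_configs keys; everything other than use_pure_fp16 and use_fp16_guard is illustrative:

import paddle
import paddle.distributed.fleet as fleet

fleet.init(is_collective=True)

strategy = fleet.DistributedStrategy()
strategy.amp = True
strategy.amp_configs = {
    "init_loss_scaling": 32768,
    "use_dynamic_loss_scaling": True,
    "use_pure_fp16": True,    # read from config by this diff
    "use_fp16_guard": False,  # read from config by this diff
}

model = paddle.nn.Linear(1024, 1024)
opt = paddle.optimizer.Momentum(learning_rate=0.01,
                                parameters=model.parameters())
opt = fleet.distributed_optimizer(opt, strategy=strategy)

In static-graph training, amp_init(place) would then be called once after the startup program runs so that parameters are cast to fp16 before the first step; the method added here simply delegates that call to the wrapped mixed-precision optimizer.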
python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py

@@ -87,8 +87,7 @@ class ShardingOptimizer(MetaOptimizerBase):
        self._as_outer_parallelism = self.user_defined_strategy.sharding_configs[
            "as_outer_parallelism"]
        self._inner_parallelism_size = int(
-           self.user_defined_strategy.sharding_configs["inner_parallelism_size"])
+           self.user_defined_strategy.sharding_configs["parallelism"])
        self.use_pipeline = self.user_defined_strategy.sharding_configs[
            "use_pipeline"]
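The functional change in this file is the name of the config key the sharding optimizer reads for its inner parallelism degree: "inner_parallelism_size" becomes "parallelism", matching the model_parallel_configs key introduced above. A hedged sketch of a sharding configuration using the renamed key, assuming this fork's extended sharding_configs; the values are illustrative and only the key names appear in this diff:

import paddle.distributed.fleet as fleet

strategy = fleet.DistributedStrategy()
strategy.sharding = True
strategy.sharding_configs = {
    "as_outer_parallelism": True,  # read by ShardingOptimizer in this file
    "parallelism": 8,              # renamed key; was "inner_parallelism_size"
    "use_pipeline": False,         # read by ShardingOptimizer in this file
}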