Crayon鑫 / Paddle · forked from PaddlePaddle / Paddle · commit 4ec51e02
Commit 4ec51e02 (unverified), authored Aug 17, 2020 by Dong Daxiang, committed via GitHub on Aug 17, 2020.
【paddle.fleet】Clear disable (#26334)
* add check approval test=develop
Parent: 3b2c580a
Showing 12 changed files with 25 additions and 18 deletions (+25 -18).
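All twelve files follow one pattern: each meta optimizer's `_disable_strategy` hook previously reset its feature's config dict to hand-maintained default values, and now it simply turns the feature off and clears the dict. These hooks act on a `paddle.distributed.fleet.DistributedStrategy` object, the same object users configure before wrapping their optimizer. A rough user-facing sketch for orientation (the config values are illustrative, not defaults introduced by this commit):

```python
import paddle.distributed.fleet as fleet

strategy = fleet.DistributedStrategy()
strategy.amp = True                          # switch a feature on...
strategy.localsgd = True
strategy.localsgd_configs = {"k_steps": 4}   # ...and tune it (illustrative value)

# If two enabled strategies turn out to be incompatible, the strategy
# compiler calls _disable_strategy(strategy) on the losing meta optimizer.
# After this commit, that call flips the switch off and clears the config
# dict, instead of restoring a hand-written copy of the defaults.
```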
python/paddle/distributed/fleet/base/strategy_compiler.py (+12, -0)
python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py (+1, -0)
python/paddle/distributed/fleet/meta_optimizers/async_graph_execution_optimizer.py (+3, -0)
python/paddle/distributed/fleet/meta_optimizers/async_optimizer.py (+1, -1)
python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py (+1, -5)
python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py (+1, -1)
python/paddle/distributed/fleet/meta_optimizers/lamb_optimizer.py (+1, -4)
python/paddle/distributed/fleet/meta_optimizers/lars_optimizer.py (+1, -4)
python/paddle/distributed/fleet/meta_optimizers/localsgd_optimizer.py (+1, -1)
python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py (+1, -0)
python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py (+1, -1)
python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py (+1, -1)
python/paddle/distributed/fleet/base/strategy_compiler.py

```diff
@@ -76,6 +76,18 @@ class StrategyCompiler(StrategyCompilerBase):
             opt._disable_strategy(valid_strategy)
         return valid_strategy
 
+    """
+    Meta Optimizer Type A: rewrites forward and backward, e.g. recompute, async, sync, pipeline;
+                           results will be split across async, sync, pipeline
+    Meta Optimizer Type B: rewrites forward,
+                           e.g. AMP; the corresponding backward is generated by the rewritten forward
+    Meta Optimizer Type C: rewrites backward, e.g. gradient fusion
+    Meta Optimizer Type D: rewrites optimize, e.g. lars, lamb, localsgd, gradient merge, dgc
+    Meta Optimizer Type E: only transpiles to a Graph structure for runtime;
+                           currently grad fusion, kernel fusion and sync batch-norm are included.
+                           we will remove grad fusion and sync batch-norm
+    """
+
     def generate_optimizer(self, loss, role_maker, optimizer,
                            user_defined_strategy, meta_optimizer_list,
                            graph_optimizer_list):
```
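The two unchanged context lines at the top of this hunk (`opt._disable_strategy(valid_strategy)` / `return valid_strategy`) are the tail of the compiler's validity pass. A simplified sketch of what such a pass could look like; the method name, the `copy`, and the list argument are assumptions for illustration, only those two context lines come from the diff:

```python
import copy

# Assumed to be a method of StrategyCompiler; only the last two lines
# of the body appear in the hunk above.
def _get_valid_strategy(self, dist_strategy, can_not_apply_optimizer_list):
    # Work on a copy so the user's own strategy object stays untouched.
    valid_strategy = copy.copy(dist_strategy)
    for opt in can_not_apply_optimizer_list:
        # Each rejected meta optimizer switches itself off on the copy.
        opt._disable_strategy(valid_strategy)
    return valid_strategy
```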
python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py

```diff
@@ -37,6 +37,7 @@ class AMPOptimizer(MetaOptimizerBase):
     def _disable_strategy(self, dist_strategy):
         dist_strategy.amp = False
+        dist_strategy.amp_configs = {}
 
     def minimize_impl(self, loss, ...
```
python/paddle/distributed/fleet/meta_optimizers/async_graph_execution_optimizer.py

```diff
@@ -33,6 +33,9 @@ class AsyncGraphExecutionOptimizer(AsyncMetaOptimizer):
         return True
 
+    def _disable_strategy(self, dist_strategy):
+        dist_strategy.a_sync_configs = {}
+
     def _is_graph_out(self):
         return True
```
python/paddle/distributed/fleet/meta_optimizers/async_optimizer.py

```diff
@@ -139,4 +139,4 @@ class AsyncMetaOptimizer(MetaOptimizerBase):
         return None, None
 
     def _disable_strategy(self, dist_strategy):
-        self.user_defined_strategy.a_sync_configs["k_steps"] = -1
+        self.user_defined_strategy.a_sync_configs = {}
```
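Note the semantic shift here: the old code marked async training disabled by writing a `-1` sentinel into `a_sync_configs["k_steps"]`, whereas the new code, like every other optimizer in this commit, empties the dict. Hypothetical downstream code would therefore have to test for key absence instead of the sentinel; an illustrative comparison (the `strategy` object and both checks are assumed, not shown in the diff):

```python
configs = strategy.a_sync_configs

# before this commit: "disabled" was signalled by a sentinel value
was_disabled = configs.get("k_steps") == -1

# after this commit: "disabled" is signalled by the cleared dict
is_disabled = "k_steps" not in configs
```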
python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py

```diff
@@ -68,11 +68,7 @@ class DGCOptimizer(MetaOptimizerBase):
     def _disable_strategy(self, dist_strategy):
         dist_strategy.dgc = False
-        dist_strategy.dgc_configs = {
-            'rampup_begin_step': 0,
-            'rampup_step': 1,
-            'sparsity': [0.999]
-        }
+        dist_strategy.dgc_configs = {}
 
     def backward(self, loss, ...
```
python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py

```diff
@@ -40,7 +40,7 @@ class GradientMergeOptimizer(MetaOptimizerBase):
     def _disable_strategy(self, dist_strategy):
         dist_strategy.gradient_merge = False
-        dist_strategy.gradient_merge_configs = {"k_steps": 1, "avg": True}
+        dist_strategy.gradient_merge_configs = {}
 
     def minimize_impl(self, loss, ...
```
python/paddle/distributed/fleet/meta_optimizers/lamb_optimizer.py

```diff
@@ -74,10 +74,7 @@ class LambOptimizer(MetaOptimizerBase):
     def _disable_strategy(self, dist_strategy):
         dist_strategy.lamb = False
-        dist_strategy.lamb_configs = {
-            'lamb_weight_decay': 0.01,
-            'exclude_from_weight_decay': [],
-        }
+        dist_strategy.lamb_configs = {}
 
     def backward(self, loss, ...
```
python/paddle/distributed/fleet/meta_optimizers/lars_optimizer.py

```diff
@@ -58,10 +58,7 @@ class LarsOptimizer(MetaOptimizerBase):
     def _disable_strategy(self, dist_strategy):
         dist_strategy.lars = False
-        dist_strategy.lars_configs = {
-            'lars_coeff': 0.001,
-            'lars_weight_decay': 0.0005,
-        }
+        dist_strategy.lars_configs = {}
 
     def backward(self, loss, ...
```
python/paddle/distributed/fleet/meta_optimizers/localsgd_optimizer.py

```diff
@@ -39,7 +39,7 @@ class LocalSGDOptimizer(MetaOptimizerBase):
     def _disable_strategy(self, dist_strategy):
         dist_strategy.localsgd = False
-        dist_strategy.localsgd_configs = {'k_steps': 1}
+        dist_strategy.localsgd_configs = {}
 
     def snapshot_name(self, param_name):
         return param_name + self.snapshot_key
```
python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py

```diff
@@ -38,6 +38,7 @@ class MetaOptimizerBase(object):
     def _can_update(self, optimizer):
         if str(optimizer.__class__.__name__) in self.meta_optimizers_white_list:
             return True
         return False
 
     def _disable_strategy(self, dist_strategy):
         raise NotImplementedError("you should implement disable strategy in {}".
                                   ...
```
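Every concrete optimizer above overrides this base-class hook, which otherwise raises `NotImplementedError`. A minimal sketch of the contract they all now follow; `MyMetaOptimizer` and the `my_feature` names are invented for illustration, only `MetaOptimizerBase`, its module path (from the file list above), and the `_disable_strategy` signature come from the diff:

```python
from paddle.distributed.fleet.meta_optimizers.meta_optimizer_base import (
    MetaOptimizerBase,
)


class MyMetaOptimizer(MetaOptimizerBase):
    def _disable_strategy(self, dist_strategy):
        # Pattern applied throughout #26334: flip the feature's switch off
        # and clear its config dict, rather than re-writing defaults.
        dist_strategy.my_feature = False        # invented flag name
        dist_strategy.my_feature_configs = {}   # invented configs name
```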
python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py

```diff
@@ -110,7 +110,7 @@ class PipelineOptimizer(MetaOptimizerBase):
     def _disable_strategy(self, dist_strategy):
         dist_strategy.pipeline = False
-        dist_strategy.pipeline_configs = {"micro_batch": 1}
+        dist_strategy.pipeline_configs = {}
 
     def minimize_impl(self, loss, ...
```
python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py

```diff
@@ -42,7 +42,7 @@ class RecomputeOptimizer(MetaOptimizerBase):
     def _disable_strategy(self, dist_strategy):
         dist_strategy.recompute = False
-        dist_strategy.recompute_configs = {"checkpoints": []}
+        dist_strategy.recompute_configs = {}
 
     def backward(self, loss, ...
```
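The removed default hints at what `recompute_configs` carries: a `checkpoints` list naming activations to keep, with the segments between checkpoints recomputed during the backward pass. A hedged usage sketch (the tensor names in `checkpoints` are placeholders; in practice they come from the user's network):

```python
import paddle.distributed.fleet as fleet

strategy = fleet.DistributedStrategy()
strategy.recompute = True
# Placeholder names of activations to checkpoint between recomputed segments.
strategy.recompute_configs = {"checkpoints": ["fc_0.tmp_0", "fc_1.tmp_0"]}
```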