Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle, in sync with the fork source)
Commit 08d736ad (unverified)
Authored Aug 25, 2020 by Dong Daxiang; committed via GitHub on Aug 25, 2020
Parent commit: 0a895bc0

【paddle.fleet】add cudnn related strategies to DistributedStrategy (#26598)

* add cudnn related strategies to DistributedStrategy
Showing 4 changed files with 89 additions and 2 deletions (+89 / -2):
paddle/fluid/framework/distributed_strategy.proto                        +3  -1
python/paddle/distributed/fleet/base/distributed_strategy.py             +63 -1
python/paddle/distributed/fleet/base/fleet_base.py                       +1  -0
python/paddle/fluid/tests/unittests/test_fleet_distributed_strategy.py   +22 -0
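The changes add three cuDNN-related knobs to DistributedStrategy and wire them into global FLAGS. A minimal usage sketch, not part of the commit, using only names introduced by this change and the import style of the new unit tests:

# Hedged sketch: configure the three new cuDNN-related options.
import paddle.distributed.fleet as fleet

strategy = fleet.DistributedStrategy()
strategy.cudnn_exhaustive_search = True             # bool: exhaustive search for the conv algorithm
strategy.conv_workspace_size_limit = 1024           # int: cuDNN conv workspace limit
strategy.cudnn_batchnorm_spatial_persistent = True  # bool: persistent spatial batch-norm mode
# The strategy would then be handed to fleet.distributed_optimizer(optimizer, strategy=strategy).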
paddle/fluid/framework/distributed_strategy.proto
@@ -113,7 +113,9 @@ message DistributedStrategy {
   optional bool fuse_all_reduce_ops = 18 [ default = true ];
   optional int32 fuse_grad_size_in_MB = 19 [ default = 32 ];
   optional float fuse_grad_size_in_TFLOPS = 20 [ default = 50 ];
   // optional bool enable_backward_optimizer_op_deps = 19 [ default = true ];
+  optional bool cudnn_exhaustive_search = 21 [ default = true ];
+  optional int32 conv_workspace_size_limit = 22 [ default = 4000 ];
+  optional bool cudnn_batchnorm_spatial_persistent = 23 [ default = true ];
   optional RecomputeConfig recompute_configs = 101;
   optional AMPConfig amp_configs = 102;
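These fields are proto2 optional scalars with explicit defaults, so the generated Python message returns the defaults even when nothing is set. A hedged check, not part of the commit, assuming the generated module is importable as in the import hunk below:

# Hedged sketch: read the new defaults from the generated protobuf message.
from paddle.distributed.fleet.proto import distributed_strategy_pb2

msg = distributed_strategy_pb2.DistributedStrategy()
print(msg.cudnn_exhaustive_search)             # True  (default = true)
print(msg.conv_workspace_size_limit)           # 4000  (default = 4000)
print(msg.cudnn_batchnorm_spatial_persistent)  # True  (default = true)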
python/paddle/distributed/fleet/base/distributed_strategy.py
@@ -14,7 +14,7 @@
 import paddle
 from paddle.distributed.fleet.proto import distributed_strategy_pb2
-from paddle.fluid.framework import Variable
+from paddle.fluid.framework import Variable, set_flags, core
 import google.protobuf.text_format
@@ -810,6 +810,68 @@ class DistributedStrategy(object):
         else:
             print("WARNING: auto should have value of bool type")

+    @property
+    def cudnn_exhaustive_search(self):
+        return self.strategy.cudnn_exhaustive_search
+
+    @cudnn_exhaustive_search.setter
+    def cudnn_exhaustive_search(self, flag):
+        if isinstance(flag, bool):
+            self.strategy.cudnn_exhaustive_search = flag
+        else:
+            print("WARNING: cudnn_exhaustive_search should have value of bool type")
+
+    @property
+    def conv_workspace_size_limit(self):
+        return self.strategy.conv_workspace_size_limit
+
+    @conv_workspace_size_limit.setter
+    def conv_workspace_size_limit(self, value):
+        if isinstance(value, int):
+            self.strategy.conv_workspace_size_limit = value
+        else:
+            print("WARNING: conv_workspace_size_limit should have value of int type")
+
+    @property
+    def cudnn_batchnorm_spatial_persistent(self):
+        return self.strategy.cudnn_batchnorm_spatial_persistent
+
+    @cudnn_batchnorm_spatial_persistent.setter
+    def cudnn_batchnorm_spatial_persistent(self, flag):
+        if isinstance(flag, bool):
+            self.strategy.cudnn_batchnorm_spatial_persistent = flag
+        else:
+            print("WARNING: cudnn_batchnorm_spatial_persistent should have value of bool type")
+
+    def _enable_env(self):
+        strategy = self.strategy
+        keys = [
+            "FLAGS_cudnn_batchnorm_spatial_persistent",
+            "FLAGS_conv_workspace_size_limit",
+            "FLAGS_cudnn_exhaustive_search",
+            "FLAGS_sync_nccl_allreduce",
+            "FLAGS_fuse_parameter_memory_size",
+            "FLAGS_fuse_parameter_groups_size",
+        ]
+        values = [
+            bool(strategy.cudnn_batchnorm_spatial_persistent),
+            int(strategy.conv_workspace_size_limit),
+            bool(strategy.cudnn_exhaustive_search),
+            bool(strategy.sync_nccl_allreduce),
+            int(strategy.fuse_grad_size_in_MB),
+            int(strategy.fuse_grad_size_in_TFLOPS),
+        ]
+        for i, key in enumerate(keys):
+            if core.globals().is_public(key):
+                core.globals()[key] = values[i]
+
     def __repr__(self):
         fields = self.strategy.DESCRIPTOR.fields
         for f in fields:
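The new setters only warn on a type mismatch and keep the previously stored value instead of raising. A hedged sketch of that behavior, following the same pattern as the unit tests added in this commit:

# Hedged sketch: a wrong-typed assignment prints a warning and leaves the value unchanged.
import paddle.distributed.fleet as fleet

strategy = fleet.DistributedStrategy()
strategy.conv_workspace_size_limit = 1000
strategy.conv_workspace_size_limit = "400"   # str, not int: warning printed, value stays 1000
print(strategy.conv_workspace_size_limit)    # 1000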
python/paddle/distributed/fleet/base/fleet_base.py
@@ -383,6 +383,7 @@ class Fleet(object):
         context["valid_strategy"] = valid_strategy
         self.valid_strategy = valid_strategy
+        self.valid_strategy._enable_env()
         optimize_ops = []
         params_grads = []
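This single added line makes Fleet call _enable_env() on the validated strategy, so the cuDNN-related values are pushed into global FLAGS without any extra user code. A hedged sketch of the observable effect, calling _enable_env() directly as the unit test below does and reading back through core.globals(), the same accessor _enable_env() writes through:

# Hedged sketch: after _enable_env(), strategy values show up as global FLAGS.
import paddle.distributed.fleet as fleet
from paddle.fluid import core

strategy = fleet.DistributedStrategy()
strategy.conv_workspace_size_limit = 1024
strategy.cudnn_exhaustive_search = False
strategy._enable_env()  # normally triggered inside Fleet by the line added above
print(core.globals()["FLAGS_conv_workspace_size_limit"])  # expected: 1024
print(core.globals()["FLAGS_cudnn_exhaustive_search"])    # expected: False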
python/paddle/fluid/tests/unittests/test_fleet_distributed_strategy.py
@@ -294,6 +294,28 @@ class TestStrategyConfig(unittest.TestCase):
         with self.assertRaises(TypeError):
             strategy.unknown_key = 'UNK'

+    def test_cudnn_exhaustive_search(self):
+        strategy = paddle.distributed.fleet.DistributedStrategy()
+        strategy.cudnn_exhaustive_search = False
+        self.assertEqual(strategy.cudnn_exhaustive_search, False)
+        strategy.cudnn_exhaustive_search = "True"
+        self.assertEqual(strategy.cudnn_exhaustive_search, False)
+
+    def test_cudnn_batchnorm_spatial_persistent(self):
+        strategy = paddle.distributed.fleet.DistributedStrategy()
+        strategy.cudnn_batchnorm_spatial_persistent = False
+        self.assertEqual(strategy.cudnn_batchnorm_spatial_persistent, False)
+        strategy.cudnn_batchnorm_spatial_persistent = "True"
+        self.assertEqual(strategy.cudnn_batchnorm_spatial_persistent, False)
+
+    def test_conv_workspace_size_limit(self):
+        strategy = paddle.distributed.fleet.DistributedStrategy()
+        strategy.conv_workspace_size_limit = 1000
+        self.assertEqual(strategy.conv_workspace_size_limit, 1000)
+        strategy.conv_workspace_size_limit = "400"
+        self.assertEqual(strategy.conv_workspace_size_limit, 1000)
+        strategy._enable_env()
+

 if __name__ == '__main__':
     unittest.main()
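Since the test class name appears in the hunk header, the three new cases can be run on their own with the standard unittest runner. A hedged sketch, assuming it is executed from the unittests directory so the module imports directly:

# Hedged sketch: run only the newly added test methods.
import unittest
from test_fleet_distributed_strategy import TestStrategyConfig

suite = unittest.TestSuite()
for name in ("test_cudnn_exhaustive_search",
             "test_cudnn_batchnorm_spatial_persistent",
             "test_conv_workspace_size_limit"):
    suite.addTest(TestStrategyConfig(name))
unittest.TextTestRunner(verbosity=2).run(suite)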