Commit cee7a3db (unverified)
Repository: BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Authored by ShenLiang on Dec 05, 2022; committed via GitHub on Dec 05, 2022
Parent: 89f024e3

fix bug of reducer in best_fit (#48668)
Showing 3 changed files with 39 additions and 13 deletions (+39 −13):

paddle/fluid/distributed/collective/reducer.cc  (+22 −11)
paddle/fluid/distributed/collective/reducer.h  (+1 −1)
python/paddle/fluid/tests/unittests/test_parallel_dygraph_dataparallel.py  (+16 −1)
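In short, the commit stops splitting the fused gradient buffer on the communication stream unless the allocator is stream-safe (FLAGS_allocator_strategy == "auto_growth" together with FLAGS_use_stream_safe_cuda_allocator); under naive_best_fit the split is deferred to FinalizeBackward and runs on the default stream after synchronization. As a hedged sketch of exercising both paths from the launcher side, mirroring the environment handling in the updated test helper (train.py is a hypothetical placeholder script, not part of this commit):

import os
import subprocess

# Run one trainer per allocator strategy to hit both code paths touched by
# this commit. The FLAGS_* names are the Paddle GFlags used in the diff below.
for strategy in ("auto_growth", "naive_best_fit"):
    env = os.environ.copy()
    env["FLAGS_allocator_strategy"] = strategy
    if strategy == "auto_growth":
        # Mirrors the test helper: cap per-process GPU memory so several
        # trainer processes can share one card.
        env["FLAGS_fraction_of_gpu_memory_to_use"] = "0.1"
    subprocess.run(["python", "train.py"], env=env, check=True)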
paddle/fluid/distributed/collective/reducer.cc
@@ -17,10 +17,16 @@
 #include "paddle/phi/backends/device_manager.h"
 
 DECLARE_bool(use_stream_safe_cuda_allocator);
+DECLARE_string(allocator_strategy);
 
 namespace paddle {
 namespace distributed {
 
+static bool IsStreamSafeAllocator() {
+  return FLAGS_allocator_strategy == "auto_growth" &&
+         FLAGS_use_stream_safe_cuda_allocator;
+}
+
 static Backend TransToBackend(platform::Place place) {
   static const std::map<phi::AllocationType, Backend> type_backend = {
       {phi::AllocationType::GPU, Backend::GPU},
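The new helper requires both flags to agree before the reducer treats allocation as stream-safe. As a reading aid only, the same predicate in Python over environment-style flag values (the defaults here are illustrative assumptions, not Paddle's documented defaults):

import os

def is_stream_safe_allocator(env=os.environ):
    # Same decision as the C++ helper above: only auto_growth combined with
    # the stream-safe CUDA allocator counts as stream-safe.
    return (
        env.get("FLAGS_allocator_strategy", "auto_growth") == "auto_growth"
        and env.get("FLAGS_use_stream_safe_cuda_allocator", "true").lower()
        in ("1", "true")
    )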
@@ -399,14 +405,14 @@ void EagerGroup::ConcatTensors(const platform::Place &place) {
   }
 }
 
-void EagerGroup::SplitTensorsDev(const platform::DeviceContext &context) {
+void EagerGroup::SplitTensors(const platform::DeviceContext &context) {
   auto place = context.GetPlace();
   if (platform::is_gpu_place(place)) {
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
     auto &gpu_context = static_cast<const phi::GPUContext &>(context);
     SplitTensorsWithType(
         gpu_context, &dense_contents_, &dense_tensors_, dtype_);
-    if (FLAGS_use_stream_safe_cuda_allocator) {
+    if (IsStreamSafeAllocator()) {
       auto dense_tensor =
           std::dynamic_pointer_cast<phi::DenseTensor>(dense_contents_.impl());
       VLOG(3) << "Free dense_contents_ " << dense_contents_.numel();
@@ -1011,12 +1017,11 @@ void EagerReducer::FinalizeBackward() {
   for (auto &group : groups_) {
     if (!group.is_sparse_) {
       group.task->Synchronize();
-    }
-  }
-
-  for (auto &group : groups_) {
-    if (!group.is_sparse_) {
       group.dense_contents_.reset();
+      if (!IsStreamSafeAllocator()) {
+        auto *default_ctx =
+            platform::DeviceContextPool::Instance().Get(inner_place_);
+        group.SplitTensors(*default_ctx);
+      }
     }
   }
@@ -1054,9 +1059,15 @@ void EagerReducer::FusedAllReduceSchedule(EagerGroup *group,
   group->task = process_group_->AllReduce(in_out, in_out, opts);
 
   auto *context = process_group_->GetDeviceContext(inner_place_);
-  group->SplitTensorsDev(*context);
-  group->task->UpdateWaitChain(*context);
-  // split in FinalizeBackward()
+
+  if (IsStreamSafeAllocator()) {
+    // NOTE(shenliang03): The best_fit allocator strategy is multi-stream
+    // insecure. In the Split operator, additional memory will be applied for
+    // calculation, and if it is asynchronous, an illegal memory access may be
+    // encountered.
+    group->SplitTensors(*context);
+    group->task->UpdateWaitChain(*context);
+  }
 }
 
 void EagerReducer::AllReduceSparse(EagerGroup *group,
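The NOTE above carries the core reasoning of the fix: a best_fit-style allocator can hand a just-freed block to another consumer while an asynchronous kernel on a different stream is still reading it. A framework-free toy model of that reuse-while-in-flight hazard (pure Python; every name is illustrative, no CUDA involved):

# A best_fit-style allocator reuses a freed block immediately, while an
# "async" op elsewhere still holds a reference into it.
class BestFitAllocator:
    def __init__(self):
        self.free_blocks = []

    def alloc(self):
        # Reuse a freed block if one is available (best-fit style).
        return self.free_blocks.pop() if self.free_blocks else bytearray(8)

    def free(self, block):
        self.free_blocks.append(block)  # eligible for reuse at once

allocator = BestFitAllocator()
buf = allocator.alloc()
buf[:] = b"allreduce"[:8]   # the comm stream wrote the fused gradients
allocator.free(buf)         # freed before the async split finished
other = allocator.alloc()   # another consumer gets the same block back
other[:] = b"\x00" * 8      # ...and overwrites it
print(buf)                  # the "in-flight" split now reads zeros

The stream-safe allocator avoids this by recording which stream last touched an allocation and delaying reuse until that stream has caught up, which is why the comm-stream split is only kept under IsStreamSafeAllocator().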
paddle/fluid/distributed/collective/reducer.h
@@ -75,7 +75,7 @@ class EagerGroup {
   // context is used to select the stream for split
-  void SplitTensorsDev(const platform::DeviceContext &);
+  void SplitTensors(const platform::DeviceContext &);
 
   friend std::ostream &operator<<(std::ostream &, const EagerGroup &);
 };
python/paddle/fluid/tests/unittests/test_parallel_dygraph_dataparallel.py
@@ -103,6 +103,7 @@ def start_local_trainers(
     training_script,
     training_script_args,
     eager_mode=True,
+    allocator_strategy="auto_growth",
     log_dir=None,
 ):
     current_env = copy.copy(os.environ.copy())
@@ -126,6 +127,10 @@ def start_local_trainers(
     if not eager_mode:
         proc_env["FLAGS_enable_eager_mode"] = "%d" % 0
 
+    proc_env["FLAGS_allocator_strategy"] = allocator_strategy
+    if allocator_strategy == "auto_growth":
+        proc_env["FLAGS_fraction_of_gpu_memory_to_use"] = "0.1"
+
     current_env.update(proc_env)
 
     print("trainer proc env:{}".format(current_env))
@@ -153,7 +158,12 @@ def start_local_trainers(
 class TestMultipleGpus(unittest.TestCase):
-    def run_mnist_2gpu(self, target_file_name, eager_mode=True):
+    def run_mnist_2gpu(
+        self,
+        target_file_name,
+        eager_mode=True,
+        allocator_strategy="auto_growth",
+    ):
         if (
             not fluid.core.is_compiled_with_cuda()
             or fluid.core.get_cuda_device_count() == 0
@@ -170,6 +180,7 @@ class TestMultipleGpus(unittest.TestCase):
             cluster,
             pod,
             eager_mode=eager_mode,
+            allocator_strategy=allocator_strategy,
             training_script=target_file_name,
             training_script_args=[],
         )
@@ -218,6 +229,10 @@ class TestDataParallelWithPyLayer(TestMultipleGpus):
         self.run_mnist_2gpu(
             'parallel_dygraph_dataparallel_with_pylayer.py', eager_mode=False
         )
+        self.run_mnist_2gpu(
+            'parallel_dygraph_dataparallel_with_pylayer.py',
+            allocator_strategy="naive_best_fit",
+        )
 
 
 class TestGradientCheckInEagerMode(TestMultipleGpus):