PaddlePaddle / Paddle
Commit 81217a94 (unverified)
Authored Jan 19, 2021 by Leo Chen, committed via GitHub on Jan 19, 2021

unify calling cudaSetDevice (#30470)

* unify calling cudaSetDevice
* fix compile

Parent: 00554b3f
Showing 8 changed files with 9 additions and 9 deletions (+9 -9).
paddle/fluid/framework/details/nccl_op_handle.h   +1 -1
paddle/fluid/framework/details/op_handle_base.cc  +1 -1
paddle/fluid/framework/fleet/nccl_wrapper.cc      +1 -1
paddle/fluid/inference/tensorrt/engine.cc         +1 -1
paddle/fluid/memory/malloc_test.cu                +2 -2
paddle/fluid/platform/collective_helper.cc        +1 -1
paddle/fluid/platform/gpu_info.cc                 +1 -1
paddle/fluid/platform/nccl_helper.h               +1 -1
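The pattern repeated across the hunks below is mechanical: call sites stop issuing cudaSetDevice directly (wrapped in PADDLE_ENFORCE_CUDA_SUCCESS, PADDLE_RETRY_CUDA_SUCCESS, or a bare EXPECT_TRUE check) and go through platform::SetDeviceId instead, which validates the requested id before switching devices. The block below is a minimal, self-contained sketch of that call-site shape; the simplified SetDeviceId here is an assumption for illustration only, not Paddle's actual implementation (Paddle's version also retries, see the gpu_info.cc hunk).

#include <cuda_runtime.h>

#include <cstdio>
#include <stdexcept>

// Simplified stand-in for platform::SetDeviceId: validate the requested id
// against the device count, then switch devices, turning a CUDA error into
// an exception. Paddle's real implementation additionally retries the
// cudaSetDevice call (see the gpu_info.cc hunk further down).
void SetDeviceId(int id) {
  int count = 0;
  if (cudaGetDeviceCount(&count) != cudaSuccess || id < 0 || id >= count) {
    throw std::out_of_range("Device id must be less than GPU count.");
  }
  cudaError_t err = cudaSetDevice(id);
  if (err != cudaSuccess) {
    throw std::runtime_error(cudaGetErrorString(err));
  }
}

int main() {
  // After this commit, call sites select a device through the wrapper
  // instead of calling cudaSetDevice and checking its result themselves.
  SetDeviceId(0);
  std::printf("device 0 selected\n");
  return 0;
}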
paddle/fluid/framework/details/nccl_op_handle.h

@@ -94,7 +94,7 @@ class NCCLOpHandleBase : public OpHandleBase {
         continue;
       }
-      PADDLE_ENFORCE_CUDA_SUCCESS(cudaSetDevice(dev_id));
+      platform::SetDeviceId(dev_id);
       PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventCreateWithFlags(
           &inter_events_[dev_id], cudaEventDisableTiming));
       PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventCreateWithFlags(
paddle/fluid/framework/details/op_handle_base.cc

@@ -47,7 +47,7 @@ void OpHandleBase::InitCUDA() {
 #ifdef PADDLE_WITH_CUDA
   for (auto &p : dev_ctxes_) {
     int dev_id = BOOST_GET_CONST(platform::CUDAPlace, p.first).device;
-    PADDLE_ENFORCE_CUDA_SUCCESS(cudaSetDevice(dev_id));
+    platform::SetDeviceId(dev_id);
     PADDLE_ENFORCE_CUDA_SUCCESS(
         cudaEventCreateWithFlags(&events_[dev_id], cudaEventDisableTiming));
   }
paddle/fluid/framework/fleet/nccl_wrapper.cc

@@ -50,7 +50,7 @@ void NCCLWrapper::SetRankInfo(const int local_rank, const int global_rank,
   nccl_info_.local_rank_ = local_rank;
   nccl_info_.my_global_rank_ = global_rank;
   nccl_info_.global_ranks_ = ranks;
-  PADDLE_ENFORCE_CUDA_SUCCESS(cudaSetDevice(local_rank));
+  platform::SetDeviceId(local_rank);
   PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreate(&(nccl_info_.stream_)));
 #endif
   return;
paddle/fluid/inference/tensorrt/engine.cc

@@ -339,7 +339,7 @@ void TensorRTEngine::freshDeviceId() {
                     platform::errors::OutOfRange(
                         "Device id %d exceeds the current device count: %d.",
                         device_id_, count));
-  cudaSetDevice(device_id_);
+  platform::SetDeviceId(device_id_);
 }
 }  // namespace tensorrt
paddle/fluid/memory/malloc_test.cu

@@ -64,7 +64,7 @@ void MultiStreamCompute(float **data, float **second_data,
 TEST(Malloc, CUDADeviceContextMultiStream) {
   auto place = platform::CUDAPlace(0);
-  EXPECT_TRUE(cudaSuccess == cudaSetDevice(0));
+  platform::SetDeviceId(0);
   AllocationPtr main_stream_alloc_ptr = Alloc(place, N * sizeof(float));
   EXPECT_GE(main_stream_alloc_ptr->size(), N * sizeof(float));

@@ -94,7 +94,7 @@ TEST(Malloc, CUDADeviceContextMultiStream) {
 TEST(Malloc, CUDADeviceContextMultiThreadMultiStream) {
   auto place = platform::CUDAPlace(0);
-  EXPECT_TRUE(cudaSuccess == cudaSetDevice(0));
+  platform::SetDeviceId(0);
   AllocationPtr main_stream_alloc_ptr = Alloc(place, N * sizeof(float));
   EXPECT_GE(main_stream_alloc_ptr->size(), N * sizeof(float));
paddle/fluid/platform/collective_helper.cc

@@ -75,7 +75,7 @@ NCCLComm* NCCLCommContext::CreateNCCLComm(ncclUniqueId* nccl_id, int nranks,
           "Expected dev_id >= 0. But received dev_id is %d.", dev_id));
   ncclComm_t comm = nullptr;
-  PADDLE_ENFORCE_CUDA_SUCCESS(cudaSetDevice(dev_id));
+  SetDeviceId(dev_id);
   PADDLE_ENFORCE_CUDA_SUCCESS(
       platform::dynload::ncclCommInitRank(&comm, nranks, *nccl_id, rank));
paddle/fluid/platform/gpu_info.cc

@@ -226,7 +226,7 @@ void SetDeviceId(int id) {
                         "Device id must be less than GPU count, "
                         "but received id is: %d. GPU count is: %d.",
                         id, GetCUDADeviceCount()));
-  PADDLE_ENFORCE_CUDA_SUCCESS(cudaSetDevice(id));
+  PADDLE_RETRY_CUDA_SUCCESS(cudaSetDevice(id));
 }

 void GpuMemoryUsage(size_t *available, size_t *total) {
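Inside SetDeviceId itself, the enforce macro is replaced by the retry macro, so a transiently failing cudaSetDevice is re-attempted instead of aborting immediately. PADDLE_RETRY_CUDA_SUCCESS is a Paddle macro whose exact policy is not shown in this diff; the sketch below is only an assumed illustration of the retry idea (the attempt count and back-off are made up, and the helper name is hypothetical).

#include <cuda_runtime.h>

#include <chrono>
#include <stdexcept>
#include <thread>

// Hypothetical retry helper in the spirit of PADDLE_RETRY_CUDA_SUCCESS:
// re-issue the CUDA call a few times, sleeping briefly between attempts,
// and fail only if the last attempt still returns an error.
template <typename Callable>
void RetryCudaSuccess(Callable&& cuda_call, int max_attempts = 3) {
  cudaError_t err = cudaErrorUnknown;
  for (int i = 0; i < max_attempts; ++i) {
    err = cuda_call();
    if (err == cudaSuccess) return;
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
  }
  throw std::runtime_error(cudaGetErrorString(err));
}

int main() {
  // Usage mirroring the new line in SetDeviceId: select device 0 with
  // retries, throwing if it still fails after the last attempt.
  RetryCudaSuccess([] { return cudaSetDevice(0); });
  return 0;
}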
paddle/fluid/platform/nccl_helper.h

@@ -132,7 +132,7 @@ struct NCCLContextMap {
       }
       VLOG(1) << "init nccl rank:" << rank << ", nranks:" << nranks
               << ", gpu_id:" << gpu_id << ", dev_id:" << order_[i];
-      PADDLE_RETRY_CUDA_SUCCESS(cudaSetDevice(gpu_id));
+      SetDeviceId(gpu_id);
       PADDLE_RETRY_CUDA_SUCCESS(platform::dynload::ncclCommInitRank(
           comms.get() + i, nranks, *nccl_id, rank));
     }