Commit 718e1807 (unverified)
Authored by Yu Yang on Apr 11, 2018; committed via GitHub on Apr 11, 2018
Parents: 129859e7, c64190ec

Merge pull request #9821 from reyoung/feature/change_int64

Make cuda_helper.h Pass cpplint
Showing 2 changed files with 22 additions and 15 deletions:

    paddle/fluid/platform/cuda_helper.h  +11 -7
    paddle/fluid/platform/nccl_helper.h  +11 -8
paddle/fluid/platform/cuda_helper.h @ 718e1807
@@ -33,22 +33,26 @@ constexpr int PADDLE_CUDA_NUM_THREADS = 512;
 USE_CUDA_ATOMIC(Add, float);
 USE_CUDA_ATOMIC(Add, int);
 USE_CUDA_ATOMIC(Add, unsigned int);
-USE_CUDA_ATOMIC(Add, unsigned long long int);
+// CUDA API uses unsigned long long int, we cannot use uint64_t here.
+// It because unsigned long long int is not necessarily uint64_t
+USE_CUDA_ATOMIC(Add, unsigned long long int);  // NOLINT

 CUDA_ATOMIC_WRAPPER(Add, int64_t) {
-  static_assert(sizeof(int64_t) == sizeof(long long int),
+  // Here, we check long long int must be int64_t.
+  static_assert(sizeof(int64_t) == sizeof(long long int),  // NOLINT
                 "long long should be int64");
-  return CudaAtomicAdd(reinterpret_cast<unsigned long long int*>(address),
-                       static_cast<unsigned long long int>(val));
+  return CudaAtomicAdd(
+      reinterpret_cast<unsigned long long int*>(address),  // NOLINT
+      static_cast<unsigned long long int>(val));           // NOLINT
 }

 #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
 USE_CUDA_ATOMIC(Add, double);
 #else
 CUDA_ATOMIC_WRAPPER(Add, double) {
-  unsigned long long int* address_as_ull =
-      reinterpret_cast<unsigned long long int*>(address);
-  unsigned long long int old = *address_as_ull, assumed;
+  unsigned long long int* address_as_ull =                 // NOLINT
+      reinterpret_cast<unsigned long long int*>(address);  // NOLINT
+  unsigned long long int old = *address_as_ull, assumed;   // NOLINT

   do {
     assumed = old;
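The hunk leans on two macros defined near the top of cuda_helper.h, above the range shown here. Judging from the call sites, they plausibly expand along the following lines (a sketch of assumed shapes, not the file's verbatim definitions):

// Assumed macro shapes, inferred from the call sites above; the real
// definitions live earlier in cuda_helper.h and are outside this diff.
// CUDA_ATOMIC_WRAPPER(op, T) opens the definition of a device function
// CudaAtomic##op(T*, T); USE_CUDA_ATOMIC(op, T) forwards that function
// to the corresponding CUDA builtin atomic##op.
#define CUDA_ATOMIC_WRAPPER(op, T) \
  __device__ __forceinline__ T CudaAtomic##op(T* address, const T val)

#define USE_CUDA_ATOMIC(op, T) \
  CUDA_ATOMIC_WRAPPER(op, T) { return atomic##op(address, val); }

The double-precision fallback is cut off at the top of its compare-and-swap loop. For context, the standard pre-sm_60 atomicAdd(double) idiom from NVIDIA's CUDA C Programming Guide completes such a function as follows (a sketch; the remainder of the file's function simply falls outside the hunk):

// Sketch of the full CAS-loop fallback for atomicAdd on double.
CUDA_ATOMIC_WRAPPER(Add, double) {
  unsigned long long int* address_as_ull =
      reinterpret_cast<unsigned long long int*>(address);
  unsigned long long int old = *address_as_ull, assumed;

  do {
    assumed = old;
    // Reinterpret the stored bits as double, add val, and try to publish
    // the result; atomicCAS returns the value actually at the address,
    // so a concurrent update forces another iteration.
    old = atomicCAS(address_as_ull, assumed,
                    __double_as_longlong(val + __longlong_as_double(assumed)));
    // The integer comparison avoids looping forever when the stored value
    // is NaN (NaN != NaN under floating-point comparison).
  } while (assumed != old);

  return __longlong_as_double(old);
}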
paddle/fluid/platform/nccl_helper.h @ 718e1807
@@ -61,7 +61,7 @@ struct NCCLContext {
   ncclComm_t comm_;

   explicit NCCLContext(int dev_id)
-      : ctx_(new CUDADeviceContext(CUDAPlace(dev_id))) {}
+      : ctx_(new CUDADeviceContext(CUDAPlace(dev_id))), comm_{nullptr} {}

   cudaStream_t stream() const { return ctx_->stream(); }
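This one-line change value-initializes comm_ instead of leaving it indeterminate. It pairs with the `if (places.size() > 1)` guard introduced two hunks below: a single-device NCCLContextMap never calls ncclCommInitAll, so without this initializer comm_ would hold garbage. A minimal sketch of the resulting struct, with the ctx_ type assumed from its `new`-based construction and `->` usage (the full definition sits outside the hunk):

// Sketch only: field types inferred from how they are used in the diff.
struct NCCLContext {
  std::unique_ptr<CUDADeviceContext> ctx_;  // assumed owning pointer
  ncclComm_t comm_;

  explicit NCCLContext(int dev_id)
      : ctx_(new CUDADeviceContext(CUDAPlace(dev_id))), comm_{nullptr} {}

  cudaStream_t stream() const { return ctx_->stream(); }
};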
@@ -95,6 +95,7 @@ struct NCCLContextMap {
   std::vector<int> order_;

   explicit NCCLContextMap(const std::vector<platform::Place> &places) {
+    PADDLE_ENFORCE(!places.empty());
     order_.reserve(places.size());
     for (auto &p : places) {
       int dev_id = boost::get<CUDAPlace>(p).device;
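The single added line, PADDLE_ENFORCE(!places.empty()), rejects an empty place list at construction time; presumably the point is to fail fast, since a zero-device context map would otherwise be built silently and only misbehave later when its contexts are looked up.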
@@ -105,15 +106,17 @@ struct NCCLContextMap {
         order_.size(), contexts_.size(),
         "NCCL Context Map does not support contain two or more same device");
-    std::vector<ncclComm_t> comms;
-    comms.resize(order_.size());
-    PADDLE_ENFORCE(platform::dynload::ncclCommInitAll(
-        &comms[0], static_cast<int>(order_.size()), &order_[0]));
-    int i = 0;
-    for (auto &dev_id : order_) {
-      contexts_.at(dev_id).comm_ = comms[i++];
+    if (places.size() > 1) {
+      std::vector<ncclComm_t> comms;
+      comms.resize(order_.size());
+      PADDLE_ENFORCE(platform::dynload::ncclCommInitAll(
+          &comms[0], static_cast<int>(order_.size()), &order_[0]));
+      int i = 0;
+      for (auto &dev_id : order_) {
+        contexts_.at(dev_id).comm_ = comms[i++];
+      }
     }
   }
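Taken together, the nccl_helper.h hunks change single-device behavior: a map over one place now skips communicator setup entirely. A hypothetical usage sketch, assuming these types live in the paddle::platform namespace as the file path suggests:

// Hypothetical single-GPU construction; names assumed from the header.
std::vector<paddle::platform::Place> places;
places.emplace_back(paddle::platform::CUDAPlace(0));

paddle::platform::NCCLContextMap ctx_map(places);
// places.size() == 1, so the `if (places.size() > 1)` branch is skipped:
// ncclCommInitAll never runs and the context's comm_ stays nullptr,
// which is exactly why the constructor now initializes it explicitly.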