Unverified commit 964497b5
Authored on Mar 30, 2023 by Yuang Liu; committed via GitHub on Mar 30, 2023
use int64 for c split (#52279)
Parent: bd3b6adf

Showing 4 changed files with 21 additions and 21 deletions (+21, -21).
paddle/fluid/operators/collective/c_split_op.cu          +16 -16
paddle/fluid/operators/collective/partial_recv_op.cc      +1  -1
paddle/fluid/operators/collective/partial_recv_op.cu.cc   +2  -2
paddle/fluid/operators/collective/partial_send_op.cu.cc   +2  -2
paddle/fluid/operators/collective/c_split_op.cu
@@ -21,10 +21,10 @@ limitations under the License. */
 namespace paddle {
 namespace operators {

-static constexpr int kNumCUDAThreads = 512;
-static constexpr int kNumMaxinumNumBlocks = 4096;
+static constexpr int64_t kNumCUDAThreads = 512;
+static constexpr int64_t kNumMaxinumNumBlocks = 4096;

-static inline int NumBlocks(const int N) {
+static inline int64_t NumBlocks(const int64_t N) {
   return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
                   kNumMaxinumNumBlocks);
 }
@@ -32,21 +32,21 @@ static inline int NumBlocks(const int N) {
 template <typename T>
 __global__ void SplitFromRank(const T* input,
                               T* output,
-                              const int rows,
-                              const int columns,
+                              const int64_t rows,
+                              const int64_t columns,
                               const int rank,
                               const int nranks,
-                              const int limit) {
-  CUDA_KERNEL_LOOP(i, limit) {
-    int row = i / columns;
-    int col = i % columns;
+                              const int64_t limit) {
+  CUDA_KERNEL_LOOP_TYPE(i, limit, int64_t) {
+    int64_t row = i / columns;
+    int64_t col = i % columns;

-    int block = columns / nranks;
-    int start = block * rank;
-    int end = start + block;
+    int64_t block = columns / nranks;
+    int64_t start = block * rank;
+    int64_t end = start + block;

     if (col >= start && col < end) {
-      int idx = block * row + col % block;
+      int64_t idx = block * row + col % block;
       output[idx] = input[i];
     }
   }
@@ -93,9 +93,9 @@ class CSplitOpCUDAKernel : public framework::OpKernel<T> {
     auto remain_ddim = phi::slice_ddim(dims, 0, dims_size - 1);
     int64_t remain_numel = phi::product(remain_ddim);

-    int limit = x->numel();
-    int blocks = NumBlocks(limit);
-    int threads = kNumCUDAThreads;
+    int64_t limit = x->numel();
+    int64_t blocks = NumBlocks(limit);
+    int64_t threads = kNumCUDAThreads;

     dims[dims_size - 1] /= nranks;
     out->mutable_data<T>(dims, place);
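The kernel loop switches from CUDA_KERNEL_LOOP(i, limit), whose induction variable is a plain int, to CUDA_KERNEL_LOOP_TYPE(i, limit, int64_t). Below is a minimal sketch under the assumption that the macro is a typed grid-stride loop (Paddle's real definition may differ in detail); the FillIota kernel, dev_ptr, and the sizes in the launch comment are hypothetical:

#include <algorithm>
#include <cstdint>

// Assumed sketch of a typed grid-stride loop macro; not Paddle's exact
// definition.
#define CUDA_KERNEL_LOOP_TYPE(i, n, index_type)                          \
  for (index_type i = static_cast<index_type>(blockIdx.x) * blockDim.x + \
                      threadIdx.x;                                        \
       i < (n); i += static_cast<index_type>(blockDim.x) * gridDim.x)

static constexpr int64_t kNumCUDAThreads = 512;
static constexpr int64_t kNumMaxinumNumBlocks = 4096;

static inline int64_t NumBlocks(const int64_t N) {
  return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
                  kNumMaxinumNumBlocks);
}

// Toy kernel (hypothetical) that indexes with int64_t the same way the
// patched SplitFromRank does.
template <typename T>
__global__ void FillIota(T* out, int64_t limit) {
  CUDA_KERNEL_LOOP_TYPE(i, limit, int64_t) { out[i] = static_cast<T>(i); }
}

// Launch sketch: the grid is capped at kNumMaxinumNumBlocks, and the
// grid-stride loop covers whatever the capped grid cannot reach in one pass.
// int64_t limit = 5000000000LL;  // more elements than INT_MAX
// FillIota<float><<<NumBlocks(limit), kNumCUDAThreads>>>(dev_ptr, limit);

Without a typed loop, indices above INT_MAX would wrap even when the launch configuration itself is valid, since the capped grid relies on the loop counter to reach the tail of the tensor.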
paddle/fluid/operators/collective/partial_recv_op.cc
@@ -69,7 +69,7 @@ class PartialRecvOp : public framework::OperatorWithKernel {
                               out_shape[i]));
     }
     auto out_dims = phi::make_ddim(out_shape);
-    int numel = phi::product(out_dims);
+    int64_t numel = phi::product(out_dims);
     PADDLE_ENFORCE_EQ(
         (numel % num),
         0,
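Because the dimensions of out_dims are 64-bit, the product can exceed INT_MAX, and keeping numel as int could wrap before the divisibility check even runs. For example (hypothetical shape, not from the patch), an out_shape of [3, 8192, 131072] has 3,221,225,472 elements; stored in a 32-bit int that wraps to -1,073,741,824, so both the numel % num enforcement and the later numel / num split would operate on a corrupted value. With int64_t the count is preserved.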
paddle/fluid/operators/collective/partial_recv_op.cu.cc
@@ -68,8 +68,8 @@ class PartialRecvOpCUDAKernel : public framework::OpKernel<T> {
       auto place = ctx.GetPlace();
       out->mutable_data<T>(out_dims, place);
-      int recv_numel = numel / num;
-      int offset = recv_numel * id;
+      int64_t recv_numel = numel / num;
+      int64_t offset = recv_numel * id;

       auto map = distributed::ProcessGroupMapFromGid::getInstance();
       if (map->has(rid)) {
paddle/fluid/operators/collective/partial_send_op.cu.cc
@@ -62,8 +62,8 @@ class PartialSendCUDAKernel : public framework::OpKernel<T> {
         platform::errors::InvalidArgument(
             "The input numel (%d) must be divisible by num(%d)", numel, num));

-    int send_numel = numel / num;
-    int offset = send_numel * id;
+    int64_t send_numel = numel / num;
+    int64_t offset = send_numel * id;

     auto map = distributed::ProcessGroupMapFromGid::getInstance();
     if (map->has(rid)) {
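The same widening is applied symmetrically in partial_recv. A small host-side sketch (hypothetical sizes, not from the patch) of why the offset in particular needs 64 bits: even when the per-rank chunk fits in a 32-bit int, send_numel * id for the higher ranks can exceed INT_MAX.

#include <cstdint>
#include <cstdio>

int main() {
  int64_t numel = 4LL * 1024 * 1024 * 1024;  // 2^32 elements (hypothetical)
  int num = 8;                                // number of partial chunks
  int64_t send_numel = numel / num;           // 536,870,912: still fits in int32
  for (int id = 0; id < num; ++id) {
    int64_t offset = send_numel * id;         // from id = 4 on, exceeds INT_MAX
    std::printf("id=%d offset=%lld\n", id, static_cast<long long>(offset));
  }
  return 0;
}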