机器未来 / Paddle (fork of PaddlePaddle / Paddle)

Unverified commit edff59b1
Authored Jun 24, 2022 by wawltor; committed via GitHub on Jun 24, 2022
Parent: e700ffdc

[cherry-pick] fix the cumsum big shape and random bug (#43777)

Showing 1 changed file with 15 additions and 13 deletions:
paddle/phi/kernels/gpu/cumsum_kernel.cu  (+15, -13)
@@ -152,10 +152,8 @@ __global__ void BlockScanKernel(T* d_out,
   } temp_storage;
 
   int bx = blockIdx.x;
-  int by = blockIdx.y;
 
   BlockPrefixCallbackOp<T> prefix_op(0);
-  T block_aggregate = static_cast<T>(0);
 
   // Obtain this block's segment of consecutive keys (blocked across threads)
   int item_per_block = BLOCK_THREADS * ITEMS_PER_THREAD;
@@ -168,7 +166,7 @@ __global__ void BlockScanKernel(T* d_out,
       valid_item = scan_size;
     }
 
-    int offset = bx * scan_size + block_offset + by * (inner_size * scan_size);
+    int offset = block_offset + bx * scan_size;
 
     T thread_keys[ITEMS_PER_THREAD];
     BlockLoadT(temp_storage.load)
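Note on the two hunks above: with blockIdx.y no longer consulted, each thread block locates its scan row from blockIdx.x alone, and that row is assumed to start at bx * scan_size; the unused block_aggregate local is dropped at the same time. Below is a minimal host-side sketch of the addressing the new offset formula implies (illustrative values only, not Paddle code):

#include <cassert>

int main() {
  const int scan_size = 1000;
  const int item_per_block = 128 * 4;  // BLOCK_THREADS * ITEMS_PER_THREAD
  for (int bx = 0; bx < 6; ++bx) {     // pretend outer_size * inner_size == 6 rows
    for (int block_offset = 0; block_offset < scan_size;
         block_offset += item_per_block) {
      int offset = block_offset + bx * scan_size;  // the new formula
      // every chunk stays inside row bx, so blocks never touch each other's rows
      assert(offset >= bx * scan_size && offset < (bx + 1) * scan_size);
    }
  }
  return 0;
}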
@@ -260,8 +258,10 @@ void CumsumKernel(const Context& dev_ctx,
   dim3 blocks(32, 8);
   dim3 transpose_grids((width + tile_size - 1) / tile_size,
                        (height + tile_size - 1) / tile_size);
-  out->Resize(out_dims);
-  auto* tmp_data = out->data<T>();
+  DenseTensor tmp_tensor;
+  tmp_tensor.Resize(out_dims);
+  auto* tmp_data = dev_ctx.template Alloc<T>(&tmp_tensor);
 
   T* next_in_data = out_data;
   T* next_out_data = tmp_data;
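A plausible reading of this hunk: previously tmp_data pointed back into out (out->data<T>()), so the next_in_data/next_out_data ping-pong below could read and write the same buffer, which would explain the "random" results the commit message mentions; allocating a separate tmp_tensor gives the pipeline two distinct buffers. A small host-side sketch of the invariant (swap_ptr is defined locally here for illustration; only its name comes from the diff):

#include <cassert>
#include <utility>
#include <vector>

template <typename T>
void swap_ptr(T*& a, T*& b) { std::swap(a, b); }

int main() {
  std::vector<float> out(1024);   // final output buffer
  std::vector<float> tmp(1024);   // separate scratch buffer, as in the fix

  float* next_in = out.data();
  float* next_out = tmp.data();
  assert(next_in != next_out);    // aliased buffers would let a stage overwrite
                                  // data it has not yet read
  // ... run a stage that reads next_in and writes next_out ...
  swap_ptr(next_in, next_out);    // ping-pong the roles for the next stage
  return 0;
}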
@@ -281,6 +281,8 @@ void CumsumKernel(const Context& dev_ctx,
   // Consider the size of shared memory, here block size is 128
   dim3 scan_grid(outer_size, inner_size);
   dim3 reverse_grid = scan_grid;
+  int64_t grid_size = outer_size * inner_size;
   if (reverse) {
     if (transpose) {
       reverse_grid.x = scan_grid.y;
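The new grid_size presumably targets the "big shape" half of the fix: the old 2D launch placed inner_size in gridDim.y, which CUDA caps at 65535, while gridDim.x may be as large as 2^31 - 1; computing outer_size * inner_size in int64_t also avoids 32-bit overflow for very large tensors. A quick way to confirm the device limits this relies on (standard CUDA runtime API, not Paddle code):

#include <cstdio>
#include <cuda_runtime.h>

int main() {
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  // Typically prints (2147483647, 65535, 65535): x is far roomier than y or z.
  std::printf("maxGridSize = (%d, %d, %d)\n",
              prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
  return 0;
}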
@@ -295,17 +297,17 @@ void CumsumKernel(const Context& dev_ctx,
     }
   }
   if (!transpose && !reverse) {
-    BlockScanKernel<T, 128, 4><<<scan_grid, 128, 0, dev_ctx.stream()>>>(
+    BlockScanKernel<T, 128, 4><<<grid_size, 128, 0, dev_ctx.stream()>>>(
         out_data, in_data, outer_size, inner_size, scan_size, exclusive);
   } else {
     BlockScanKernel<T, 128, 4>
-        <<<scan_grid, 128, 0, dev_ctx.stream()>>>(
+        <<<grid_size, 128, 0, dev_ctx.stream()>>>(
             next_out_data,
             next_in_data,
             outer_size,
             inner_size,
             scan_size,
             exclusive);
   }
   swap_ptr(next_in_data, next_out_data);
   if (reverse) {
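To make the new launch geometry concrete, here is a self-contained toy program that mimics it: a flat 1D grid of outer_size * inner_size blocks, one block per (outer, inner) row, with the row located purely from blockIdx.x. The kernel body is a deliberately naive serial scan, so it demonstrates only the grid and offset scheme, not the cub::BlockScan logic of the real kernel; every name in it is hypothetical.

#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>
#include <vector>

__global__ void NaiveRowScanKernel(float* d_out, const float* d_in,
                                   int scan_size) {
  int64_t bx = blockIdx.x;               // one block per contiguous row
  int64_t row_start = bx * scan_size;    // offset = block_offset + bx * scan_size
  if (threadIdx.x == 0) {                // naive single-thread scan, illustration only
    float acc = 0.f;
    for (int i = 0; i < scan_size; ++i) {
      acc += d_in[row_start + i];
      d_out[row_start + i] = acc;        // inclusive cumulative sum of this row
    }
  }
}

int main() {
  const int outer_size = 2, inner_size = 3, scan_size = 8;
  const int64_t grid_size = int64_t(outer_size) * inner_size;  // 64-bit, as in the fix
  const int64_t n = grid_size * scan_size;

  std::vector<float> h_in(n, 1.f), h_out(n, 0.f);
  float *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemcpy(d_in, h_in.data(), n * sizeof(float), cudaMemcpyHostToDevice);

  // 1D grid of grid_size blocks, 128 threads each, mirroring the new launch shape.
  NaiveRowScanKernel<<<static_cast<unsigned int>(grid_size), 128>>>(
      d_out, d_in, scan_size);
  cudaMemcpy(h_out.data(), d_out, n * sizeof(float), cudaMemcpyDeviceToHost);

  std::printf("last element of each row should be %d: got %.0f\n", scan_size,
              h_out[n - 1]);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}

Compiled with nvcc and run on any recent GPU, the program should print 8, i.e. the inclusive cumulative sum of a row of ones, for the last element of the final row.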