BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit 17c8e3ad (unverified)
Authored by Aurelius84 on Dec 18, 2020; committed via GitHub on Dec 18, 2020
Polish code in gpu_launch_config.h (#29730)
Parent commit: 068d905e
Showing 1 changed file with 21 additions and 20 deletions.

paddle/fluid/platform/gpu_launch_config.h  (+21, -20)
old mode 100755, new mode 100644
@@ -37,19 +37,20 @@ struct GpuLaunchConfig {
 
 inline GpuLaunchConfig GetGpuLaunchConfig1D(
     const platform::CUDADeviceContext& context, int element_count) {
-  PADDLE_ENFORCE_GT(element_count, 0, platform::errors::InvalidArgument(
-                                          "element count should greater than 0,"
-                                          " but received value is %d.",
-                                          element_count));
+  PADDLE_ENFORCE_GT(element_count, 0,
+                    platform::errors::InvalidArgument(
+                        "element count should be greater than 0,"
+                        " but received value is: %d.",
+                        element_count));
 
   const int theory_thread_count = element_count;
   // Get Max threads in all SM
-  int max_pyhsical_threads = context.GetMaxPhysicalThreadCount();
+  int max_physical_threads = context.GetMaxPhysicalThreadCount();
   int sm = context.GetSMCount();
 
-  // Compute pyhsical threads we need, should small than max sm threads
+  // Compute physical threads we need, should small than max sm threads
   const int physical_thread_count =
-      std::min(max_pyhsical_threads, theory_thread_count);
+      std::min(max_physical_threads, theory_thread_count);
 
   // Need get from device
   const int thread_per_block = std::min(1024, context.GetMaxThreadsPerBlock());
@@ -64,18 +65,18 @@ inline GpuLaunchConfig GetGpuLaunchConfig1D(
 }
 
 inline GpuLaunchConfig GetGpuLaunchConfig2D(
-    const platform::CUDADeviceContext& context, int xdim, int ydim) {
-  PADDLE_ENFORCE_GT(xdim, 0, platform::errors::InvalidArgument(
-                                 "x dim number should greater than 0,"
-                                 " but received value is:%d",
-                                 xdim));
-  PADDLE_ENFORCE_GT(ydim, 0, platform::errors::InvalidArgument(
-                                 "y dim number should greater than 0,"
-                                 " but received value is:%d",
-                                 ydim));
+    const platform::CUDADeviceContext& context, int x_dim, int y_dim) {
+  PADDLE_ENFORCE_GT(x_dim, 0, platform::errors::InvalidArgument(
+                                  "x dim number should greater than 0,"
+                                  " but received value is:%d",
+                                  x_dim));
+  PADDLE_ENFORCE_GT(y_dim, 0, platform::errors::InvalidArgument(
+                                  "y dim number should greater than 0,"
+                                  " but received value is:%d",
+                                  y_dim));
 
   const int kThreadsPerBlock = 256;
-  int block_cols = std::min(xdim, kThreadsPerBlock);
+  int block_cols = std::min(x_dim, kThreadsPerBlock);
   int block_rows = std::max(kThreadsPerBlock / block_cols, 1);
 
   int max_physical_threads = context.GetMaxPhysicalThreadCount();
@@ -83,11 +84,11 @@ inline GpuLaunchConfig GetGpuLaunchConfig2D(
   GpuLaunchConfig config;
   // Noticed, block size is not align to 32, if needed do it yourself.
-  config.theory_thread_count = dim3(xdim, ydim, 1);
+  config.theory_thread_count = dim3(x_dim, y_dim, 1);
   config.thread_per_block = dim3(block_cols, block_rows, 1);
 
-  int grid_x = std::min(DivUp(xdim, block_cols), max_blocks);
-  int grid_y = std::min(max_blocks / grid_x, std::max(ydim / block_rows, 1));
+  int grid_x = std::min(DivUp(x_dim, block_cols), max_blocks);
+  int grid_y = std::min(max_blocks / grid_x, std::max(y_dim / block_rows, 1));
 
   config.block_per_grid = dim3(grid_x, grid_y, 1);
   return config;
 }
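To make the 1D path in the first hunk easier to check, here is a minimal standalone sketch of its arithmetic; the device-dependent numbers (max_physical_threads, max_threads_per_block) are assumed placeholders rather than values queried from a CUDADeviceContext as in the header.

#include <algorithm>
#include <cstdio>

int main() {
  const int element_count = 1 << 20;           // caller-supplied problem size
  const int max_physical_threads = 2048 * 80;  // assumed device-wide thread capacity
  const int max_threads_per_block = 1024;      // assumed device limit

  // Mirrors the hunk: request one thread per element, capped by hardware.
  const int theory_thread_count = element_count;
  const int physical_thread_count =
      std::min(max_physical_threads, theory_thread_count);
  const int thread_per_block = std::min(1024, max_threads_per_block);

  std::printf("threads/block = %d, physical threads = %d\n",
              thread_per_block, physical_thread_count);
  return 0;
}

With these assumed numbers the block size stays at 1024 and only 163840 of the 1048576 requested threads can be resident at once, which is exactly the capping the std::min calls express.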
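The 2D block/grid math from the last two hunks can be checked the same way. DivUp is assumed to be the ceiling division defined earlier in gpu_launch_config.h (it is not part of this diff), and max_blocks stands in for the device-derived block budget computed in the collapsed lines.

#include <algorithm>
#include <cstdio>

// Assumed semantics of the DivUp helper used by GetGpuLaunchConfig2D.
inline int DivUp(int a, int b) { return (a + b - 1) / b; }

int main() {
  const int x_dim = 1000, y_dim = 100;  // example problem shape
  const int max_blocks = 4096;          // assumed device-derived block budget

  const int kThreadsPerBlock = 256;
  int block_cols = std::min(x_dim, kThreadsPerBlock);           // 256
  int block_rows = std::max(kThreadsPerBlock / block_cols, 1);  // 1

  int grid_x = std::min(DivUp(x_dim, block_cols), max_blocks);  // ceil(1000/256) = 4
  int grid_y = std::min(max_blocks / grid_x,
                        std::max(y_dim / block_rows, 1));       // min(1024, 100) = 100

  std::printf("block = (%d, %d), grid = (%d, %d)\n",
              block_cols, block_rows, grid_x, grid_y);
  return 0;
}

So for a 1000 x 100 problem this logic yields 256 x 1 blocks on a 4 x 100 grid: four blocks tile the x dimension and each grid row covers one y index.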
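Finally, a hedged sketch of how a caller might consume the returned config. The kernel and wrapper below are hypothetical, and the sketch assumes that the GpuLaunchConfig members block_per_grid and thread_per_block are dim3 (as the dim3(...) assignments in the last hunk suggest) and that CUDADeviceContext exposes its CUDA stream via stream().

#include "paddle/fluid/platform/gpu_launch_config.h"

// Hypothetical element-wise kernel used only to illustrate the launch.
__global__ void ScaleKernel(float* data, int n, float alpha) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n) data[idx] *= alpha;
}

void LaunchScale(const paddle::platform::CUDADeviceContext& ctx,
                 float* data, int n) {
  // Assumption: the 1D helper fills block_per_grid / thread_per_block.
  auto config = paddle::platform::GetGpuLaunchConfig1D(ctx, n);
  ScaleKernel<<<config.block_per_grid, config.thread_per_block, 0,
                ctx.stream()>>>(data, n, /*alpha=*/2.0f);
}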