机器未来 / Paddle
Forked from PaddlePaddle / Paddle
Commit c31da789
Authored Feb 28, 2019 by jerrywgz

refine code, test=develop

Parent: e8a8fe07
Showing 1 changed file with 15 additions and 7 deletions.
paddle/fluid/operators/detection/distribute_fpn_proposals_op.cu  +15 −7
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -47,7 +47,7 @@ static inline int NumBlocks(const int N) {
                    kNumMaxinumNumBlocks);
 }
 
-static inline void transform_lod(const int* length_lod, const int lod_size,
-                                 int* offset_lod) {
+static inline void TransLoD(const int* length_lod, const int lod_size,
+                            int* offset_lod) {
   int offset = 0;
   for (int i = 0; i < lod_size; ++i) {
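A note on what the renamed TransLoD (formerly transform_lod) computes: Paddle stores a LoD either length-based (number of RoIs per image) or offset-based (cumulative start offsets), and this helper converts the former into the latter, which is an exclusive prefix sum. The loop body is collapsed out of the hunk above, so the sketch below fills it in with the implementation implied by the call site further down, which passes lod_size + 1 and then reads offset_lod[lod_size] as the total RoI count; treat the body as an inference, not the verbatim source.

#include <cstdio>

// Sketch of the renamed helper: turn a length-based LoD into an
// offset-based LoD via an exclusive prefix sum. The loop body is an
// assumption; only the signature and loop header appear in the diff.
static inline void TransLoD(const int* length_lod, const int lod_size,
                            int* offset_lod) {
  int offset = 0;
  for (int i = 0; i < lod_size; ++i) {
    offset_lod[i] = offset;   // start offset of segment i
    offset += length_lod[i];  // accumulate segment length
  }
}

int main() {
  // Hypothetical batch: 3 images holding 3, 1, and 2 RoIs. Passing
  // lod_size + 1 (as the kernel does) makes the last entry the total.
  const int length[4] = {3, 1, 2, 0};
  int offset[4];
  TransLoD(length, 4, offset);
  for (int i = 0; i < 4; ++i) printf("%d ", offset[i]);  // prints: 0 3 4 6
  return 0;
}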
@@ -75,7 +75,7 @@ static __device__ inline T RoIArea(const T* box, bool normalized) {
 }
 
 template <class T>
-static __global__ void GPUDistributeHelper(
+static __global__ void GPUDistFpnProposalsHelper(
     const int nthreads, const T* rois, const int lod_size,
     const int refer_level, const int refer_scale, const int max_level,
     const int min_level, int* roi_batch_id_data, int* sub_lod_list,
@@ -83,11 +83,13 @@ static __global__ void GPUDistributeHelper(
   CUDA_1D_KERNEL_LOOP(i, nthreads) {
     const T* offset_roi = rois + i * BBoxSize;
     int roi_batch_ind = roi_batch_id_data[i];
+    // get the target level of current rois
     T roi_area = RoIArea(offset_roi, false);
     T roi_scale = sqrt(roi_area);
     int tgt_lvl = floor(log2(roi_scale / refer_scale) + refer_level);
     tgt_lvl = min(max_level, max(tgt_lvl, min_level));
     target_lvls[i] = tgt_lvl;
+    // compute number of rois in the same batch and same target level
     platform::CudaAtomicAdd(sub_lod_list + tgt_lvl * lod_size + roi_batch_ind,
                             1);
   }
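The arithmetic in this hunk is the FPN level-assignment rule of Lin et al.: a RoI is routed to level floor(refer_level + log2(sqrt(area) / refer_scale)), clamped to [min_level, max_level], so a RoI whose side length equals refer_scale lands on refer_level, and halving the side length drops it one level. Below is a host-side restatement for illustration; the parameter values refer_level = 4, refer_scale = 224 and levels 2 to 5 are typical FPN defaults assumed for the example, since the diff does not show the operator's attributes.

#include <algorithm>
#include <cmath>
#include <cstdio>

// Host-side restatement of the per-RoI logic in GPUDistFpnProposalsHelper.
// The values used in main() are typical FPN defaults, not taken from the diff.
int TargetLevel(float roi_area, int refer_level, int refer_scale,
                int min_level, int max_level) {
  float roi_scale = std::sqrt(roi_area);  // equivalent side length
  int tgt_lvl = static_cast<int>(
      std::floor(std::log2(roi_scale / refer_scale) + refer_level));
  return std::min(max_level, std::max(tgt_lvl, min_level));  // clamp to range
}

int main() {
  printf("%d\n", TargetLevel(224.f * 224.f, 4, 224, 2, 5));  // 4: reference size
  printf("%d\n", TargetLevel(112.f * 112.f, 4, 224, 2, 5));  // 3: half the side
  printf("%d\n", TargetLevel(16.f * 16.f, 4, 224, 2, 5));    // 2: clamped at min
  return 0;
}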
@@ -118,6 +120,7 @@ class GPUDistributeFpnProposalsOpKernel : public framework::OpKernel<T> {
     auto& dev_ctx = ctx.template device_context<DeviceContext>();
 
+    // get batch id by lod in CPU
     Tensor roi_batch_id_list;
     roi_batch_id_list.Resize({roi_num});
     int* roi_batch_id_data =
@@ -127,6 +130,7 @@ class GPUDistributeFpnProposalsOpKernel : public framework::OpKernel<T> {
         roi_batch_id_data[i] = n;
       }
     }
+    // copy batch id list to GPU
     Tensor roi_batch_id_list_gpu;
     framework::TensorCopySync(roi_batch_id_list, dev_ctx.GetPlace(),
                               &roi_batch_id_list_gpu);
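For context, the loop whose tail opens this hunk assigns each RoI its image index by walking the input LoD; only the innermost assignment roi_batch_id_data[i] = n; is visible above. The runnable sketch below reconstructs the surrounding loops from that line, with the name fpn_rois_lod for the offset-based input LoD assumed rather than taken from the hunk.

#include <cstdio>
#include <vector>

int main() {
  // Hypothetical offset-based input LoD for 3 images with 3, 1, and 2 RoIs.
  std::vector<size_t> fpn_rois_lod = {0, 3, 4, 6};
  int lod_size = 3;
  std::vector<int> roi_batch_id_data(6);

  // Reconstruction of the batch-id loop: every RoI in the half-open
  // range [fpn_rois_lod[n], fpn_rois_lod[n + 1]) belongs to image n.
  for (int n = 0; n < lod_size; ++n) {
    for (size_t i = fpn_rois_lod[n]; i < fpn_rois_lod[n + 1]; ++i) {
      roi_batch_id_data[i] = n;
    }
  }

  for (int v : roi_batch_id_data) printf("%d ", v);  // prints: 0 0 0 1 2 2
  return 0;
}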
@@ -140,7 +144,9 @@ class GPUDistributeFpnProposalsOpKernel : public framework::OpKernel<T> {
     int blocks = NumBlocks(roi_num);
     int threads = kNumCUDAThreads;
 
-    GPUDistributeHelper<T><<<blocks, threads>>>(
+    // get target levels and sub_lod list
+    GPUDistFpnProposalsHelper<T><<<blocks, threads>>>(
         roi_num, fpn_rois->data<T>(), lod_size, refer_level, refer_scale,
         max_level, min_level, roi_batch_id_list_gpu.data<int>(),
         sub_lod_list_data, target_lvls_data);
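A remark on the launch configuration visible in this hunk: NumBlocks caps the grid size at kNumMaxinumNumBlocks (the misspelling is the identifier this file really uses), which is only safe because CUDA_1D_KERNEL_LOOP is a grid-stride loop, so a capped grid still visits every one of the roi_num indices. The sketch below shows how these helpers are conventionally defined in Paddle's detection operators; the constants 512 and 4096 are the customary values, assumed here since the definitions sit outside the hunks shown.

#include <algorithm>

// Conventional launch helpers (a sketch; the real definitions live near
// the top of this file and are not part of the hunks shown).
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;

static inline int NumBlocks(const int N) {
  return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
                  kNumMaxinumNumBlocks);
}

// Grid-stride loop: thread (blockIdx.x * blockDim.x + threadIdx.x) handles
// index i, then i + blockDim.x * gridDim.x, and so on, so every index in
// [0, n) is processed even when the grid is capped.
#define CUDA_1D_KERNEL_LOOP(i, n)                              \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
       i += blockDim.x * gridDim.x)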
@@ -166,13 +172,14 @@ class GPUDistributeFpnProposalsOpKernel : public framework::OpKernel<T> {
                                          memory::Allocator::kScratchpad);
     // Run sorting operation
+    // sort target level to get corresponding index
     cub::DeviceRadixSort::SortPairsDescending<int, int>(
         d_temp_storage->ptr(), temp_storage_bytes, target_lvls_data, keys_out,
         idx_in, idx_out, roi_num);
 
     int* restore_idx_data =
         restore_index->mutable_data<int>({roi_num, 1}, dev_ctx.GetPlace());
-    // Run sorting operation
+    // sort current index to get restore index
     cub::DeviceRadixSort::SortPairsDescending<int, int>(
         d_temp_storage->ptr(), temp_storage_bytes, idx_out, keys_out, idx_in,
         restore_idx_data, roi_num);
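Why two descending radix sorts: the first call, keyed on target_lvls with the identity permutation idx_in as values, is an argsort that groups RoI indices by target level; the second call is then used to recover the permutation that restores the caller's original RoI order after outputs have been produced in sorted order. The host-side sketch below illustrates the idea, computing the inverse permutation with a direct scatter in place of the second sort.

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

// Host-side illustration of the argsort-and-restore pattern that the two
// cub::DeviceRadixSort::SortPairsDescending calls implement on the GPU.
int main() {
  std::vector<int> target_lvls = {5, 3, 4, 3, 5};
  const int n = static_cast<int>(target_lvls.size());

  // First sort (argsort): RoI indices ordered by descending target level.
  std::vector<int> idx_out(n);
  std::iota(idx_out.begin(), idx_out.end(), 0);  // identity, like idx_in
  std::stable_sort(idx_out.begin(), idx_out.end(), [&](int a, int b) {
    return target_lvls[a] > target_lvls[b];
  });

  // Inverse permutation: original RoI idx_out[pos] now lives at position
  // pos of the sorted order. (The diff computes this with a second sort.)
  std::vector<int> restore_idx(n);
  for (int pos = 0; pos < n; ++pos) restore_idx[idx_out[pos]] = pos;

  for (int v : idx_out) printf("%d ", v);      // prints: 0 4 2 1 3
  printf("\n");
  for (int v : restore_idx) printf("%d ", v);  // prints: 0 3 2 4 1
  return 0;
}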
@@ -183,7 +190,8 @@ class GPUDistributeFpnProposalsOpKernel : public framework::OpKernel<T> {
     for (int i = 0; i < num_level; ++i) {
       Tensor sub_lod = sub_lod_list.Slice(i, i + 1);
       int* sub_lod_data = sub_lod.data<int>();
-      transform_lod(sub_lod_data, lod_size + 1, offset_lod_data);
+      // transfer length-based lod to offset-based lod
+      TransLoD(sub_lod_data, lod_size + 1, offset_lod_data);
       int sub_rois_num = offset_lod_data[lod_size];
       Tensor sub_idx = index_out_t.Slice(0, sub_rois_num);
...