Commit ea093283

Authored Dec 13, 2017 by sweetsky0901
Project: 机器未来 / Paddle (forked from PaddlePaddle / Paddle)

for code review by zhaolong

Parent: 89de58d9
Showing 3 changed files with 55 additions and 55 deletions (+55 -55)

paddle/operators/spp_op.cc      +16  -8
paddle/operators/spp_op.cu.cc    +6  -5
paddle/operators/spp_op.h       +33 -42
paddle/operators/spp_op.cc
...
...
@@ -31,9 +31,15 @@ class SppOpMaker : public framework::OpProtoAndCheckerMaker {
"M = C * H * W"
);
AddAttr
<
int
>
(
"pyramid_height"
,
"(int), multi level pooling"
);
AddComment
(
R"DOC(
"Does spatial pyramid pooling on the input image by taking the max,
etc. within regions so that the result vector of different sized
images are of the same size
"With spatial pyramid pooling, the input image can
be of any sizes. This not only allows arbitrary aspect
ratios, but also allows arbitrary scales. We can resize
the input image to any scale (e.g., min(w, h)=180, 224,
...) and apply the same deep network. When the
input image is at different scales, the network (with
the same filter sizes) will extract features at different
scales. The scales play important roles in traditional
methods.
Input shape: $(N, C_{in}, H_{in}, W_{in})$
Output shape: $(H_{out}, W_{out})$
Where
...
...
@@ -41,6 +47,7 @@ class SppOpMaker : public framework::OpProtoAndCheckerMaker {
           H_{out} = N \\
           W_{out} = (((4^pyramid_height) - 1) / (4 - 1))$ * C_{in}
         $$
+        paper https://arxiv.org/pdf/1406.4729v4.pdf
         )DOC");
   }
 };
...
...
@@ -79,8 +86,9 @@ class SppOpGrad : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;
 REGISTER_OP(spp, ops::SppOp, ops::SppOpMaker, spp_grad, ops::SppOpGrad);
-REGISTER_OP_CPU_KERNEL(spp, ops::SppKernel<paddle::platform::CPUPlace, float>,
-                       ops::SppKernel<paddle::platform::CPUPlace, double>);
-REGISTER_OP_CPU_KERNEL(
-    spp_grad, ops::SppGradKernel<paddle::platform::CPUPlace, float>,
-    ops::SppGradKernel<paddle::platform::CPUPlace, double>);
+REGISTER_OP_CPU_KERNEL(
+    spp, ops::SppKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::SppKernel<paddle::platform::CPUDeviceContext, double>);
+REGISTER_OP_CPU_KERNEL(
+    spp_grad, ops::SppGradKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::SppGradKernel<paddle::platform::CPUDeviceContext, double>);
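As a worked example of the output size documented above (not taken from the diff itself): with pyramid_height = 3, the three pooling levels contribute 1 + 4 + 16 = 21 bins per channel, so

$$
W_{out} = \frac{4^{3} - 1}{4 - 1} \cdot C_{in} = 21 \cdot C_{in}, \qquad H_{out} = N
$$

which is exactly the geometric-series form $(4^{pyramid\_height} - 1) / (4 - 1) \cdot C_{in}$ given in the DOC string.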
paddle/operators/spp_op.cu.cc
...
...
@@ -15,8 +15,9 @@ limitations under the License. */
#include "paddle/operators/spp_op.h"
namespace
ops
=
paddle
::
operators
;
REGISTER_OP_GPU_KERNEL
(
spp
,
ops
::
SppKernel
<
paddle
::
platform
::
GPUPlace
,
float
>
,
ops
::
SppKernel
<
paddle
::
platform
::
GPUPlace
,
double
>
);
REGISTER_OP_GPU_KERNEL
(
spp_grad
,
ops
::
SppGradKernel
<
paddle
::
platform
::
GPUPlace
,
float
>
,
ops
::
SppGradKernel
<
paddle
::
platform
::
GPUPlace
,
double
>
);
REGISTER_OP_CUDA_KERNEL
(
spp
,
ops
::
SppKernel
<
paddle
::
platform
::
CUDADeviceContext
,
float
>
,
ops
::
SppKernel
<
paddle
::
platform
::
CUDADeviceContext
,
double
>
);
REGISTER_OP_CUDA_KERNEL
(
spp_grad
,
ops
::
SppGradKernel
<
paddle
::
platform
::
CUDADeviceContext
,
float
>
,
ops
::
SppGradKernel
<
paddle
::
platform
::
CUDADeviceContext
,
double
>
);
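The recurring change across all three files is the switch from a Place template parameter to a DeviceContext one, with the typed context fetched through context.template device_context<DeviceContext>(). A minimal, self-contained sketch of that pattern, using mock types rather than the real Paddle framework classes:

#include <iostream>
#include <typeinfo>

// Mock stand-ins for the framework types, for illustration only.
struct CPUDeviceContext {
  const char* name() const { return "CPUDeviceContext"; }
};

struct ExecutionContext {
  // Member function template returning the typed device context, loosely
  // mirroring the framework's device_context<DeviceContext>() accessor.
  template <typename DeviceContext>
  const DeviceContext& device_context() const {
    static DeviceContext ctx;  // the mock hands out one shared instance
    return ctx;
  }
};

// Kernel shape after the refactor: templated on DeviceContext, not Place.
template <typename DeviceContext, typename T>
struct SppKernelSketch {
  void Compute(const ExecutionContext& context) const {
    // The `template` keyword marks device_context as a member template, so
    // the `<` is parsed as a template argument list inside this template.
    auto& dev_ctx = context.template device_context<DeviceContext>();
    std::cout << "Compute<" << typeid(T).name() << "> on " << dev_ctx.name()
              << "\n";
  }
};

int main() {
  ExecutionContext ctx;
  SppKernelSketch<CPUDeviceContext, float>{}.Compute(ctx);
  return 0;
}

The registration macros above then instantiate the real kernels with CPUDeviceContext or CUDADeviceContext for each element type (float, double).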
paddle/operators/spp_op.h
...
...
@@ -20,7 +20,7 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-template <typename Place, typename T>
+template <typename DeviceContext, typename T>
 class SppKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
...
...
@@ -43,39 +43,32 @@ class SppKernel : public framework::OpKernel<T> {
       std::vector<int> paddings({padding_h, padding_w});
       // pooling output shape
       framework::Tensor out_level;
-      std::vector<int64_t> output_shape_vec({in_x->dims()[0], in_x->dims()[1]});
-      output_shape_vec.push_back(
-          (input_h - kernel_size_h + 2 * padding_h) / kernel_size_h + 1);
-      output_shape_vec.push_back(
-          (input_w - kernel_size_w + 2 * padding_w) / kernel_size_w + 1);
+      std::vector<int64_t> output_shape_vec(
+          {in_x->dims()[0], in_x->dims()[1], bins, bins});
       framework::DDim output_shape(framework::make_ddim(output_shape_vec));
       out_level.mutable_data<T>(output_shape, context.GetPlace());
       // pooling
-      math::Pool2dFunctor<Place, math::MaxPool<T>, T> pool_forward;
+      math::Pool2dFunctor<DeviceContext, math::MaxPool<T>, T> pool_forward;
       math::MaxPool<T> max_process;
-      pool_forward(context.device_context(), *in_x, kernel_size, strides,
-                   paddings, max_process, &out_level);
+      pool_forward(context.template device_context<DeviceContext>(), *in_x,
+                   kernel_size, strides, paddings, max_process, &out_level);
       // flatten pooling output shape
-      framework::Tensor out_flatten_level;
       int output_flatten_w = in_x->dims()[1] * bins * bins;
       std::vector<int64_t> output_flatten_shape_vec(
           {in_x->dims()[0], output_flatten_w});
       framework::DDim output_flatten_shape(
           framework::make_ddim(output_flatten_shape_vec));
-      out_flatten_level.ShareDataWith(out_level);
-      out_flatten_level.Resize(output_flatten_shape);
+      out_level.Resize(output_flatten_shape);
       // concat
-      auto out_flatten_level_stride =
-          framework::stride(out_flatten_level.dims());
-      StridedMemcpy<T>(context.device_context(), out_flatten_level.data<T>(),
-                       out_flatten_level_stride, out_flatten_level.dims(),
+      auto out_level_stride = framework::stride(out_level.dims());
+      StridedMemcpy<T>(context.template device_context<DeviceContext>(),
+                       out_level.data<T>(), out_level_stride, out_level.dims(),
                        out_stride, out->data<T>() + output_offset);
-      output_offset +=
-          out_flatten_level.dims()[1] * out_flatten_level_stride[1];
+      output_offset += out_level.dims()[1] * out_level_stride[1];
     }
   }
 };
-template <typename Place, typename T>
+template <typename DeviceContext, typename T>
 class SppGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
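The forward path above pools each pyramid level into an {N, C, bins, bins} tensor, resizes it to {N, C * bins * bins}, and then uses StridedMemcpy to write it into the final output at a growing column offset. A plain-CPU sketch of that concatenation along axis 1, with std::vector standing in for framework::Tensor and StridedMemcpy (illustration only):

#include <cstddef>
#include <iostream>
#include <vector>

// Copy a row-major (rows x src_cols) block into a row-major (rows x dst_cols)
// matrix, starting at column `col_offset` of the destination.
void ConcatAlongAxis1(const std::vector<float>& src, std::size_t rows,
                      std::size_t src_cols, std::vector<float>& dst,
                      std::size_t dst_cols, std::size_t col_offset) {
  for (std::size_t r = 0; r < rows; ++r) {
    for (std::size_t c = 0; c < src_cols; ++c) {
      dst[r * dst_cols + col_offset + c] = src[r * src_cols + c];
    }
  }
}

int main() {
  const std::size_t n = 2, c = 1;                // batch 2, 1 channel
  const std::vector<std::size_t> bins = {1, 2};  // two pyramid levels
  std::size_t w_out = 0;
  for (std::size_t b : bins) w_out += c * b * b;  // 1 + 4 = 5 columns

  std::vector<float> out(n * w_out, 0.f);
  std::size_t offset = 0;
  for (std::size_t b : bins) {
    std::size_t level_w = c * b * b;
    // stand-in for the flattened pooling output of this level
    std::vector<float> level(n * level_w, static_cast<float>(b));
    ConcatAlongAxis1(level, n, level_w, out, w_out, offset);
    offset += level_w;  // advances like output_offset in the kernel
  }

  for (std::size_t r = 0; r < n; ++r) {
    for (std::size_t col = 0; col < w_out; ++col)
      std::cout << out[r * w_out + col] << ' ';  // prints 1 2 2 2 2 per row
    std::cout << '\n';
  }
  return 0;
}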
...
...
@@ -86,8 +79,8 @@ class SppGradKernel : public framework::OpKernel<T> {
     framework::Tensor* in_x_grad =
         context.Output<framework::Tensor>(framework::GradVarName("X"));
     int pyramid_height = context.template Attr<int>("pyramid_height");
-    auto& device_ctx = context.device_context();
-    math::SetConstant<Place, T> zero;
+    auto& device_ctx = context.template device_context<DeviceContext>();
+    math::SetConstant<DeviceContext, T> zero;
     in_x_grad->mutable_data<T>(context.GetPlace());
     zero(device_ctx, in_x_grad, static_cast<T>(0));
     auto out_stride = framework::stride(out->dims());
...
...
@@ -104,45 +97,43 @@ class SppGradKernel : public framework::OpKernel<T> {
       std::vector<int> strides({kernel_size_h, kernel_size_w});
       std::vector<int> paddings({padding_h, padding_w});
       // split out and outgrad ... to flatten
-      framework::Tensor out_flatten_level;
-      framework::Tensor outgrad_flatten_level;
+      framework::Tensor out_level;
+      framework::Tensor outgrad_level;
       int out_flatten_w = in_x->dims()[1] * bins * bins;
       std::vector<int64_t> out_flatten_shape_vec(
           {in_x->dims()[0], out_flatten_w});
       framework::DDim out_flatten_shape(
           framework::make_ddim(out_flatten_shape_vec));
-      out_flatten_level.mutable_data<T>(out_flatten_shape, context.GetPlace());
-      outgrad_flatten_level.mutable_data<T>(out_flatten_shape,
-                                            context.GetPlace());
-      auto flatten_stride = framework::stride(out_flatten_level.dims());
+      out_level.mutable_data<T>(out_flatten_shape, context.GetPlace());
+      outgrad_level.mutable_data<T>(out_flatten_shape, context.GetPlace());
+      auto flatten_stride = framework::stride(out_level.dims());
       // memcpy
-      StridedMemcpy<T>(context.device_context(), out->data<T>() + out_offset,
-                       out_stride, out_flatten_level.dims(), flatten_stride,
-                       out_flatten_level.data<T>());
+      StridedMemcpy<T>(context.template device_context<DeviceContext>(),
+                       out->data<T>() + out_offset, out_stride,
+                       out_level.dims(), flatten_stride, out_level.data<T>());
-      StridedMemcpy<T>(context.device_context(),
+      StridedMemcpy<T>(context.template device_context<DeviceContext>(),
                        out_grad->data<T>() + out_offset, out_stride,
-                       outgrad_flatten_level.dims(), flatten_stride,
-                       outgrad_flatten_level.data<T>());
-      out_offset += out_flatten_level.dims()[1] * out_stride[1];
+                       outgrad_level.dims(), flatten_stride,
+                       outgrad_level.data<T>());
+      out_offset += out_level.dims()[1] * out_stride[1];
       // flatten backward to nchw
-      framework::Tensor out_level;
-      framework::Tensor outgrad_level;
       std::vector<int64_t> out_shape_vec({in_x->dims()[0], in_x->dims()[1]});
       out_shape_vec.push_back(
           (input_h - kernel_size_h + 2 * padding_h) / kernel_size_h + 1);
       out_shape_vec.push_back(
           (input_w - kernel_size_w + 2 * padding_w) / kernel_size_w + 1);
       framework::DDim out_shape(framework::make_ddim(out_shape_vec));
-      out_level.ShareDataWith(out_flatten_level);
+      out_level.ShareDataWith(out_level);
       out_level.Resize(out_shape);
-      outgrad_level.ShareDataWith(outgrad_flatten_level);
+      outgrad_level.ShareDataWith(outgrad_level);
       outgrad_level.Resize(out_shape);
       // pooling backward
-      math::MaxPool2dGradFunctor<Place, T> pool2d_backward;
-      pool2d_backward(context.device_context(), *in_x, *&out_level,
-                      *&outgrad_level, kernel_size, strides, paddings,
-                      in_x_grad);
+      math::MaxPool2dGradFunctor<DeviceContext, T> pool2d_backward;
+      pool2d_backward(context.template device_context<DeviceContext>(), *in_x,
+                      *&out_level, *&outgrad_level, kernel_size, strides,
+                      paddings, in_x_grad);
     }
   }
 };
...
...
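The forward hunk also replaces the computed pooled dimension (input - kernel_size + 2 * padding) / kernel_size + 1 with the level's bin count bins directly, while the backward kernel above still computes it explicitly. A small standalone check that the two forms agree, assuming the usual SPP derivation of kernel size and padding, i.e. kernel = ceil(input / bins) and padding = (kernel * bins - input + 1) / 2 (that derivation lies outside the lines shown in this diff):

#include <cmath>
#include <iostream>

// Pooled dimension as still computed in the backward kernel above.
int PooledDim(int input, int bins) {
  int kernel = static_cast<int>(std::ceil(input / static_cast<double>(bins)));
  int padding = (kernel * bins - input + 1) / 2;
  return (input - kernel + 2 * padding) / kernel + 1;
}

int main() {
  // For a 3-level pyramid (bins = 1, 2, 4) and a few input sizes, the
  // computed dimension equals bins, so the two forms are interchangeable.
  for (int input : {7, 13, 224}) {
    for (int bins : {1, 2, 4}) {
      std::cout << "input=" << input << " bins=" << bins
                << " computed=" << PooledDim(input, bins) << "\n";
    }
  }
  return 0;
}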