Commit 8368e55b
Authored Dec 04, 2017 by sweetsky0901

modify some doc

Parent: 531e7b6f
3 changed files with 38 additions and 32 deletions (+38 −32)

paddle/operators/spp_op.cc                   +2  −2
paddle/operators/spp_op.h                    +24 −23
python/paddle/v2/fluid/tests/test_spp_op.py  +12 −7
paddle/operators/spp_op.cc

@@ -29,7 +29,7 @@ class SppOpMaker : public framework::OpProtoAndCheckerMaker {
               "(Tensor) The output tensor of spp operator."
               "N * M."
               "M = C * H * W");
-    AddAttr<int>("pyramid_height", "int");
+    AddAttr<int>("pyramid_height", "int", "multi level pooling");
     AddComment(R"DOC(
         "Does spatial pyramid pooling on the input image by taking the max,
         etc. within regions so that the result vector of different sized
@@ -39,7 +39,7 @@ class SppOpMaker : public framework::OpProtoAndCheckerMaker {
         Where
         $$
         H_{out} = N \\
-        W_{out} = ((std::pow(4, pyramid_height) - 1) / (4 - 1)) * C_{in}
+        W_{out} = (((4^pyramid_height) - 1) / (4 - 1))$ * C_{in}
         $$
         )DOC");
  }
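As a quick check of the W_{out} formula in the DOC block above, here is a minimal sketch in plain Python (illustration only, not part of the commit): level p of the pyramid pools each channel into 2^p x 2^p bins, so the flattened width is the geometric sum (4^pyramid_height - 1) / (4 - 1) bins per channel.

    # Minimal sketch (plain Python, illustration only; not Paddle code) of the
    # flattened output width described by the DOC formula above.
    def spp_output_width(pyramid_height, c_in):
        # Level p contributes (2**p) * (2**p) = 4**p bins per channel, so the
        # total is the geometric series 1 + 4 + ... + 4**(pyramid_height - 1).
        bins_per_channel = (4 ** pyramid_height - 1) // (4 - 1)
        return bins_per_channel * c_in

    # Example: a 3-level pyramid over 16 channels flattens to (1 + 4 + 16) * 16.
    assert spp_output_width(3, 16) == 21 * 16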
paddle/operators/spp_op.h

@@ -34,27 +34,27 @@ class SppKernel : public framework::OpKernel<T> {
     size_t output_offset = 0;
     for (int p = 0; p < pyramid_height; ++p) {
       int bins = std::pow(2, p);
-      int ksize_h = std::ceil(input_h / static_cast<double>(bins));
-      int ksize_w = std::ceil(input_w / static_cast<double>(bins));
-      int padding_h = (ksize_h * bins - input_h + 1) / 2;
-      int padding_w = (ksize_w * bins - input_w + 1) / 2;
-      std::vector<int> ksize({ksize_h, ksize_w});
-      std::vector<int> strides({ksize_h, ksize_w});
+      int kernel_size_h = std::ceil(input_h / static_cast<double>(bins));
+      int kernel_size_w = std::ceil(input_w / static_cast<double>(bins));
+      int padding_h = (kernel_size_h * bins - input_h + 1) / 2;
+      int padding_w = (kernel_size_w * bins - input_w + 1) / 2;
+      std::vector<int> kernel_size({kernel_size_h, kernel_size_w});
+      std::vector<int> strides({kernel_size_h, kernel_size_w});
       std::vector<int> paddings({padding_h, padding_w});
       // pooling output shape
       framework::Tensor out_level;
       std::vector<int64_t> output_shape_vec({in_x->dims()[0], in_x->dims()[1]});
-      output_shape_vec.push_back((input_h - ksize_h + 2 * padding_h) / ksize_h + 1);
-      output_shape_vec.push_back((input_w - ksize_w + 2 * padding_w) / ksize_w + 1);
+      output_shape_vec.push_back((input_h - kernel_size_h + 2 * padding_h) / kernel_size_h + 1);
+      output_shape_vec.push_back((input_w - kernel_size_w + 2 * padding_w) / kernel_size_w + 1);
       framework::DDim output_shape(framework::make_ddim(output_shape_vec));
       out_level.mutable_data<T>(output_shape, context.GetPlace());
       // pooling
       math::Pool2dFunctor<Place, math::MaxPool<T>, T> pool_forward;
       math::MaxPool<T> max_process;
-      pool_forward(context.device_context(), *in_x, ksize, strides, paddings, max_process, &out_level);
+      pool_forward(context.device_context(), *in_x, kernel_size, strides, paddings, max_process, &out_level);
       // flatten pooling output shape
       framework::Tensor out_flatten_level;
       int output_flatten_w = in_x->dims()[1] * bins * bins;
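To make the renamed per-level arithmetic easier to follow, here is a minimal sketch in plain Python (illustration only, not part of the commit) of what the forward kernel computes for each pyramid level: the kernel size is the ceiling of the input size over the bin count, the padding centres the grid, the stride equals the kernel size, and the pooled output comes out as bins x bins.

    import math

    def spp_level_shapes(input_h, input_w, pyramid_height):
        # Mirrors the per-level arithmetic in SppKernel: kernel size is the
        # ceiling of input / bins, padding centres the grid, stride == kernel.
        shapes = []
        for p in range(pyramid_height):
            bins = 2 ** p
            kernel_size_h = int(math.ceil(input_h / float(bins)))
            kernel_size_w = int(math.ceil(input_w / float(bins)))
            padding_h = (kernel_size_h * bins - input_h + 1) // 2
            padding_w = (kernel_size_w * bins - input_w + 1) // 2
            out_h = (input_h - kernel_size_h + 2 * padding_h) // kernel_size_h + 1
            out_w = (input_w - kernel_size_w + 2 * padding_w) // kernel_size_w + 1
            shapes.append((out_h, out_w))  # expected to equal (bins, bins)
        return shapes

    # Example: a 7x7 feature map with a 3-level pyramid gives 1x1, 2x2 and 4x4 grids.
    print(spp_level_shapes(7, 7, 3))  # [(1, 1), (2, 2), (4, 4)]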
@@ -96,12 +96,12 @@ class SppGradKernel : public framework::OpKernel<T> {
     size_t out_offset = 0;
     for (int p = 0; p < pyramid_height; ++p) {
       int bins = std::pow(2, p);
-      int ksize_h = std::ceil(input_h / static_cast<double>(bins));
-      int ksize_w = std::ceil(input_w / static_cast<double>(bins));
-      int padding_h = (ksize_h * bins - input_h + 1) / 2;
-      int padding_w = (ksize_w * bins - input_w + 1) / 2;
-      std::vector<int> ksize({ksize_h, ksize_w});
-      std::vector<int> strides({ksize_h, ksize_w});
+      int kernel_size_h = std::ceil(input_h / static_cast<double>(bins));
+      int kernel_size_w = std::ceil(input_w / static_cast<double>(bins));
+      int padding_h = (kernel_size_h * bins - input_h + 1) / 2;
+      int padding_w = (kernel_size_w * bins - input_w + 1) / 2;
+      std::vector<int> kernel_size({kernel_size_h, kernel_size_w});
+      std::vector<int> strides({kernel_size_h, kernel_size_w});
       std::vector<int> paddings({padding_h, padding_w});
       // split out and outgrad ... to flatten
       framework::Tensor out_flatten_level;
@@ -129,10 +129,10 @@ class SppGradKernel : public framework::OpKernel<T> {
framework
::
Tensor
out_level
;
framework
::
Tensor
outgrad_level
;
std
::
vector
<
int64_t
>
out_shape_vec
({
in_x
->
dims
()[
0
],
in_x
->
dims
()[
1
]});
out_shape_vec
.
push_back
(
(
input_h
-
ksize_h
+
2
*
padding_h
)
/
ksize_h
+
1
);
out_shape_vec
.
push_back
(
(
input_w
-
ksize_w
+
2
*
padding_w
)
/
ksize_w
+
1
);
out_shape_vec
.
push_back
(
(
input_h
-
kernel_size_h
+
2
*
padding_h
)
/
kernel_size_h
+
1
);
out_shape_vec
.
push_back
(
(
input_w
-
kernel_size_w
+
2
*
padding_w
)
/
kernel_size_w
+
1
);
framework
::
DDim
out_shape
(
framework
::
make_ddim
(
out_shape_vec
));
out_level
.
ShareDataWith
(
out_flatten_level
);
out_level
.
Resize
(
out_shape
);
...
...
@@ -141,7 +141,8 @@ class SppGradKernel : public framework::OpKernel<T> {
       // pooling backward
       math::MaxPool2dGradFunctor<Place, T> pool2d_backward;
       pool2d_backward(context.device_context(), *in_x, *&out_level,
-                      *&outgrad_level, ksize, strides, paddings, in_x_grad);
+                      *&outgrad_level, kernel_size, strides,
+                      paddings, in_x_grad);
     }
   }
 };
python/paddle/v2/fluid/tests/test_spp_op.py

@@ -13,14 +13,19 @@ class TestSppOp(OpTest):
         out_level_flatten = []
         for i in xrange(self.pyramid_height):
             bins = np.power(2, i)
-            ksize = [0, 0]
+            kernel_size = [0, 0]
             padding = [0, 0]
-            ksize[0] = np.ceil(hsize / bins.astype("double")).astype("int32")
-            padding[0] = ((ksize[0] * bins - hsize + 1) / 2).astype("int32")
-            ksize[1] = np.ceil(wsize / bins.astype("double")).astype("int32")
-            padding[1] = ((ksize[1] * bins - wsize + 1) / 2).astype("int32")
-            out_level = max_pool2D_forward_naive(input, ksize, ksize, padding)
+            kernel_size[0] = np.ceil(hsize / bins.astype("double")).astype("int32")
+            padding[0] = (
+                (kernel_size[0] * bins - hsize + 1) / 2).astype("int32")
+            kernel_size[1] = np.ceil(wsize / bins.astype("double")).astype("int32")
+            padding[1] = (
+                (kernel_size[1] * bins - wsize + 1) / 2).astype("int32")
+            out_level = max_pool2D_forward_naive(input, kernel_size, kernel_size, padding)
             out_level_flatten.append(
                 out_level.reshape(nsize, bins * bins * csize))
             if i == 0:
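Putting the pieces together, the reference computation in the test max-pools the input once per pyramid level, flattens each result to N x (bins * bins * C), and concatenates the levels. Below is a self-contained NumPy sketch of that flow; naive_max_pool2d is a simplified stand-in written for this illustration, not the test's max_pool2D_forward_naive helper.

    import numpy as np

    def naive_max_pool2d(x, kernel, padding):
        # Simplified stand-in for a naive max pool with stride == kernel size.
        n, c, h, w = x.shape
        kh, kw = kernel
        ph, pw = padding
        padded = np.full((n, c, h + 2 * ph, w + 2 * pw), -np.inf, dtype=x.dtype)
        padded[:, :, ph:ph + h, pw:pw + w] = x
        out_h = (h - kh + 2 * ph) // kh + 1
        out_w = (w - kw + 2 * pw) // kw + 1
        out = np.zeros((n, c, out_h, out_w), dtype=x.dtype)
        for i in range(out_h):
            for j in range(out_w):
                window = padded[:, :, i * kh:i * kh + kh, j * kw:j * kw + kw]
                out[:, :, i, j] = window.max(axis=(2, 3))
        return out

    def naive_spp_forward(x, pyramid_height):
        # Pool each level, flatten to (N, bins*bins*C), concatenate the levels.
        n, c, h, w = x.shape
        levels = []
        for p in range(pyramid_height):
            bins = 2 ** p
            kh = int(np.ceil(h / float(bins)))
            kw = int(np.ceil(w / float(bins)))
            padding = ((kh * bins - h + 1) // 2, (kw * bins - w + 1) // 2)
            pooled = naive_max_pool2d(x, (kh, kw), padding)
            levels.append(pooled.reshape(n, bins * bins * c))
        return np.concatenate(levels, axis=1)

    # Example: a (2, 3, 7, 7) input with a 3-level pyramid flattens to (2, 21 * 3).
    print(naive_spp_forward(np.random.rand(2, 3, 7, 7), 3).shape)  # (2, 63)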