Commit 90f664d0
Authored Nov 22, 2017 by sweetsky0901

test unpool ok cpu

Parent: 822f2834
Changes: 8
Showing 8 changed files with 98 additions and 80 deletions.
paddle/operators/CMakeLists.txt                    +0   -7
paddle/operators/math/unpooling.cc                 +4   -5
paddle/operators/math/unpooling.cu                 +2   -2
paddle/operators/unpool_op.cc                      +12  -13
paddle/operators/unpool_op.cu.cc                   +2   -2
paddle/operators/unpool_op.h                       +4   -4
python/paddle/v2/fluid/tests/test_unpool2d_op.py   +0   -47
python/paddle/v2/fluid/tests/test_unpool_op.py     +74  -0
paddle/operators/CMakeLists.txt

```diff
@@ -80,13 +80,6 @@ function(op_library TARGET)
     file(APPEND ${pybind_file} "USE_OP(pool2d);\n")
   endif()
 
-  # unpool_op contains several operators
-  if ("${TARGET}" STREQUAL "unpool_op")
-    set(pybind_flag 1)
-    # It's enough to just adding one operator to pybind
-    file(APPEND ${pybind_file} "USE_OP(unpool2d);\n")
-  endif()
-
   # pool_cudnn_op contains several operators
   if ("${TARGET}" STREQUAL "pool_cudnn_op")
     set(pybind_flag 1)
```
paddle/operators/math/unpooling.cc

```diff
@@ -32,13 +32,13 @@ class Unpool2dMaxFunctor<platform::CPUPlace, T> {
     const int output_channels = output->dims()[1];
     const int output_height = output->dims()[2];
     const int output_width = output->dims()[3];
     int input_feasize = input_height * input_width;
     int output_feasize = output_height * output_width;
     const T* input_data = input.data<T>();
-    const int* indices_data = indices.data<int>();
+    const T* indices_data = indices.data<T>();
     T* output_data = output->mutable_data<T>(context.GetPlace());
     memset(output_data, 0, \
            sizeof(T) * output_feasize * output_channels * batch_size);
     for (int b = 0; b < batch_size; ++b) {
       for (int c = 0; c < output_channels; ++c) {
         for (int i = 0; i < input_feasize; ++i) {
@@ -74,9 +74,8 @@ public:
     int input_feasize = input_height * input_width;
     int output_feasize = output_height * output_width;
-    const int* indices_data = indices.data<int>();
+    const T* indices_data = indices.data<T>();
     const T* output_grad_data = output_grad.data<T>();
     T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
     for (int b = 0; b < batch_size; ++b) {
```
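For orientation: the forward functor scatters each pooled input value into the larger output feature map at the flat offset stored in its index (row = index / output_width, col = index % output_width). Below is a minimal NumPy sketch of that mapping, added here for illustration only; it mirrors unpool2dmax_forward_naive from the new test further down, and the 4x4 map, values, and indices are made-up examples.

```python
import numpy as np

# Made-up example: a 2x2 pooled map scattered into a 4x4 unpooled map.
output_height, output_width = 4, 4
pooled = np.array([[7.0, 9.0],
                   [3.0, 5.0]], dtype=np.float32)
indices = np.array([[1, 6],
                    [9, 15]])  # flat positions of the maxima in the 4x4 map

out = np.zeros((output_height, output_width), dtype=np.float32)
for h in range(pooled.shape[0]):
    for w in range(pooled.shape[1]):
        idx = indices[h, w]
        # index -> (row, col) inside one output feature map
        out[idx // output_width, idx % output_width] = pooled[h, w]
# out holds 7 at (0,1), 9 at (1,2), 3 at (2,1), 5 at (3,3); zeros elsewhere.
```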
paddle/operators/math/unpooling.cu

```diff
@@ -76,7 +76,7 @@ class Unpool2dMaxFunctor<platform::GPUPlace, T> {
     const int output_height = output->dims()[2];
     const int output_width = output->dims()[3];
     const T* input_data = input.data<T>();
-    const int* indices_data = indices.data<int>();
+    const T* indices_data = indices.data<T>();
     T* output_data = output->mutable_data<T>(context.GetPlace());
     int nthreads = output->numel();
@@ -111,7 +111,7 @@ class Unpool2dMaxGradFunctor<platform::GPUPlace, T> {
     const int output_height = output.dims()[2];
     const int output_width = output.dims()[3];
     const T* input_data = input.data<T>();
-    const int* indices_data = indices.data<int>();
+    const T* indices_data = indices.data<T>();
     const T* output_data = output.data<T>();
     const T* output_grad_data = output_grad.data<T>();
     T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
```
paddle/operators/unpool_op.cc

```diff
@@ -48,7 +48,7 @@ class Unpool2dOpMaker : public framework::OpProtoAndCheckerMaker {
                          "(vector defalut:{0,0}), "
                          "paddings(height, width) of unpooling operator.")
         .SetDefault({0, 0});
-    AddAttr<std::string>("unpoolingType",
+    AddAttr<std::string>("unpoolingtype",
                          "(string), unpooling type, can be \"max\" for max-unpooling ")
         .InEnum({"max"});
     AddComment(R"DOC(
@@ -80,8 +80,8 @@ class UnpoolOp : public framework::OperatorWithKernel {
     auto in_x_dims = ctx->GetInputDim("X");
     auto in_y_dims = ctx->GetInputDim("Y");
-    std::string unpooling_type = \
-        ctx->Attrs().Get<std::string>("unpooling_type");
+    std::string unpoolingtype = \
+        ctx->Attrs().Get<std::string>("unpoolingtype");
     std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
     std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
     std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
@@ -108,9 +108,9 @@ class UnpoolOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) must not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    // PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) must not be null.");
+    // PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
+    //                "Input(Out@GRAD) should not be null");
     PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
                    "Input(X@GRAD) should not be null.");
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
@@ -120,13 +120,12 @@ class UnpoolOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle
 namespace ops = paddle::operators;
-REGISTER_OP(unpool2d, ops::UnpoolOp, ops::Unpool2dOpMaker, unpool2d_grad,
+REGISTER_OP(unpool, ops::UnpoolOp, ops::Unpool2dOpMaker, unpool_grad,
             ops::UnpoolOpGrad);
-REGISTER_OP_CPU_KERNEL(unpool2d,
+REGISTER_OP_CPU_KERNEL(unpool,
     ops::UnpoolKernel<paddle::platform::CPUPlace, float>,
     ops::UnpoolKernel<paddle::platform::CPUPlace, double>);
-REGISTER_OP_CPU_KERNEL(unpool2d_grad,
-    ops::UnpoolGradKernel<paddle::platform::CPUPlace, float>,
-    ops::UnpoolGradKernel<paddle::platform::CPUPlace, double>);
+REGISTER_OP_CPU_KERNEL(unpool_grad,
+    ops::UnpoolGradKernel<paddle::platform::CPUPlace, float>,
+    ops::UnpoolGradKernel<paddle::platform::CPUPlace, double>);
```
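Taken together, the renames in this file mean Python-side callers now target op_type "unpool" and the attribute key 'unpoolingtype'. A hypothetical sketch of the matching attribute dict follows; the concrete values are taken from the new test further down, not from this file.

```python
# Hypothetical illustration of the renamed interface; values mirror
# test_unpool_op.py below.
op_type = "unpool"            # previously registered as "unpool2d"
attrs = {
    'ksize': [3, 3],
    'strides': [2, 2],
    'paddings': [0, 0],
    'unpoolingtype': "max",   # previously "unpoolingType" / "unpooling_type"
}
```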
paddle/operators/unpool_op.cu.cc

```diff
@@ -15,10 +15,10 @@
 #include "paddle/operators/unpool_op.h"
 namespace ops = paddle::operators;
-REGISTER_OP_GPU_KERNEL(unpool2d,
+REGISTER_OP_GPU_KERNEL(unpool,
                        ops::UnpoolKernel<paddle::platform::GPUPlace, float>,
                        ops::UnpoolKernel<paddle::platform::GPUPlace, double>);
-REGISTER_OP_GPU_KERNEL(unpool2d_grad,
+REGISTER_OP_GPU_KERNEL(unpool_grad,
                        ops::UnpoolGradKernel<paddle::platform::GPUPlace, float>,
                        ops::UnpoolGradKernel<paddle::platform::GPUPlace,
```
paddle/operators/unpool_op.h

```diff
@@ -30,13 +30,13 @@ class UnpoolKernel : public framework::OpKernel<T> {
     const Tensor* in_x = context.Input<Tensor>("X");
     const Tensor* in_y = context.Input<Tensor>("Y");
     Tensor* out = context.Output<Tensor>("Out");
-    std::string pooling_type = context.Attr<std::string>("unpooling_type");
+    std::string unpoolingtype = context.Attr<std::string>("unpoolingtype");
     std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
     switch (ksize.size()) {
       case 2: {
-        if (pooling_type == "max") {
+        if (unpoolingtype == "max") {
           math::Unpool2dMaxFunctor<Place, T> unpool2d_max_forward;
           unpool2d_max_forward(context.device_context(), *in_x, *in_y, out);
         }
@@ -56,7 +56,7 @@ class UnpoolGradKernel : public framework::OpKernel<T> {
     const Tensor* out_grad = context.Input<Tensor>(framework::GradVarName("Out"));
     Tensor* in_x_grad = context.Output<Tensor>(framework::GradVarName("X"));
-    std::string pooling_type = context.Attr<std::string>("unpooling_type");
+    std::string unpoolingtype = context.Attr<std::string>("unpoolingtype");
     std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
@@ -69,7 +69,7 @@ class UnpoolGradKernel : public framework::OpKernel<T> {
     }
     switch (ksize.size()) {
       case 2: {
-        if (pooling_type == "max") {
+        if (unpoolingtype == "max") {
           math::Unpool2dMaxGradFunctor<Place, T> unpool2d_max_backward;
           unpool2d_max_backward(context.device_context(), *in_x, *in_y, in_x_grad,
                                 *out, *out_grad);
```
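The gradient kernel dispatches to math::Unpool2dMaxGradFunctor, whose body is not part of these hunks. As a rough sketch under the usual max-unpooling assumption (the gradient is routed back through the same indices used by the forward scatter), added for illustration only:

```python
import numpy as np

# Assumed behaviour of the backward pass (not shown in the diff): each
# input-gradient entry reads the output gradient at its recorded index.
output_height, output_width = 4, 4
indices = np.array([[1, 6],
                    [9, 15]])  # same made-up indices as the forward sketch
out_grad = np.arange(output_height * output_width,
                     dtype=np.float32).reshape(output_height, output_width)

in_grad = np.zeros(indices.shape, dtype=np.float32)
for h in range(indices.shape[0]):
    for w in range(indices.shape[1]):
        idx = indices[h, w]
        in_grad[h, w] = out_grad[idx // output_width, idx % output_width]
# in_grad == [[1., 6.], [9., 15.]]
```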
python/paddle/v2/fluid/tests/test_unpool2d_op.py (deleted; 100644 → 0)

```python
import unittest
import numpy as np
from op_test import OpTest


def maxout_forward_naive(input, groups):
    s0, s1, s2, s3 = input.shape
    return np.ndarray([s0, s1 / groups, groups, s2, s3], \
        buffer=input, dtype=input.dtype).max(axis=(2))


class TestUnpool2dOp(OpTest):
    def setUp(self):
        self.op_type = "unpool2d"
        self.init_test_case()
        input = np.random.random(self.shape).astype("float32")
        output = self.MaxOut_forward_naive(input, self.groups).astype("float32")
        self.inputs = {'X': input}
        self.attrs = {
            'strides': self.strides,
            'paddings': self.paddings,
            'ksize': self.ksize,
            'unpooling_type': self.pool_type,
        }
        self.outputs = {'Out': output.astype('float32')}

    def init_pool_type(self):
        self.pool_type = "max"

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')

    def init_test_case(self):
        self.MaxOut_forward_naive = maxout_forward_naive
        self.shape = [100, 6, 2, 2]
        self.groups = 2


if __name__ == '__main__':
    unittest.main()
```
python/paddle/v2/fluid/tests/test_unpool_op.py (new file; 0 → 100644)

```python
import unittest
import numpy as np
from op_test import OpTest


def unpool2dmax_forward_naive(input, indices, ksize, strides, paddings):
    s0, s1, s2, s3 = input.shape
    out_H = (s2 - 1) * strides[0] - 2 * paddings[0] + ksize[0]
    out_W = (s2 - 1) * strides[1] - 2 * paddings[1] + ksize[1]
    out = np.zeros((s0, s1, out_H, out_W))
    for nidx in xrange(s0):
        for cidx in xrange(s1):
            for h in xrange(s2):
                for w in xrange(s3):
                    index = indices[nidx, cidx, h, w]
                    hidx = (index - index % out_W) / out_W
                    widx = index % out_W
                    out[nidx, cidx, int(hidx), int(widx)] = \
                        input[nidx, cidx, h, w]
    return out


class TestUnpoolOp(OpTest):
    def setUp(self):
        self.op_type = "unpool"
        self.init_test_case()
        pre_input = np.random.random(self.shape).astype("float32")
        N, C, H, W = pre_input.shape
        H_out = (H - self.ksize[0] + 2 * self.paddings[0]) / self.strides[0] + 1
        W_out = (W - self.ksize[1] + 2 * self.paddings[1]) / self.strides[1] + 1
        input = np.zeros((N, C, H_out, W_out))
        indices = np.zeros((N, C, H_out, W_out))
        for i in xrange(H_out):
            for j in xrange(W_out):
                r_start = np.max((i * self.strides[0] - self.paddings[0], 0))
                r_end = np.min((i * self.strides[0] + self.ksize[0] -
                                self.paddings[0], H))
                c_start = np.max((j * self.strides[1] - self.paddings[1], 0))
                c_end = np.min((j * self.strides[1] + self.ksize[1] -
                                self.paddings[1], W))
                for nidx in xrange(N):
                    for cidx in xrange(C):
                        x_masked = pre_input[nidx, cidx, r_start:r_end,
                                             c_start:c_end]
                        input[nidx, cidx, i, j] = x_masked.max()
                        arg = x_masked.argmax()
                        indices[nidx, cidx, i, j] = \
                            (r_start + arg / self.ksize[1]) * W + \
                            c_start + arg % self.ksize[1]
        output = self.Unpool2d_forward_naive(input, indices, self.ksize,
                                             self.strides,
                                             self.paddings).astype("float32")
        self.inputs = {'X': input.astype('float32'),
                       'Y': indices.astype('int16')}
        self.attrs = {
            'strides': self.strides,
            'paddings': self.paddings,
            'ksize': self.ksize,
            'unpoolingtype': self.unpoolingtype,
        }
        self.outputs = {'Out': output.astype('float32')}

    def test_check_output(self):
        print self.outputs['Out']
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.5)

    def init_test_case(self):
        self.Unpool2d_forward_naive = unpool2dmax_forward_naive
        self.unpoolingtype = "max"
        self.shape = [10, 2, 5, 5]
        self.ksize = [3, 3]
        self.strides = [2, 2]
        self.paddings = [0, 0]


if __name__ == '__main__':
    unittest.main()
```
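As a quick sanity check of the shapes exercised by init_test_case, here is a worked version of the two size formulas above (added for illustration; integer division is written explicitly):

```python
# Shapes from init_test_case: input [10, 2, 5, 5], ksize [3, 3],
# strides [2, 2], paddings [0, 0].
H = W = 5
ksize, strides, paddings = [3, 3], [2, 2], [0, 0]

# Pooled size computed in setUp (the unpool op's input):
H_out = (H - ksize[0] + 2 * paddings[0]) // strides[0] + 1  # (5 - 3) // 2 + 1 = 2
W_out = (W - ksize[1] + 2 * paddings[1]) // strides[1] + 1  # = 2

# Unpooled size computed in unpool2dmax_forward_naive (the op's output):
out_H = (H_out - 1) * strides[0] - 2 * paddings[0] + ksize[0]  # 1 * 2 - 0 + 3 = 5
out_W = (W_out - 1) * strides[1] - 2 * paddings[1] + ksize[1]  # = 5

assert (H_out, W_out) == (2, 2) and (out_H, out_W) == (5, 5)
```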