机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit f67f0cae
Authored Mar 21, 2018 by wangyang59
Parent: c7cd6d13

finished testing cpu bilinear_interp_op
Changes: 3 changed files with 97 additions and 8 deletions (+97 -8)

paddle/fluid/operators/bilinear_interp_op.cc                        +4 -3
paddle/fluid/operators/bilinear_interp_op.h                         +5 -5
python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py     +88 -0
paddle/fluid/operators/bilinear_interp_op.cc
@@ -27,13 +27,13 @@ class BilinearInterpOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of BilinearInterOp should not be null.");
-    auto dim_x = ctx->GetInputDim("Input");  // NCHW format
+    auto dim_x = ctx->GetInputDim("X");  // NCHW format
     int out_h = ctx->Attrs().Get<int>("out_h");
     int out_w = ctx->Attrs().Get<int>("out_w");
     PADDLE_ENFORCE_EQ(dim_x.size(), 4, "X's dimension must be 4");
     std::vector<int64_t> dim_out({dim_x[0], dim_x[1], out_h, out_w});
-    ctx->SetOutputDim("Output", framework::make_ddim(dim_out));
+    ctx->SetOutputDim("Out", framework::make_ddim(dim_out));
   }
 };
@@ -83,4 +83,5 @@ namespace ops = paddle::operators;
 REGISTER_OP(bilinear_interp, ops::BilinearInterpOp, ops::BilinearInterpOpMaker,
             bilinear_interp_grad, ops::BilinearInterpOpGrad);
 REGISTER_OP_CPU_KERNEL(bilinear_interp, ops::BilinearInterpKernel<float>);
-REGISTER_OP_CPU_KERNEL(bilinear_interp_grad, ops::BilinearInterpKernel<float>);
+REGISTER_OP_CPU_KERNEL(bilinear_interp_grad,
+                       ops::BilinearInterpGradKernel<float>);
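The shape-inference fix above reads the NCHW input from "X" and keeps its batch and channel dimensions while replacing the spatial size with the out_h and out_w attributes when it sets "Out". A quick NumPy illustration of that rule, using the shapes from the test cases below (not part of the commit):

import numpy as np

x = np.zeros((2, 3, 4, 4), dtype="float32")  # NCHW input, as in TestBilinearInterpOp
out_h, out_w = 2, 2
# dim_out = {dim_x[0], dim_x[1], out_h, out_w} in the C++ InferShape above
out_shape = (x.shape[0], x.shape[1], out_h, out_w)
assert out_shape == (2, 3, 2, 2)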
paddle/fluid/operators/bilinear_interp_op.h
@@ -46,7 +46,7 @@ class BilinearInterpKernel : public framework::OpKernel<T> {
     T ratio_w = (out_w > 1) ? static_cast<T>(in_w - 1) / (out_w - 1) : 0.f;

     if (in_h == out_h && in_w == out_w) {
-      memcpy(output, input, product(input_t->dims()) * sizeof(T));
+      memcpy(output, input, input_t->numel() * sizeof(T));
     } else {
       for (int k = 0; k < batch_size; ++k) {  // loop for batches
         for (int i = 0; i < out_h; ++i) {  // loop for images
@@ -123,10 +123,10 @@ class BilinearInterpGradKernel : public framework::OpKernel<T> {
           const T* out_pos = &d_output[k * out_chw + i * out_w + j];

           for (int c = 0; c < channels; ++c) {  // loop for channels
-            in_pos[0] = h2lambda * w2lambda * out_pos[0];
-            in_pos[wid] = h2lambda * w1lambda * out_pos[0];
-            in_pos[hid * in_w] = h1lambda * w2lambda * out_pos[0];
-            in_pos[hid * in_w + wid] = h1lambda * w1lambda * out_pos[0];
+            in_pos[0] += h2lambda * w2lambda * out_pos[0];
+            in_pos[wid] += h2lambda * w1lambda * out_pos[0];
+            in_pos[hid * in_w] += h1lambda * w2lambda * out_pos[0];
+            in_pos[hid * in_w + wid] += h1lambda * w1lambda * out_pos[0];
             in_pos += in_hw;
             out_pos += out_hw;
           }
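The second hunk changes the gradient writes from plain assignment to accumulation. Several output pixels can scatter into the same input cell, and when hid or wid is 0 the four target indices overlap, so "=" would silently drop contributions. A minimal NumPy sketch of the fixed backward logic, mirroring the reference forward function in the test below (the helper name bilinear_interp_grad_np is made up here and is not part of the commit):

import numpy as np

def bilinear_interp_grad_np(d_out, in_h, in_w):
    # d_out has NCHW layout, like "Out" in the operator
    batch_size, channel, out_h, out_w = d_out.shape
    ratio_h = (in_h - 1.0) / (out_h - 1.0) if out_h > 1 else 0.0
    ratio_w = (in_w - 1.0) / (out_w - 1.0) if out_w > 1 else 0.0
    d_in = np.zeros((batch_size, channel, in_h, in_w), dtype=d_out.dtype)
    for i in range(out_h):
        h = int(ratio_h * i)
        hid = 1 if h < in_h - 1 else 0
        h1lambda = ratio_h * i - h
        h2lambda = 1.0 - h1lambda
        for j in range(out_w):
            w = int(ratio_w * j)
            wid = 1 if w < in_w - 1 else 0
            w1lambda = ratio_w * j - w
            w2lambda = 1.0 - w1lambda
            # "+=" (not "="), so overlapping contributions add up
            d_in[:, :, h, w] += h2lambda * w2lambda * d_out[:, :, i, j]
            d_in[:, :, h, w + wid] += h2lambda * w1lambda * d_out[:, :, i, j]
            d_in[:, :, h + hid, w] += h1lambda * w2lambda * d_out[:, :, i, j]
            d_in[:, :, h + hid, w + wid] += h1lambda * w1lambda * d_out[:, :, i, j]
    return d_in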
python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py
new file (mode 0 → 100644)
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest


def bilinear_interp_np(input, out_h, out_w):
    batch_size, channel, in_h, in_w = input.shape
    if out_h > 1:
        ratio_h = (in_h - 1.0) / (out_h - 1.0)
    else:
        ratio_h = 0.0
    if out_w > 1:
        ratio_w = (in_w - 1.0) / (out_w - 1.0)
    else:
        ratio_w = 0.0

    out = np.zeros((batch_size, channel, out_h, out_w))
    for i in range(out_h):
        h = int(ratio_h * i)
        hid = 1 if h < in_h - 1 else 0
        h1lambda = ratio_h * i - h
        h2lambda = 1.0 - h1lambda
        for j in range(out_w):
            w = int(ratio_w * j)
            wid = 1 if w < in_w - 1 else 0
            w1lambda = ratio_w * j - w
            w2lambda = 1.0 - w1lambda

            out[:, :, i, j] = h2lambda * (w2lambda * input[:, :, h, w] +
                                          w1lambda * input[:, :, h, w + wid]) + \
                h1lambda * (w2lambda * input[:, :, h + hid, w] +
                            w1lambda * input[:, :, h + hid, w + wid])
    return out.astype("float32")


class TestBilinearInterpOp(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "bilinear_interp"
        input_np = np.random.random(self.input_shape).astype("float32")
        output_np = bilinear_interp_np(input_np, self.out_h, self.out_w)
        self.inputs = {'X': input_np}
        self.attrs = {'out_h': self.out_h, 'out_w': self.out_w}
        self.outputs = {'Out': output_np}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', in_place=True)

    def init_test_case(self):
        self.input_shape = [2, 3, 4, 4]
        self.out_h = 2
        self.out_w = 2


class TestCase1(TestBilinearInterpOp):
    def init_test_case(self):
        self.input_shape = [4, 1, 7, 8]
        self.out_h = 1
        self.out_w = 1


class TestCase2(TestBilinearInterpOp):
    def init_test_case(self):
        self.input_shape = [3, 3, 9, 6]
        self.out_h = 12
        self.out_w = 12


if __name__ == "__main__":
    unittest.main()
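As a quick sanity check (not part of the commit, and relying on the definitions above), upsampling a single 2x2 map to 3x3 with the reference function reproduces the corners exactly and places averages at the midpoints:

x = np.array([[[[0., 1.],
                [2., 3.]]]], dtype="float32")  # shape (1, 1, 2, 2)
print(bilinear_interp_np(x, 3, 3))
# row 0: [0.0, 0.5, 1.0]
# row 1: [1.0, 1.5, 2.0]
# row 2: [2.0, 2.5, 3.0]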