Commit 2ffa3a8b
Authored Feb 02, 2018 by xzl

rename op to depthwise_conv2d, more efficient

Parent: fc9b2b9a
Showing 5 changed files with 34 additions and 63 deletions (+34 −63)
paddle/operators/conv_op.cc                        +4   -4
paddle/operators/conv_op.cu.cc                     +2   -2
paddle/operators/math/depthwise_conv.cu            +25  -54
python/paddle/v2/fluid/layers/nn.py                +1   -1
python/paddle/v2/fluid/tests/test_conv2d_op.py     +2   -2
paddle/operators/conv_op.cc

@@ -320,20 +320,20 @@ REGISTER_OP(conv2d, ops::ConvOp, ops::Conv2DOpMaker, conv2d_grad,
             ops::ConvOpGrad);

 // depthwise convolution op
-REGISTER_OP(depthwise_conv, ops::ConvOp, ops::Conv2DOpMaker,
-            depthwise_conv_grad, ops::ConvOpGrad);
+REGISTER_OP(depthwise_conv2d, ops::ConvOp, ops::Conv2DOpMaker,
+            depthwise_conv2d_grad, ops::ConvOpGrad);

 REGISTER_OP(conv3d, ops::ConvOp, ops::Conv3DOpMaker, conv3d_grad,
             ops::ConvOpGrad);

 // depthwise conv kernel
 // TODO(xingzhaolong): neon kernel for mobile
 REGISTER_OP_CPU_KERNEL(
-    depthwise_conv,
+    depthwise_conv2d,
     ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
     ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);

 REGISTER_OP_CPU_KERNEL(
-    depthwise_conv_grad,
+    depthwise_conv2d_grad,
     ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
     ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);
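A note on the registration above: on CPU, the renamed depthwise_conv2d and depthwise_conv2d_grad ops are still backed by the generic GemmConvKernel / GemmConvGradKernel, so the CPU path falls back to the ordinary GEMM-based convolution; the dedicated depthwise kernels are registered only for CUDA (next file), and a NEON kernel for mobile is left as a TODO.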
paddle/operators/conv_op.cu.cc

@@ -17,12 +17,12 @@ limitations under the License. */
 namespace ops = paddle::operators;

 REGISTER_OP_CUDA_KERNEL(
-    depthwise_conv,
+    depthwise_conv2d,
     ops::DepthwiseConvKernel<paddle::platform::CUDADeviceContext, float>,
     ops::DepthwiseConvKernel<paddle::platform::CUDADeviceContext, double>);

 REGISTER_OP_CUDA_KERNEL(
-    depthwise_conv_grad,
+    depthwise_conv2d_grad,
     ops::DepthwiseConvGradKernel<paddle::platform::CUDADeviceContext, float>,
     ops::DepthwiseConvGradKernel<paddle::platform::CUDADeviceContext, double>);
paddle/operators/math/depthwise_conv.cu

@@ -42,38 +42,23 @@ __global__ void KernelDepthwiseConv(
       T value = 0;
       const int h_in_start = -padding_height + h_out * stride_height;
       const int w_in_start = -padding_width + w_out * stride_width;
-      const int h_in_end =
-          -padding_height + h_out * stride_height + filter_height - 1;
-      const int w_in_end =
-          -padding_width + w_out * stride_width + filter_width - 1;
+      const int h_in_end = h_in_start + filter_height;
+      const int w_in_end = w_in_start + filter_width;
       const int in_offset =
           ((batch * input_channels + c_in) * input_height) * input_width;
-      if ((h_in_start >= 0) && (h_in_end < input_height) &&
-          (w_in_start >= 0) && (w_in_end < input_width)) {
-        for (int kh = 0; kh < filter_height; ++kh) {
-          for (int kw = 0; kw < filter_width; ++kw) {
-            const int h_in = h_in_start + kh;
-            const int w_in = w_in_start + kw;
-            const int offset = in_offset + h_in * input_width + w_in;
-            value += (*weight) * input_data[offset];
-            ++weight;
-          }
-        }
-      } else {
-        for (int kh = 0; kh < filter_height; ++kh) {
-          for (int kw = 0; kw < filter_width; ++kw) {
-            const int h_in = h_in_start + kh;
-            const int w_in = w_in_start + kw;
-            if ((h_in >= 0) && (h_in < input_height) && (w_in >= 0) &&
-                (w_in < input_width)) {
-              const int offset = in_offset + h_in * input_width + w_in;
-              value += (*weight) * input_data[offset];
-            }
-            ++weight;
-          }
+
+      const int h_end = h_in_end < input_height ? h_in_end : input_height;
+      const int w_end = w_in_end < input_width ? w_in_end : input_width;
+      const int h_start = h_in_start > 0 ? h_in_start : 0;
+      const int w_start = w_in_start > 0 ? w_in_start : 0;
+
+      for (int h_in = h_start; h_in < h_end; h_in++) {
+        for (int w_in = w_start; w_in < w_end; w_in++) {
+          const int offset = in_offset + h_in * input_width + w_in;
+          value +=
+              weight[(h_in - h_in_start) * filter_width + (w_in - w_in_start)] *
+              input_data[offset];
         }
       }
       output_data[index] = value;
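For readers tracing the change above: the old kernel handled padding by testing every (h_in, w_in) against the image bounds (or skipping the test entirely for fully interior windows), while the new kernel clamps the loop bounds once and indexes the filter relative to the window origin. A minimal NumPy sketch of that indexing scheme follows; it is illustrative only, the function name and arguments are made up for this sketch and are not Paddle APIs.

# NumPy re-statement of the rewritten accumulation loop (sketch, not Paddle code).
import numpy as np

def depthwise_window_sum(input_hw, weight, h_in_start, w_in_start):
    # input_hw: one input channel, shape (input_height, input_width)
    # weight:   the filter for that channel, shape (filter_height, filter_width)
    # h_in_start / w_in_start: top-left corner of the receptive field,
    # possibly negative because of padding.
    input_height, input_width = input_hw.shape
    filter_height, filter_width = weight.shape
    h_in_end = h_in_start + filter_height
    w_in_end = w_in_start + filter_width

    # Clamp the loop bounds once instead of bounds-checking every element.
    h_start, h_end = max(h_in_start, 0), min(h_in_end, input_height)
    w_start, w_end = max(w_in_start, 0), min(w_in_end, input_width)

    value = 0.0
    for h_in in range(h_start, h_end):
        for w_in in range(w_start, w_end):
            # Filter indexed relative to the window origin, mirroring
            # weight[(h_in - h_in_start) * filter_width + (w_in - w_in_start)].
            value += weight[h_in - h_in_start, w_in - w_in_start] * input_hw[h_in, w_in]
    return value

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.standard_normal((5, 5))
    w = rng.standard_normal((3, 3))
    # A window starting at (-1, -1), i.e. one pixel of zero padding on each side.
    print(depthwise_window_sum(x, w, -1, -1))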
@@ -162,32 +147,18 @@ __global__ void KernelDepthwiseConvFilterGrad(
           (batch * input_channels + c_in) * input_height * input_width;
       T* addr_offset = filter_grad_data + c_out * filter_height * filter_width;
-      if ((h_in_start >= 0) && (h_in_end < input_height) &&
-          (w_in_start >= 0) && (w_in_end < input_width)) {
-        for (int kw = 0; kw < filter_width; kw++) {
-          for (int kh = 0; kh < filter_height; kh++) {
-            const int h_in = h_in_start + kh;
-            const int w_in = w_in_start + kw;
-            const int offset = in_offset + h_in * input_width + w_in;
-            const T diff_temp = output_grad_data[index] * input_data[offset];
-            T* addr = addr_offset + kh * filter_width + kw;
-            paddle::platform::CudaAtomicAdd(addr, diff_temp);
-          }
-        }
-      } else {
-        for (int kw = 0; kw < filter_width; kw++) {
-          for (int kh = 0; kh < filter_height; kh++) {
-            const int h_in = h_in_start + kh;
-            const int w_in = w_in_start + kw;
-            if ((h_in >= 0) && (h_in < input_height) && (w_in >= 0) &&
-                (w_in < input_width)) {
-              const int offset = in_offset + h_in * input_width + w_in;
-              const T diff_temp = output_grad_data[index] * input_data[offset];
-              T* addr = addr_offset + kh * filter_width + kw;
-              paddle::platform::CudaAtomicAdd(addr, diff_temp);
-            }
-          }
+
+      const int h_end = h_in_end < input_height ? h_in_end : input_height;
+      const int w_end = w_in_end < input_width ? w_in_end : input_width;
+      const int h_start = h_in_start > 0 ? h_in_start : 0;
+      const int w_start = w_in_start > 0 ? w_in_start : 0;
+
+      for (int h_in = h_start; h_in < h_end; h_in++) {
+        for (int w_in = w_start; w_in < w_end; w_in++) {
+          const int offset = in_offset + h_in * input_width + w_in;
+          const T diff_temp = output_grad_data[index] * input_data[offset];
+          T* addr = addr_offset + (h_in - h_in_start) * filter_width +
+                    (w_in - w_in_start);
+          paddle::platform::CudaAtomicAdd(addr, diff_temp);
         }
       }
     }
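The filter-gradient kernel gets the same treatment: the boundary if/else is replaced by clamped loop bounds, and the gradient address is computed from the window-relative offset (h_in - h_in_start, w_in - w_in_start). CudaAtomicAdd is kept because different output positions can still accumulate into the same filter weight.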
python/paddle/v2/fluid/layers/nn.py

@@ -1237,7 +1237,7 @@ def conv2d(input,
     l_type = 'conv2d'
     if (num_channels == groups and num_filters % num_channels == 0 and
             not use_cudnn):
-        l_type = 'depthwise_conv'
+        l_type = 'depthwise_conv2d'

     helper = LayerHelper(l_type, **locals())
     dtype = helper.input_dtype()
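For context, the renamed op is only selected on the Python side when the layer is configured as a depthwise convolution: groups equals the number of input channels, num_filters is a multiple of it, and cuDNN is disabled. A hedged usage sketch against the Fluid API of this era (layer and parameter names as in nn.py; the shapes are illustrative):

import paddle.v2.fluid as fluid

# 8 input channels, groups == num_channels, num_filters % num_channels == 0,
# and use_cudnn=False, so conv2d should pick l_type = 'depthwise_conv2d'.
data = fluid.layers.data(name='x', shape=[8, 32, 32], dtype='float32')
out = fluid.layers.conv2d(
    input=data,
    num_filters=8,
    filter_size=3,
    groups=8,
    use_cudnn=False)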
python/paddle/v2/fluid/tests/test_conv2d_op.py

@@ -250,7 +250,7 @@ class TestDepthwiseConv(TestConv2dOp):
         assert np.mod(self.input_size[1], self.groups) == 0
         f_c = self.input_size[1] / self.groups
         self.filter_size = [6, f_c, 3, 3]
-        self.op_type = "depthwise_conv"
+        self.op_type = "depthwise_conv2d"


 class TestDepthwiseConv2(TestConv2dOp):
@@ -262,7 +262,7 @@ class TestDepthwiseConv2(TestConv2dOp):
         assert np.mod(self.input_size[1], self.groups) == 0
         f_c = self.input_size[1] / self.groups
         self.filter_size = [6, f_c, 3, 3]
-        self.op_type = "depthwise_conv"
+        self.op_type = "depthwise_conv2d"


 # cudnn v5 does not support dilation conv.