Commit f086ebb8 (unverified)
Authored Jan 18, 2018 by chengduo; committed by GitHub on Jan 18, 2018.
Merge pull request #7536 from chengduoZH/feature/refine_conv_pool_python
Exposing use_cudnn
Parents: 4b3e22b8, edd21326
Showing 5 changed files with 78 additions and 10 deletions (+78 -10).
paddle/operators/conv_op.cc (+15 -0)
paddle/operators/conv_transpose_op.cc (+14 -0)
paddle/operators/pool_op.cc (+14 -0)
python/paddle/v2/fluid/layers/nn.py (+23 -4)
python/paddle/v2/fluid/nets.py (+12 -6)
paddle/operators/conv_op.cc
@@ -70,6 +70,13 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const {
 framework::OpKernelType ConvOp::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
+#ifdef PADDLE_WITH_CUDA
+  if (platform::is_gpu_place(ctx.GetPlace())) {
+    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
+    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
+  }
+#endif
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;

@@ -283,6 +290,14 @@ void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const {
 framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
+#ifdef PADDLE_WITH_CUDA
+  if (platform::is_gpu_place(ctx.GetPlace())) {
+    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
+    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
+  }
+#endif
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
paddle/operators/conv_transpose_op.cc
@@ -61,6 +61,13 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const {
 framework::OpKernelType ConvTransposeOp::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
+#ifdef PADDLE_WITH_CUDA
+  if (platform::is_gpu_place(ctx.GetPlace())) {
+    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
+    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
+  }
+#endif
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;

@@ -263,6 +270,13 @@ void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const {
 framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
+#ifdef PADDLE_WITH_CUDA
+  if (platform::is_gpu_place(ctx.GetPlace())) {
+    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
+    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
+  }
+#endif
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
paddle/operators/pool_op.cc
@@ -64,6 +64,13 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const {
 framework::OpKernelType PoolOp::GetExpectedKernelType(
     const framework::ExecutionContext &ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
+#ifdef PADDLE_WITH_CUDA
+  if (platform::is_gpu_place(ctx.GetPlace())) {
+    auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
+    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
+  }
+#endif
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;

@@ -88,6 +95,13 @@ void PoolOpGrad::InferShape(framework::InferShapeContext *ctx) const {
 framework::OpKernelType PoolOpGrad::GetExpectedKernelType(
     const framework::ExecutionContext &ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
+#ifdef PADDLE_WITH_CUDA
+  if (platform::is_gpu_place(ctx.GetPlace())) {
+    auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
+    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
+  }
+#endif
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
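All three operator files above add the same gate before choosing the cuDNN kernel: the use_cudnn attribute is honoured only when the op runs on a GPU place and, under PADDLE_WITH_CUDA, only when the device context actually holds a cuDNN handle; otherwise the op falls back to the default library. Below is a minimal, self-contained Python sketch of that decision, with plain booleans standing in for ctx.Attr<bool>("use_cudnn"), platform::is_gpu_place(ctx.GetPlace()) and dev_ctx.cudnn_handle() != nullptr; it is illustrative only, not the Paddle C++ API.

# Illustrative sketch only: plain booleans stand in for the ExecutionContext
# queries made in conv_op.cc, conv_transpose_op.cc and pool_op.cc.
def pick_kernel_library(use_cudnn_attr, on_gpu_place, cudnn_handle_available):
    use_cudnn = use_cudnn_attr
    use_cudnn &= on_gpu_place              # cuDNN is never chosen on CPU places
    use_cudnn &= cudnn_handle_available    # nor without a cuDNN handle in the device context
    # The real code sets framework::LibraryType::kCUDNN in the first case.
    return "cudnn" if use_cudnn else "plain"

assert pick_kernel_library(True, True, True) == "cudnn"
assert pick_kernel_library(True, False, True) == "plain"   # CPU place: fall back
assert pick_kernel_library(True, True, False) == "plain"   # no cuDNN handle: fall back
assert pick_kernel_library(False, True, True) == "plain"   # user opted out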
python/paddle/v2/fluid/layers/nn.py
@@ -676,6 +676,7 @@ def conv2d(input,
            groups=None,
            param_attr=None,
            bias_attr=None,
+           use_cudnn=True,
            act=None):
     """
     **Convlution2D Layer**

@@ -739,6 +740,8 @@ def conv2d(input,
             connected to the second half of the input channels. Default: groups=1
         param_attr(ParamAttr): The parameters to the Conv2d Layer. Default: None
         bias_attr(ParamAttr): Bias parameter for the Conv2d layer. Default: None
+        use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn
+            library is installed. Default: True
         act(str): Activation type. Default: None

     Returns:

@@ -774,6 +777,8 @@ def conv2d(input,
         stride = [stride, stride]
     if isinstance(padding, int):
         padding = [padding, padding]
+    if not isinstance(use_cudnn, bool):
+        raise ValueError("use_cudnn should be True or False")

     input_shape = input.shape
     filter_shape = [num_filters, num_filter_channels] + filter_size

@@ -797,9 +802,12 @@ def conv2d(input,
             'Filter': filter_param,
         },
         outputs={"Output": pre_bias},
-        attrs={'strides': stride,
-               'paddings': padding,
-               'groups': groups})
+        attrs={
+            'strides': stride,
+            'paddings': padding,
+            'groups': groups,
+            'use_cudnn': use_cudnn
+        })

     pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)

@@ -948,6 +956,7 @@ def pool2d(input,
            pool_stride=None,
            pool_padding=None,
            global_pooling=False,
+           use_cudnn=True,
            name=None):
     """
     This function adds the operator for pooling in 2 dimensions, using the

@@ -967,6 +976,8 @@ def pool2d(input,
         pool_stride = [pool_stride, pool_stride]
     if isinstance(pool_padding, int):
         pool_padding = [pool_padding, pool_padding]
+    if not isinstance(use_cudnn, bool):
+        raise ValueError("use_cudnn should be True or False")

     helper = LayerHelper('pool2d', **locals())
     dtype = helper.input_dtype()

@@ -981,7 +992,8 @@ def pool2d(input,
             "ksize": pool_size,
             "global_pooling": global_pooling,
             "strides": pool_stride,
-            "paddings": pool_padding
+            "paddings": pool_padding,
+            "use_cudnn": use_cudnn
         })

     return pool_out

@@ -1096,6 +1108,7 @@ def conv2d_transpose(input,
                      stride=None,
                      dilation=None,
                      param_attr=None,
+                     use_cudnn=True,
                      name=None):
     """
     The transpose of conv2d layer.

@@ -1123,6 +1136,8 @@ def conv2d_transpose(input,
             contain two integers, (dilation_H, dilation_W). Otherwise, the
             dilation_H = dilation_W = dilation.
         param_attr: Parameter Attribute.
+        use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn
+            library is installed. Default: True
         name(str|None): A name for this layer(optional). If set None, the layer
             will be named automatically.

@@ -1151,6 +1166,10 @@ def conv2d_transpose(input,
     elif dilation is not None:
         op_attr['dilations'] = dilation

+    if not isinstance(use_cudnn, bool):
+        raise ValueError("use_cudnn should be True or False")
+    op_attr['use_cudnn'] = use_cudnn
+
     if filter_size is None:
         if output_size is None:
             raise ValueError("output_size must be set when filter_size is None")
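On the Python side, use_cudnn becomes an ordinary keyword argument of conv2d, pool2d and conv2d_transpose (default True), forwarded to the operator as the use_cudnn attribute, and a non-boolean value raises ValueError. A minimal usage sketch against the paddle.v2.fluid API of this commit; the fluid.layers.data input variable is assumed from the surrounding fluid API rather than shown in this diff, so treat it as an illustration.

import paddle.v2.fluid as fluid

# Input variable; layers.data is assumed from the fluid API of this era.
img = fluid.layers.data(name='img', shape=[3, 32, 32], dtype='float32')

# Opt out of cuDNN explicitly; the C++ side then picks the plain kernels.
conv = fluid.layers.conv2d(
    input=img, num_filters=16, filter_size=3, act='relu', use_cudnn=False)
pool = fluid.layers.pool2d(
    input=conv, pool_size=2, pool_type='max', pool_stride=2, use_cudnn=False)

# Anything other than a bool is rejected by the new isinstance checks.
try:
    fluid.layers.conv2d(input=img, num_filters=8, filter_size=3, use_cudnn="yes")
except ValueError as e:
    print(e)  # use_cudnn should be True or False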
python/paddle/v2/fluid/nets.py
@@ -28,19 +28,22 @@ def simple_img_conv_pool(input,
                          pool_stride,
                          act,
                          param_attr=None,
-                         pool_type='max'):
+                         pool_type='max',
+                         use_cudnn=True):
     conv_out = layers.conv2d(
         input=input,
         num_filters=num_filters,
         filter_size=filter_size,
         param_attr=param_attr,
-        act=act)
+        act=act,
+        use_cudnn=use_cudnn)

     pool_out = layers.pool2d(
         input=conv_out,
         pool_size=pool_size,
         pool_type=pool_type,
-        pool_stride=pool_stride)
+        pool_stride=pool_stride,
+        use_cudnn=use_cudnn)
     return pool_out

@@ -54,7 +57,8 @@ def img_conv_group(input,
                    conv_with_batchnorm=False,
                    conv_batchnorm_drop_rate=None,
                    pool_stride=1,
-                   pool_type=None):
+                   pool_type=None,
+                   use_cudnn=True):
     """
     Image Convolution Group, Used for vgg net.
     """

@@ -85,7 +89,8 @@ def img_conv_group(input,
             filter_size=conv_filter_size[i],
             padding=conv_padding[i],
             param_attr=param_attr[i],
-            act=local_conv_act)
+            act=local_conv_act,
+            use_cudnn=use_cudnn)

         if conv_with_batchnorm[i]:
             tmp = layers.batch_norm(input=tmp, act=conv_act)

@@ -97,7 +102,8 @@ def img_conv_group(input,
         input=tmp,
         pool_size=pool_size,
         pool_type=pool_type,
-        pool_stride=pool_stride)
+        pool_stride=pool_stride,
+        use_cudnn=use_cudnn)
     return pool_out
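The nets.py helpers only thread the flag through to the conv2d and pool2d calls they wrap, so disabling cuDNN for a whole conv+pool block is a single argument. A short sketch based on the signatures in this diff (the img data variable is assumed, as in the previous example):

import paddle.v2.fluid as fluid
import paddle.v2.fluid.nets as nets

img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')

# use_cudnn is forwarded to both the conv2d and the pool2d inside the helper.
feat = nets.simple_img_conv_pool(
    input=img,
    num_filters=20,
    filter_size=5,
    pool_size=2,
    pool_stride=2,
    act='relu',
    use_cudnn=False)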