Unverified commit f086ebb8, authored by chengduo, committed by GitHub

Merge pull request #7536 from chengduoZH/feature/refine_conv_pool_python

Exposing use_cudnn
@@ -70,6 +70,13 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const {
 framework::OpKernelType ConvOp::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
+#ifdef PADDLE_WITH_CUDA
+  if (platform::is_gpu_place(ctx.GetPlace())) {
+    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
+    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
+  }
+#endif
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
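Note: this guard is repeated verbatim in each kernel-type getter touched by this PR (conv, conv grad, conv transpose and its grad, pool and its grad). The use_cudnn attribute only takes effect on a GPU place, and only when the device context actually holds a cuDNN handle. A minimal sketch of the resulting predicate, written as illustrative Python rather than Paddle's C++:

def effective_use_cudnn(attr_use_cudnn, on_gpu_place, cudnn_handle):
    # Mirrors the C++ guard: the attribute is ANDed with the place check
    # and with the presence of a live cuDNN handle.
    return attr_use_cudnn and on_gpu_place and (cudnn_handle is not None)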
@@ -283,6 +290,14 @@ void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const {
 framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
+#ifdef PADDLE_WITH_CUDA
+  if (platform::is_gpu_place(ctx.GetPlace())) {
+    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
+    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
+  }
+#endif
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
@@ -61,6 +61,13 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const {
 framework::OpKernelType ConvTransposeOp::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
+#ifdef PADDLE_WITH_CUDA
+  if (platform::is_gpu_place(ctx.GetPlace())) {
+    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
+    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
+  }
+#endif
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
@@ -263,6 +270,13 @@ void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const {
 framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
+#ifdef PADDLE_WITH_CUDA
+  if (platform::is_gpu_place(ctx.GetPlace())) {
+    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
+    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
+  }
+#endif
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
@@ -64,6 +64,13 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const {
 framework::OpKernelType PoolOp::GetExpectedKernelType(
     const framework::ExecutionContext &ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
+#ifdef PADDLE_WITH_CUDA
+  if (platform::is_gpu_place(ctx.GetPlace())) {
+    auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
+    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
+  }
+#endif
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
@@ -88,6 +95,13 @@ void PoolOpGrad::InferShape(framework::InferShapeContext *ctx) const {
 framework::OpKernelType PoolOpGrad::GetExpectedKernelType(
     const framework::ExecutionContext &ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
+#ifdef PADDLE_WITH_CUDA
+  if (platform::is_gpu_place(ctx.GetPlace())) {
+    auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
+    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
+  }
+#endif
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
@@ -676,6 +676,7 @@ def conv2d(input,
            groups=None,
            param_attr=None,
            bias_attr=None,
+           use_cudnn=True,
            act=None):
     """
     **Convolution2D Layer**
@@ -739,6 +740,8 @@ def conv2d(input,
         connected to the second half of the input channels. Default: groups=1
         param_attr(ParamAttr): The parameters to the Conv2d Layer. Default: None
         bias_attr(ParamAttr): Bias parameter for the Conv2d layer. Default: None
+        use_cudnn(bool): Whether to use the cudnn kernel; it only takes effect
+            when the cudnn library is installed. Default: True
         act(str): Activation type. Default: None

     Returns:
@@ -774,6 +777,8 @@ def conv2d(input,
         stride = [stride, stride]
     if isinstance(padding, int):
         padding = [padding, padding]
+    if not isinstance(use_cudnn, bool):
+        raise ValueError("use_cudnn should be True or False")

     input_shape = input.shape
     filter_shape = [num_filters, num_filter_channels] + filter_size
@@ -797,9 +802,12 @@ def conv2d(input,
             'Filter': filter_param,
         },
         outputs={"Output": pre_bias},
-        attrs={'strides': stride,
-               'paddings': padding,
-               'groups': groups})
+        attrs={
+            'strides': stride,
+            'paddings': padding,
+            'groups': groups,
+            'use_cudnn': use_cudnn
+        })

     pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
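With the attribute plumbed through, the flag can be passed directly at the layer call site. A minimal usage sketch (assuming the paddle.v2.fluid import path of this era; shapes and names are illustrative):

import paddle.v2.fluid as fluid

# Illustrative 3-channel 32x32 image input; pass use_cudnn=False to fall
# back to the plain CUDA/CPU kernel even when cuDNN is available.
images = fluid.layers.data(name='pixel', shape=[3, 32, 32], dtype='float32')
conv = fluid.layers.conv2d(
    input=images, num_filters=16, filter_size=3, act='relu', use_cudnn=False)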
@@ -948,6 +956,7 @@ def pool2d(input,
            pool_stride=None,
            pool_padding=None,
            global_pooling=False,
+           use_cudnn=True,
            name=None):
     """
     This function adds the operator for pooling in 2 dimensions, using the
@@ -967,6 +976,8 @@ def pool2d(input,
         pool_stride = [pool_stride, pool_stride]
     if isinstance(pool_padding, int):
         pool_padding = [pool_padding, pool_padding]
+    if not isinstance(use_cudnn, bool):
+        raise ValueError("use_cudnn should be True or False")

     helper = LayerHelper('pool2d', **locals())
     dtype = helper.input_dtype()
@@ -981,7 +992,8 @@ def pool2d(input,
             "ksize": pool_size,
             "global_pooling": global_pooling,
             "strides": pool_stride,
-            "paddings": pool_padding
+            "paddings": pool_padding,
+            "use_cudnn": use_cudnn
         })

     return pool_out
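pool2d forwards the flag in the same way, so a conv/pool pair can be switched together. Continuing the sketch above (sizes illustrative):

pool = fluid.layers.pool2d(
    input=conv, pool_size=2, pool_type='max', pool_stride=2, use_cudnn=False)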
@@ -1096,6 +1108,7 @@ def conv2d_transpose(input,
                      stride=None,
                      dilation=None,
                      param_attr=None,
+                     use_cudnn=True,
                      name=None):
     """
     The transpose of conv2d layer.
@@ -1123,6 +1136,8 @@ def conv2d_transpose(input,
            contain two integers, (dilation_H, dilation_W). Otherwise, the
            dilation_H = dilation_W = dilation.
        param_attr: Parameter Attribute.
+       use_cudnn(bool): Whether to use the cudnn kernel; it only takes effect
+           when the cudnn library is installed. Default: True
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically.
@@ -1151,6 +1166,10 @@ def conv2d_transpose(input,
     elif dilation is not None:
         op_attr['dilations'] = dilation

+    if not isinstance(use_cudnn, bool):
+        raise ValueError("use_cudnn should be True or False")
+    op_attr['use_cudnn'] = use_cudnn
+
     if filter_size is None:
         if output_size is None:
             raise ValueError("output_size must be set when filter_size is None")
@@ -28,19 +28,22 @@ def simple_img_conv_pool(input,
                          pool_stride,
                          act,
                          param_attr=None,
-                         pool_type='max'):
+                         pool_type='max',
+                         use_cudnn=True):
     conv_out = layers.conv2d(
         input=input,
         num_filters=num_filters,
         filter_size=filter_size,
         param_attr=param_attr,
-        act=act)
+        act=act,
+        use_cudnn=use_cudnn)

     pool_out = layers.pool2d(
         input=conv_out,
         pool_size=pool_size,
         pool_type=pool_type,
-        pool_stride=pool_stride)
+        pool_stride=pool_stride,
+        use_cudnn=use_cudnn)
     return pool_out
@@ -54,7 +57,8 @@ def img_conv_group(input,
                    conv_with_batchnorm=False,
                    conv_batchnorm_drop_rate=None,
                    pool_stride=1,
-                   pool_type=None):
+                   pool_type=None,
+                   use_cudnn=True):
     """
     Image Convolution Group, Used for vgg net.
     """
@@ -85,7 +89,8 @@ def img_conv_group(input,
             filter_size=conv_filter_size[i],
             padding=conv_padding[i],
             param_attr=param_attr[i],
-            act=local_conv_act)
+            act=local_conv_act,
+            use_cudnn=use_cudnn)

         if conv_with_batchnorm[i]:
             tmp = layers.batch_norm(input=tmp, act=conv_act)
@@ -97,7 +102,8 @@ def img_conv_group(input,
             input=tmp,
             pool_size=pool_size,
             pool_type=pool_type,
-            pool_stride=pool_stride)
+            pool_stride=pool_stride,
+            use_cudnn=use_cudnn)
     return pool_out
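The composite helpers simply forward the flag to every conv2d and pool2d they create, so one argument toggles cuDNN for the whole block. For example (module path and inputs as assumed in the sketches above):

import paddle.v2.fluid.nets as nets

# One flag disables cuDNN for both the convolution and the pooling op.
feat = nets.simple_img_conv_pool(
    input=images, num_filters=20, filter_size=5,
    pool_size=2, pool_stride=2, act='relu', use_cudnn=False)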