diff --git a/paddle/operators/conv_op.cc b/paddle/operators/conv_op.cc
index 63e4018555012ca5082ebbeec80c1fb5f4ebc97d..0e8dddd7f19bd969544de91d55b25f346afca6ad 100644
--- a/paddle/operators/conv_op.cc
+++ b/paddle/operators/conv_op.cc
@@ -284,9 +284,7 @@ void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const {
 framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
-  if (paddle::platform::is_cpu_place(ctx.GetPlace())) {
-    use_cudnn = false;
-  }
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
diff --git a/paddle/operators/conv_transpose_op.cc b/paddle/operators/conv_transpose_op.cc
index 4145638c74ecae11cd30d1cf57a6d76c0dc5992e..f71838c2aa71c56126eb819c6cedfcf8acd82569 100644
--- a/paddle/operators/conv_transpose_op.cc
+++ b/paddle/operators/conv_transpose_op.cc
@@ -264,9 +264,7 @@ void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const {
 framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
-  if (paddle::platform::is_cpu_place(ctx.GetPlace())) {
-    use_cudnn = false;
-  }
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc
index ebe7d9a0a581f187dcb7e37d2a2da1f26c8d72bb..a450279451980557a62359ae86c34362e8dd700c 100644
--- a/paddle/operators/pool_op.cc
+++ b/paddle/operators/pool_op.cc
@@ -89,9 +89,7 @@ void PoolOpGrad::InferShape(framework::InferShapeContext *ctx) const {
 framework::OpKernelType PoolOpGrad::GetExpectedKernelType(
     const framework::ExecutionContext &ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
-  if (paddle::platform::is_cpu_place(ctx.GetPlace())) {
-    use_cudnn = false;
-  }
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py
index 0cfa011036378a8789676427c34839eb6f001003..251a1535d87b59001f7a089f41f7a0ce070d0c15 100644
--- a/python/paddle/v2/fluid/layers/nn.py
+++ b/python/paddle/v2/fluid/layers/nn.py
@@ -724,6 +724,8 @@ def conv2d(input,
             connected to the second half of the input channels. Default: groups=1
         param_attr(ParamAttr): The parameters to the Conv2d Layer. Default: None
         bias_attr(ParamAttr): Bias parameter for the Conv2d layer. Default: None
+        use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn
+            library is installed. Default: True
         act(str): Activation type. Default: None
 
     Returns:
@@ -1115,8 +1117,8 @@ def conv2d_transpose(input,
                               contain two integers, (dilation_H, dilation_W). Otherwise, the
                               dilation_H = dilation_W = dilation.
        param_attr: Parameter Attribute.
-       main_program(Program): the main program
-       startup_program(Program): the startup program
+       use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn
+           library is installed. Default: True
 
    Returns:
        Variable: Output image.
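
The C++ change collapses the CPU fallback into a single step, `use_cudnn &= platform::is_gpu_place(ctx.GetPlace())`, so the cuDNN library is selected only when the attribute is set and the op actually runs on a GPU place; the Python change just documents the existing `use_cudnn` argument. A minimal usage sketch of that flag, assuming the `paddle.v2.fluid` layer API of this revision (layer argument names other than `use_cudnn` are taken from that API, not from this diff):

```python
import paddle.v2.fluid as fluid

# Build a small network that passes use_cudnn explicitly. When the
# program is executed on CPUPlace, the conv/pool/conv_transpose ops
# fall back to the plain kernels because the gradient ops now compute
# `use_cudnn &= platform::is_gpu_place(ctx.GetPlace())`.
images = fluid.layers.data(name='pixel', shape=[3, 48, 48], dtype='float32')

conv = fluid.layers.conv2d(
    input=images,
    num_filters=16,
    filter_size=3,
    use_cudnn=True,  # honored only on a GPU place with cuDNN installed
    act='relu')

deconv = fluid.layers.conv2d_transpose(
    input=conv,
    num_filters=3,
    filter_size=3,
    use_cudnn=True)
```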