提交 251c6032 编写于 作者: C chengduoZH

Set use_cudnn to True by default

上级 79aa5122
...@@ -70,7 +70,9 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { ...@@ -70,7 +70,9 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const {
framework::OpKernelType ConvOp::GetExpectedKernelType( framework::OpKernelType ConvOp::GetExpectedKernelType(
const framework::ExecutionContext& ctx) const { const framework::ExecutionContext& ctx) const {
bool use_cudnn = ctx.Attr<bool>("use_cudnn"); bool use_cudnn = ctx.Attr<bool>("use_cudnn");
use_cudnn &= platform::dynload::HasCUDNN(); if (paddle::platform::is_cpu_place(ctx.GetPlace())) {
use_cudnn = false;
}
framework::LibraryType library_; framework::LibraryType library_;
if (use_cudnn) { if (use_cudnn) {
library_ = framework::LibraryType::kCUDNN; library_ = framework::LibraryType::kCUDNN;
...@@ -284,7 +286,9 @@ void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const { ...@@ -284,7 +286,9 @@ void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const {
framework::OpKernelType ConvOpGrad::GetExpectedKernelType( framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
const framework::ExecutionContext& ctx) const { const framework::ExecutionContext& ctx) const {
bool use_cudnn = ctx.Attr<bool>("use_cudnn"); bool use_cudnn = ctx.Attr<bool>("use_cudnn");
use_cudnn &= platform::dynload::HasCUDNN(); if (paddle::platform::is_cpu_place(ctx.GetPlace())) {
use_cudnn = false;
}
framework::LibraryType library_; framework::LibraryType library_;
if (use_cudnn) { if (use_cudnn) {
library_ = framework::LibraryType::kCUDNN; library_ = framework::LibraryType::kCUDNN;
......
...@@ -19,7 +19,6 @@ limitations under the License. */ ...@@ -19,7 +19,6 @@ limitations under the License. */
#include "paddle/operators/math/im2col.h" #include "paddle/operators/math/im2col.h"
#include "paddle/operators/math/math_function.h" #include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/vol2col.h" #include "paddle/operators/math/vol2col.h"
#include "paddle/platform/dynload/cudnn.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -61,7 +61,9 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const { ...@@ -61,7 +61,9 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const {
framework::OpKernelType ConvTransposeOp::GetExpectedKernelType( framework::OpKernelType ConvTransposeOp::GetExpectedKernelType(
const framework::ExecutionContext& ctx) const { const framework::ExecutionContext& ctx) const {
bool use_cudnn = ctx.Attr<bool>("use_cudnn"); bool use_cudnn = ctx.Attr<bool>("use_cudnn");
use_cudnn &= platform::dynload::HasCUDNN(); if (paddle::platform::is_cpu_place(ctx.GetPlace())) {
use_cudnn = false;
}
framework::LibraryType library_; framework::LibraryType library_;
if (use_cudnn) { if (use_cudnn) {
library_ = framework::LibraryType::kCUDNN; library_ = framework::LibraryType::kCUDNN;
...@@ -264,7 +266,9 @@ void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const { ...@@ -264,7 +266,9 @@ void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const {
framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType( framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
const framework::ExecutionContext& ctx) const { const framework::ExecutionContext& ctx) const {
bool use_cudnn = ctx.Attr<bool>("use_cudnn"); bool use_cudnn = ctx.Attr<bool>("use_cudnn");
use_cudnn &= platform::dynload::HasCUDNN(); if (paddle::platform::is_cpu_place(ctx.GetPlace())) {
use_cudnn = false;
}
framework::LibraryType library_; framework::LibraryType library_;
if (use_cudnn) { if (use_cudnn) {
library_ = framework::LibraryType::kCUDNN; library_ = framework::LibraryType::kCUDNN;
......
...@@ -19,7 +19,6 @@ limitations under the License. */ ...@@ -19,7 +19,6 @@ limitations under the License. */
#include "paddle/operators/math/im2col.h" #include "paddle/operators/math/im2col.h"
#include "paddle/operators/math/math_function.h" #include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/vol2col.h" #include "paddle/operators/math/vol2col.h"
#include "paddle/platform/dynload/cudnn.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -64,7 +64,9 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const { ...@@ -64,7 +64,9 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const {
framework::OpKernelType PoolOp::GetExpectedKernelType( framework::OpKernelType PoolOp::GetExpectedKernelType(
const framework::ExecutionContext &ctx) const { const framework::ExecutionContext &ctx) const {
bool use_cudnn = ctx.Attr<bool>("use_cudnn"); bool use_cudnn = ctx.Attr<bool>("use_cudnn");
use_cudnn &= platform::dynload::HasCUDNN(); if (paddle::platform::is_cpu_place(ctx.GetPlace())) {
use_cudnn = false;
}
framework::LibraryType library_; framework::LibraryType library_;
if (use_cudnn) { if (use_cudnn) {
library_ = framework::LibraryType::kCUDNN; library_ = framework::LibraryType::kCUDNN;
...@@ -89,7 +91,9 @@ void PoolOpGrad::InferShape(framework::InferShapeContext *ctx) const { ...@@ -89,7 +91,9 @@ void PoolOpGrad::InferShape(framework::InferShapeContext *ctx) const {
framework::OpKernelType PoolOpGrad::GetExpectedKernelType( framework::OpKernelType PoolOpGrad::GetExpectedKernelType(
const framework::ExecutionContext &ctx) const { const framework::ExecutionContext &ctx) const {
bool use_cudnn = ctx.Attr<bool>("use_cudnn"); bool use_cudnn = ctx.Attr<bool>("use_cudnn");
use_cudnn &= platform::dynload::HasCUDNN(); if (paddle::platform::is_cpu_place(ctx.GetPlace())) {
use_cudnn = false;
}
framework::LibraryType library_; framework::LibraryType library_;
if (use_cudnn) { if (use_cudnn) {
library_ = framework::LibraryType::kCUDNN; library_ = framework::LibraryType::kCUDNN;
......
...@@ -18,7 +18,6 @@ limitations under the License. */ ...@@ -18,7 +18,6 @@ limitations under the License. */
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
#include "paddle/operators/math/math_function.h" #include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/pooling.h" #include "paddle/operators/math/pooling.h"
#include "paddle/platform/dynload/cudnn.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -57,10 +57,6 @@ void EnforceCUDNNLoaded(const char* fn_name) { ...@@ -57,10 +57,6 @@ void EnforceCUDNNLoaded(const char* fn_name) {
bool HasCUDNN() { return true; } bool HasCUDNN() { return true; }
#endif #endif
#ifndef PADDLE_WITH_CUDA
bool HasCUDNN() { return false; }
#endif
} // namespace dynload } // namespace dynload
} // namespace platform } // namespace platform
} // namespace paddle } // namespace paddle
...@@ -660,7 +660,7 @@ def conv2d(input, ...@@ -660,7 +660,7 @@ def conv2d(input,
groups=None, groups=None,
param_attr=None, param_attr=None,
bias_attr=None, bias_attr=None,
use_cudnn=False, use_cudnn=True,
act=None): act=None):
""" """
**Convlution2D Layer** **Convlution2D Layer**
...@@ -938,7 +938,7 @@ def pool2d(input, ...@@ -938,7 +938,7 @@ def pool2d(input,
pool_stride=None, pool_stride=None,
pool_padding=None, pool_padding=None,
global_pooling=False, global_pooling=False,
use_cudnn=False): use_cudnn=True):
""" """
This function adds the operator for pooling in 2 dimensions, using the This function adds the operator for pooling in 2 dimensions, using the
pooling configurations mentioned in input parameters. pooling configurations mentioned in input parameters.
...@@ -1088,7 +1088,7 @@ def conv2d_transpose(input, ...@@ -1088,7 +1088,7 @@ def conv2d_transpose(input,
stride=None, stride=None,
dilation=None, dilation=None,
param_attr=None, param_attr=None,
use_cudnn=False): use_cudnn=True):
""" """
The transpose of conv2d layer. The transpose of conv2d layer.
......
...@@ -14,7 +14,7 @@ def simple_img_conv_pool(input, ...@@ -14,7 +14,7 @@ def simple_img_conv_pool(input,
act, act,
param_attr=None, param_attr=None,
pool_type='max', pool_type='max',
use_cudnn=False): use_cudnn=True):
conv_out = layers.conv2d( conv_out = layers.conv2d(
input=input, input=input,
num_filters=num_filters, num_filters=num_filters,
...@@ -41,10 +41,10 @@ def img_conv_group(input, ...@@ -41,10 +41,10 @@ def img_conv_group(input,
param_attr=None, param_attr=None,
conv_with_batchnorm=False, conv_with_batchnorm=False,
conv_batchnorm_drop_rate=None, conv_batchnorm_drop_rate=None,
conv_use_cudnn=False, conv_use_cudnn=True,
pool_stride=1, pool_stride=1,
pool_type=None, pool_type=None,
pool_use_cudnn=False): pool_use_cudnn=True):
""" """
Image Convolution Group, Used for vgg net. Image Convolution Group, Used for vgg net.
""" """
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册