Commit ed7e74ab authored by chengduoZH

follow comments and refine python doc

Parent 24f528a1
@@ -284,9 +284,7 @@ void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const {
 framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
-  if (paddle::platform::is_cpu_place(ctx.GetPlace())) {
-    use_cudnn = false;
-  }
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
......
@@ -264,9 +264,7 @@ void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const {
 framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
-  if (paddle::platform::is_cpu_place(ctx.GetPlace())) {
-    use_cudnn = false;
-  }
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
......
@@ -89,9 +89,7 @@ void PoolOpGrad::InferShape(framework::InferShapeContext *ctx) const {
 framework::OpKernelType PoolOpGrad::GetExpectedKernelType(
     const framework::ExecutionContext &ctx) const {
   bool use_cudnn = ctx.Attr<bool>("use_cudnn");
-  if (paddle::platform::is_cpu_place(ctx.GetPlace())) {
-    use_cudnn = false;
-  }
+  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
   framework::LibraryType library_;
   if (use_cudnn) {
     library_ = framework::LibraryType::kCUDNN;
......
@@ -724,6 +724,8 @@ def conv2d(input,
             connected to the second half of the input channels. Default: groups=1
         param_attr(ParamAttr): The parameters to the Conv2d Layer. Default: None
         bias_attr(ParamAttr): Bias parameter for the Conv2d layer. Default: None
+        use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn
+            library is installed. Default: True
         act(str): Activation type. Default: None
 
     Returns:
@@ -1115,8 +1117,8 @@ def conv2d_transpose(input,
             contain two integers, (dilation_H, dilation_W). Otherwise, the
             dilation_H = dilation_W = dilation.
         param_attr: Parameter Attribute.
-        main_program(Program): the main program
-        startup_program(Program): the startup program
+        use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn
+            library is installed. Default: True
 
     Returns:
         Variable: Output image.
......
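Below is a minimal Python usage sketch of the use_cudnn flag documented above. It is illustrative only: the paddle.fluid import path, layer arguments, and tensor shapes are assumptions based on the Fluid API of this period and are not part of this commit. The flag only requests the cuDNN kernel; as the GetExpectedKernelType changes above show, it is masked out (use_cudnn &= is_gpu_place) when the operator does not run on a GPU place.

import paddle.fluid as fluid  # assumed import path for the Fluid layers API

# A 3-channel 32x32 image batch; the name and shape are illustrative.
image = fluid.layers.data(name='image', shape=[3, 32, 32], dtype='float32')

# Request the cuDNN kernel explicitly; on a CPU place the operator falls
# back to the plain kernel because use_cudnn &= is_gpu_place(...).
conv = fluid.layers.conv2d(
    input=image, num_filters=16, filter_size=3, act='relu', use_cudnn=True)

# The same flag is documented for conv2d_transpose by this commit.
deconv = fluid.layers.conv2d_transpose(
    input=conv, num_filters=3, filter_size=3, use_cudnn=True)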