diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc
index f004ea1c69e0c5ba69f26a1e3141e6e407fad4be..d2d57c81f0b41756a76c2b93f72afd3defb1abcc 100644
--- a/paddle/fluid/operators/conv_transpose_op.cc
+++ b/paddle/fluid/operators/conv_transpose_op.cc
@@ -192,7 +192,8 @@ framework::OpKernelType ConvTransposeOp::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
   framework::LibraryType library_{framework::LibraryType::kPlain};
   framework::DataLayout layout_ = framework::DataLayout::kAnyLayout;
-  bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  bool use_cudnn =
+      ctx.HasAttr("use_cudnn") ? ctx.Attr<bool>("use_cudnn") : false;
   use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
   auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Input");
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
@@ -245,7 +246,8 @@ void Conv2DTransposeOpMaker::Make() {
   AddAttr<bool>("is_test",
                 "(bool, default false) Set to true for inference only, false "
                 "for training. Some layers may run faster when this is true.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddInput("Input",
            "(Tensor) The input tensor of convolution transpose operator. "
            "The format of input tensor is NCHW or NHWC. Where N is batch size, "
@@ -263,7 +265,8 @@ void Conv2DTransposeOpMaker::Make() {
            "(Tensor) Bias to be added to each output of filter application."
            "The format of output tensor is X (one-dimensional) of size equal"
            "to the number of output channels. Only used with MKL-DNN.")
-      .AsDispensable();
+      .AsDispensable()
+      .AsExtra();
   AddOutput("Output",
             "(Tensor) The output tensor of convolution transpose operator. "
             "The format of output tensor is the same as input tensor.");
@@ -298,29 +301,37 @@ void Conv2DTransposeOpMaker::Make() {
   AddAttr<bool>(
       "use_cudnn",
       "(bool, default false) Only used in cudnn kernel, need install cudnn")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("use_mkldnn",
                 "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("force_fp32_output",
                 "(bool, default false) Force BF16 kernel output FP32, only "
                 "used in MKL-DNN BF16")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>(
       "mkldnn_data_type",
       "(string, default \"float32\"). Data type of mkldnn kernel")
       .SetDefault("float32")
-      .InEnum({"float32", "bfloat16"});
+      .InEnum({"float32", "bfloat16"})
+      .AsExtra();
   AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>("fuse_activation",
                        "(string, default \"\") Only used in mkldnn kernel")
-      .SetDefault("");
+      .SetDefault("")
+      .AsExtra();
   AddAttr<float>("fuse_alpha",
                  "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f);
+      .SetDefault(0.0f)
+      .AsExtra();
   AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f);
+      .SetDefault(0.0f)
+      .AsExtra();
   AddAttr<std::string>(
       "data_format",
       "(string, default NCHW) Only used in "
@@ -340,7 +351,8 @@ void Conv2DTransposeOpMaker::Make() {
           "allocated/freed each time the operator runs, larger "
           "workspace size can increase performance but also requires "
           "better hardward. This size should be carefully set.")
-      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
+      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
+      .AsExtra();
   AddComment(R"DOC(
 Convolution2D Transpose Operator.
 
@@ -423,10 +435,12 @@ void Conv3DTransposeOpMaker::Make() {
   AddAttr<bool>(
       "use_cudnn",
       "(bool, default false) Only used in cudnn kernel, need install cudnn")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("use_mkldnn",
                 "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>(
       "data_format",
       "(string, default NCHW) Only used in "
@@ -446,7 +460,8 @@ void Conv3DTransposeOpMaker::Make() {
           "allocated/freed each time the operator runs, larger "
           "workspace size can increase performance but also requires "
           "better hardward. This size should be carefully set.")
-      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
+      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
+      .AsExtra();
   AddComment(R"DOC(
 Convolution3D Transpose Operator.
 
@@ -491,7 +506,8 @@ void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const {
 
 framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
-  bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  bool use_cudnn =
+      ctx.HasAttr("use_cudnn") ? ctx.Attr<bool>("use_cudnn") : false;
   use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   if (platform::is_gpu_place(ctx.GetPlace())) {
@@ -591,7 +607,8 @@ void ConvTransposeOpDoubleGrad::InferShape(
 
 framework::OpKernelType ConvTransposeOpDoubleGrad::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
-  bool use_cudnn = ctx.Attr<bool>("use_cudnn");
+  bool use_cudnn =
+      ctx.HasAttr("use_cudnn") ? ctx.Attr<bool>("use_cudnn") : false;
   use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   if (platform::is_gpu_place(ctx.GetPlace())) {