From 2a149741446f40c54e0d315259dd0c2fc8cd232a Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Fri, 2 Sep 2022 15:11:31 +0800
Subject: [PATCH] Clear extra attributes of some Op in OpMaker (#45613)

* remove extra attr of abs in opmaker

* remove extra attrs of some op in opmaker

* remove is_test of conv

* fix attr getting of interpretercore

* fix inplace_abn

* fix bug

* fix bug of create_op

* refine code format
---
 .../new_executor/new_executor_defs.cc         |  2 +-
 paddle/fluid/framework/op_registry.cc         | 10 +-
 paddle/fluid/framework/operator.cc            |  4 +-
 paddle/fluid/operators/abs_op.cc              |  9 --
 paddle/fluid/operators/addmm_op.cc            |  4 -
 paddle/fluid/operators/affine_grid_op.cc      |  5 -
 paddle/fluid/operators/angle_op.cc            |  9 --
 paddle/fluid/operators/batch_norm_op.cc       |  8 --
 paddle/fluid/operators/clip_op.cc             | 10 --
 paddle/fluid/operators/concat_op.cc           | 17 ----
 paddle/fluid/operators/conv_op.cc             | 99 +++----------------
 paddle/fluid/operators/conv_transpose_op.cc   | 64 ------------
 paddle/phi/api/yaml/api_compat.yaml           | 89 +++++++++++++++--
 13 files changed, 104 insertions(+), 226 deletions(-)

diff --git a/paddle/fluid/framework/new_executor/new_executor_defs.cc b/paddle/fluid/framework/new_executor/new_executor_defs.cc
index 6492538c608..8ee7065368c 100644
--- a/paddle/fluid/framework/new_executor/new_executor_defs.cc
+++ b/paddle/fluid/framework/new_executor/new_executor_defs.cc
@@ -117,7 +117,7 @@ bool InterpretercoreInferShapeContext::HasOutputs(const std::string& name,
 }
 
 AttrReader InterpretercoreInferShapeContext::Attrs() const {
-  return AttrReader(op_.Attrs());
+  return AttrReader(op_.Attrs(), op_.RuntimeAttrs());
 }
 
 std::vector<std::string> InterpretercoreInferShapeContext::Inputs(
diff --git a/paddle/fluid/framework/op_registry.cc b/paddle/fluid/framework/op_registry.cc
index a5ce5b70492..a60563620ad 100644
--- a/paddle/fluid/framework/op_registry.cc
+++ b/paddle/fluid/framework/op_registry.cc
@@ -65,7 +65,15 @@ std::unique_ptr<OperatorBase> OpRegistry::CreateOp(
     op_base = std::unique_ptr<OperatorBase>(
         info.Creator()(type, inputs, outputs, attrs));
   }
-  op_base->SetRuntimeAttributeMap(runtime_attrs);
+  const auto& extra_attr_checkers =
+      operators::ExtraInfoUtils::Instance().GetExtraAttrsChecker(type);
+  if (!extra_attr_checkers.empty()) {
+    auto op_runtime_attr_map = runtime_attrs;
+    for (const auto& checker : extra_attr_checkers) {
+      checker(&op_runtime_attr_map, false);
+    }
+    op_base->SetRuntimeAttributeMap(op_runtime_attr_map);
+  }
   return op_base;
 }
 
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 04d51872852..00a49935004 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -767,7 +767,9 @@ class RuntimeInferShapeContext : public InferShapeContext {
     }
   }
 
-  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }
+  AttrReader Attrs() const override {
+    return AttrReader(op_.Attrs(), op_.RuntimeAttrs());
+  }
 
   std::vector<std::string> Inputs(const std::string& name) const override {
     return op_.Inputs(name);
diff --git a/paddle/fluid/operators/abs_op.cc b/paddle/fluid/operators/abs_op.cc
index f0adfb10a9f..465637f3ed6 100644
--- a/paddle/fluid/operators/abs_op.cc
+++ b/paddle/fluid/operators/abs_op.cc
@@ -54,15 +54,6 @@ class AbsOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X", "(Tensor), The input tensor of abs op.");
     AddOutput("Out", "(Tensor), The output tensor of abs op.");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<bool>("use_cudnn",
-                  "(bool, default false) Only used in cudnn kernel, need "
-                  "install cudnn")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Abs Operator.
 
diff --git a/paddle/fluid/operators/addmm_op.cc b/paddle/fluid/operators/addmm_op.cc
index c54e6924c1a..8642d572e2d 100644
--- a/paddle/fluid/operators/addmm_op.cc
+++ b/paddle/fluid/operators/addmm_op.cc
@@ -72,10 +72,6 @@ class AddMMOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("X", "(Tensor), The first input tensor for mul.");
     AddInput("Y", "(Tensor), The second input tensor for mul.");
     AddOutput("Out", "(Tensor), The output tensor of addmm op.");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddAttr<float>("Alpha", "coefficient of x*y.").SetDefault(1.0f);
     AddAttr<float>("Beta", "coefficient of input.").SetDefault(1.0f);
     AddComment(R"DOC(
diff --git a/paddle/fluid/operators/affine_grid_op.cc b/paddle/fluid/operators/affine_grid_op.cc
index 6eecb5e6b3e..a459196b761 100644
--- a/paddle/fluid/operators/affine_grid_op.cc
+++ b/paddle/fluid/operators/affine_grid_op.cc
@@ -158,11 +158,6 @@ class AffineGridOpMaker : public framework::OpProtoAndCheckerMaker {
              "(Tensor) The shape of target image with format [N, C, H, W].")
         .AsDispensable();
     AddOutput("Output", "(Tensor) Output Tensor with shape [N, H, W, 2].");
-    AddAttr<bool>(
-        "use_cudnn",
-        "(bool, default false) Only used in cudnn kernel, need install cudnn")
-        .SetDefault(true)
-        .AsExtra();
     AddAttr<bool>("align_corners",
                   "(bool, default false) Whether to align the corners of input"
                   "and output.")
diff --git a/paddle/fluid/operators/angle_op.cc b/paddle/fluid/operators/angle_op.cc
index e787feb7dab..5c18f4c6fcc 100644
--- a/paddle/fluid/operators/angle_op.cc
+++ b/paddle/fluid/operators/angle_op.cc
@@ -39,15 +39,6 @@ class AngleOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X", "(Tensor), The input tensor of angle op.");
     AddOutput("Out", "(Tensor), The output tensor of angle op.");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<bool>("use_cudnn",
-                  "(bool, default false) Only used in cudnn kernel, need "
-                  "install cudnn")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Angle Operator.
 
diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc
index 9ad55c21f10..a4a3f3cd2b0 100644
--- a/paddle/fluid/operators/batch_norm_op.cc
+++ b/paddle/fluid/operators/batch_norm_op.cc
@@ -293,14 +293,6 @@ void BatchNormOpMaker::Make() {
               "NHWC kernel")
       .AsDispensable()
       .AsExtra();
-  AddAttr<bool>("use_mkldnn",
-                "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<bool>("fuse_with_relu",
-                "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false)
-      .AsExtra();
   AddAttr<bool>("use_global_stats",
                 "(bool, default false) Whether to use global mean and "
                 "variance. In inference or test mode, set use_global_stats "
diff --git a/paddle/fluid/operators/clip_op.cc b/paddle/fluid/operators/clip_op.cc
index ee632c77f19..7994dacf087 100644
--- a/paddle/fluid/operators/clip_op.cc
+++ b/paddle/fluid/operators/clip_op.cc
@@ -64,16 +64,6 @@ class ClipOpMaker : public framework::OpProtoAndCheckerMaker {
              "input(x)");
     AddAttr<AttrType>("min", "float number, the minimum value to clip by.");
     AddAttr<AttrType>("max", "float number, the maximum value to clip by.");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "mkldnn_data_type",
-        "(string, default \"float32\"). Data type of mkldnn kernel")
-        .SetDefault("float32")
-        .InEnum({"float32", "bfloat16"})
-        .AsExtra();
     AddComment(R"DOC(
 Clip Operator.
 
diff --git a/paddle/fluid/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc
index 75dbb9b0379..7c3a8103e1d 100644
--- a/paddle/fluid/operators/concat_op.cc
+++ b/paddle/fluid/operators/concat_op.cc
@@ -81,11 +81,6 @@ class ConcatOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X", "Input tensors of concat operator.").AsDuplicable();
     AddOutput("Out", "Output tensor of concat operator.");
-    AddAttr<bool>(
-        "use_mkldnn",
-        "(bool, default false) Indicates if MKL-DNN kernel will be used")
-        .SetDefault(false)
-        .AsExtra();
     AddAttr<int>("axis",
                  "The axis along which the input tensors will be concatenated."
                  "The axis could also be negative numbers. Negative axis is "
@@ -99,18 +94,6 @@ class ConcatOpMaker : public framework::OpProtoAndCheckerMaker {
                  "It has higher priority than Attr(axis). "
                  "The shape of AxisTensor must be [1].")
         .AsDispensable();
-    AddAttr<bool>(
-        "use_quantizer",
-        "(bool, default false) "
-        "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "mkldnn_data_type",
-        "(string, default \"float32\"). Data type of mkldnn kernel")
-        .SetDefault("float32")
-        .InEnum({"float32", "int8", "bfloat16"})
-        .AsExtra();
     AddComment(R"DOC(
 Concat Operator.
 
diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc
index ab222f3cb36..d9c1332191a 100644
--- a/paddle/fluid/operators/conv_op.cc
+++ b/paddle/fluid/operators/conv_op.cc
@@ -285,11 +285,6 @@ framework::OpKernelType ConvOp::GetKernelTypeForVar(
 }
 
 void Conv2DOpMaker::Make() {
-  AddAttr<bool>("is_test",
-                "(bool, default false) Set to true for inference only, false "
-                "for training. Some layers may run faster when this is true.")
-      .SetDefault(false)
-      .AsExtra();
   AddInput("Input",
            "(Tensor) The input tensor of convolution operator. "
            "The format of input tensor is NCHW or NHWC, where N is batch size, "
@@ -356,22 +351,6 @@ void Conv2DOpMaker::Make() {
            "the input will be transformed automatically. ")
       .SetDefault("NCHW");
   // TODO(dzhwinter): need to registered layout transform function
-  AddAttr<int>("workspace_size_MB",
-               "Only used in cudnn kernel. Need set use_cudnn to true."
-               "workspace size for cudnn, in MB, "
-               "workspace is a section of GPU memory which will be "
-               "allocated/freed each time the operator runs, larger "
-               "workspace size can increase performance but also requires "
-               "better hardware. This size should be chosen carefully.")
This size should be chosen carefully.") - .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB()) - .AsExtra(); - AddAttr("exhaustive_search", - "(bool, default false) cuDNN has many algorithm to calculation " - "convolution, whether enable exhaustive search " - "for cuDNN convolution or not, default is False.") - .SetDefault(false) - .AsExtra(); - AddComment(R"DOC( Convolution Operator. @@ -403,12 +382,18 @@ $$ Apply(); } +class DepthwiseConv2DOpMaker : public Conv2DOpMaker { + protected: + void Apply() override { + AddAttr( + "use_cudnn", + "(bool, default false) Only used in cudnn kernel, need install cudnn") + .SetDefault(false) + .AsExtra(); + } +}; + void Conv3DOpMaker::Make() { - AddAttr("is_test", - "(bool, default false) Set to true for inference only, false " - "for training. Some layers may run faster when this is true.") - .SetDefault(false) - .AsExtra(); AddInput( "Input", "(Tensor) The input tensor of convolution operator. " @@ -465,47 +450,6 @@ void Conv3DOpMaker::Make() { "dilations(d_dilation, h_dilation, w_dilation) of " "convolution operator.") .SetDefault({1, 1, 1}); - AddAttr( - "use_cudnn", - "(bool, default false) Only used in cudnn kernel, need install cudnn") - .SetDefault(false) - .AsExtra(); - AddAttr("use_mkldnn", - "(bool, default false) Only used in mkldnn kernel") - .SetDefault(false) - .AsExtra(); - AddAttr( - "mkldnn_data_type", - "(string, default \"float32\"). Data type of mkldnn kernel") - .SetDefault("float32") - .InEnum({"float32", "int8", "bfloat16"}) - .AsExtra(); - AddAttr("fuse_relu", "(bool, default false) Only used in mkldnn kernel") - .SetDefault(false) - .AsExtra(); - AddAttr("fuse_activation", - "(string, default \"\") Only used in mkldnn kernel") - .SetDefault("") - .AsExtra(); - AddAttr("fuse_alpha", - "(float, default 0.0) Only used in mkldnn kernel") - .SetDefault(0.0f) - .AsExtra(); - AddAttr("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel") - .SetDefault(0.0f) - .AsExtra(); - AddAttr( - "use_addto", - "(bool, default false) If use addto strategy or not, only used in " - "cudnn kernel") - .SetDefault(false) - .AsExtra(); - AddAttr("fuse_residual_connection", - "(bool, default false) Only used in mkldnn kernel. Used " - "whenever convolution output is as an input to residual " - "connection.") - .SetDefault(false) - .AsExtra(); AddAttr( "data_format", "(string, default NCDHW) Only used in " @@ -513,25 +457,6 @@ void Conv3DOpMaker::Make() { "Defaults to \"NDHWC\". Specify the data format of the output data, " "the input will be transformed automatically. ") .SetDefault("NCDHW"); - AddAttr("force_fp32_output", - "(bool, default false) Only used in mkldnn INT8 kernel") - .SetDefault(false) - .AsExtra(); - // TODO(dzhwinter): need to registered layout transform function - AddAttr("workspace_size_MB", - "Only used in cudnn kernel. workspace size for cudnn, in MB, " - "workspace is a section of GPU memory which will be " - "allocated/freed each time the operator runs, larger " - "workspace size can increase performance but also requires " - "better hardware. This size should be chosen carefully.") - .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB()) - .AsExtra(); - AddAttr("exhaustive_search", - "(bool, default false) cuDNN has many algorithm to calculation " - "convolution, whether enable exhaustive search " - "for cuDNN convolution or not, default is False.") - .SetDefault(false) - .AsExtra(); AddComment(R"DOC( Convolution3D Operator. 
@@ -811,7 +736,7 @@ REGISTER_OPERATOR(conv2d_grad_grad, ops::ConvOpDoubleGrad);
 // depthwise convolution op
 REGISTER_OPERATOR(depthwise_conv2d,
                   ops::ConvOp,
-                  ops::Conv2DOpMaker,
+                  ops::DepthwiseConv2DOpMaker,
                   ops::ConvOpInferVarType,
                   ops::Conv2DGradMaker<paddle::framework::OpDesc>,
                   ops::Conv2DGradMaker<paddle::imperative::OpBase>);
diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc
index 3205d5b3538..56875c2d433 100644
--- a/paddle/fluid/operators/conv_transpose_op.cc
+++ b/paddle/fluid/operators/conv_transpose_op.cc
@@ -91,11 +91,6 @@ framework::OpKernelType ConvTransposeOp::GetKernelTypeForVar(
 }
 
 void Conv2DTransposeOpMaker::Make() {
-  AddAttr<bool>("is_test",
-                "(bool, default false) Set to true for inference only, false "
-                "for training. Some layers may run faster when this is true.")
-      .SetDefault(false)
-      .AsExtra();
   AddInput("Input",
            "(Tensor) The input tensor of convolution transpose operator. "
            "The format of input tensor is NCHW or NHWC. Where N is batch size, "
@@ -146,40 +141,6 @@ void Conv2DTransposeOpMaker::Make() {
       "(vector<int> default:{0, 0}), the paddings(h_pad, w_pad) of convolution "
      "transpose operator.")
      .SetDefault({0, 0});
-  AddAttr<bool>(
-      "use_cudnn",
-      "(bool, default false) Only used in cudnn kernel, need install cudnn")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<bool>("use_mkldnn",
-                "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<bool>("force_fp32_output",
-                "(bool, default false) Force BF16 kernel output FP32, only "
-                "used in MKL-DNN BF16")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<std::string>(
-      "mkldnn_data_type",
-      "(string, default \"float32\"). Data type of mkldnn kernel")
-      .SetDefault("float32")
-      .InEnum({"float32", "bfloat16"})
-      .AsExtra();
-  AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<std::string>("fuse_activation",
-                       "(string, default \"\") Only used in mkldnn kernel")
-      .SetDefault("")
-      .AsExtra();
-  AddAttr<float>("fuse_alpha",
-                 "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f)
-      .AsExtra();
-  AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f)
-      .AsExtra();
   AddAttr<std::string>(
       "data_format",
       "(string, default NCHW) Only used in "
       "An optional string from: \"NHWC\", \"NCHW\". "
       "Defaults to \"NHWC\". Specify the data format of the output data, "
       "the input will be transformed automatically. ")
       .SetDefault("NCHW");
   AddAttr<std::string>(
       "padding_algorithm",
       "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
       "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
       "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
       .SetDefault("EXPLICIT");
-  AddAttr<int>("workspace_size_MB",
-               "Used in cudnn kernel only. workspace size for cudnn, in MB, "
-               "workspace is a section of GPU memory which will be "
-               "allocated/freed each time the operator runs, larger "
-               "workspace size can increase performance but also requires "
-               "better hardward. This size should be carefully set.")
-      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
-      .AsExtra();
   AddComment(R"DOC(
 Convolution2D Transpose Operator.
 
@@ -280,15 +233,6 @@ void Conv3DTransposeOpMaker::Make() {
              "(int default:1), the groups number of the convolution3d "
              "transpose operator. ")
       .SetDefault(1);
-  AddAttr<bool>(
-      "use_cudnn",
-      "(bool, default false) Only used in cudnn kernel, need install cudnn")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<bool>("use_mkldnn",
-                "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false)
-      .AsExtra();
   AddAttr<std::string>(
       "data_format",
       "(string, default NCHW) Only used in "
       "An optional string from: \"NHWC\", \"NCHW\". "
       "Defaults to \"NHWC\". Specify the data format of the output data, "
       "the input will be transformed automatically. ")
       .SetDefault("NCHW");
   AddAttr<std::string>(
       "padding_algorithm",
       "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
       "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
       "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
") .SetDefault("EXPLICIT"); - AddAttr("workspace_size_MB", - "Used in cudnn kernel only. workspace size for cudnn, in MB, " - "workspace is a section of GPU memory which will be " - "allocated/freed each time the operator runs, larger " - "workspace size can increase performance but also requires " - "better hardward. This size should be carefully set.") - .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB()) - .AsExtra(); AddComment(R"DOC( Convolution3D Transpose Operator. diff --git a/paddle/phi/api/yaml/api_compat.yaml b/paddle/phi/api/yaml/api_compat.yaml index e2d714e3620..912d63a776b 100644 --- a/paddle/phi/api/yaml/api_compat.yaml +++ b/paddle/phi/api/yaml/api_compat.yaml @@ -1,7 +1,22 @@ -# - api : conv3d_transpose -# backward : conv3d_transpose_grad -# extra : -# attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()] +- api : abs + backward : abs_grad + extra : + attrs : [bool use_cudnn = false, bool use_mkldnn = false] + +- api : addmm + backward : addmm_grad + extra : + attrs : [bool use_mkldnn = false] + +- api : affine_grid + backward : affine_grid_grad + extra : + attrs : [bool use_cudnn = true] + +- api : angle + backward : angle_grad + extra : + attrs : [bool use_cudnn = false, bool use_mkldnn = false] - api : atan2 inputs : @@ -9,6 +24,11 @@ outputs : out : Out +- api : batch_norm + backward : batch_norm_grad + extra : + attrs : [bool use_mkldnn = false, bool fuse_with_relu = false] + - api : bernoulli inputs : x : X @@ -27,24 +47,55 @@ outputs : out : Out +- api : clip + backward : clip_grad + extra : + attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"] + +- api : concat + backward : concat_grad + extra : + attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"] + - api : conv2d backward : conv2d_grad extra : - attrs : [bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false, + attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false, str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false, bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f, float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false, - int workspace_size_MB = 512, bool exhaustive_search = false] + int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false] - api : conv2d_fusion extra : - attrs : [bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false, + attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false, str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false, bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f, float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false, - int workspace_size_MB = 512, bool exhaustive_search = false] + int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false] + +- api : conv2d_transpose + backward : conv2d_transpose_grad + extra : + attrs : [bool 
is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false, + str mkldnn_data_type = "float32", bool fuse_relu = false, + str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, + int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()] + +- api : conv3d + backward : conv3d_grad + extra : + attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false, + str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, + bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false, + int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false] + +- api : conv3d_transpose + backward : conv3d_transpose_grad + extra : + attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()] - api : cross inputs : @@ -57,12 +108,20 @@ - api : depthwise_conv2d backward : depthwise_conv2d_grad extra : - attrs : [bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false, + attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false, str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false, bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f, float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false, - int workspace_size_MB = 512, bool exhaustive_search = false] + int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false] + +- api : depthwise_conv2d_transpose + backward : depthwise_conv2d_transpose_grad + extra : + attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false, + str mkldnn_data_type = "float32", bool fuse_relu = false, + str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, + int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()] - api : diag op_name : diag_v2 @@ -108,6 +167,11 @@ outputs : out : Out +- api : inplace_abn + backward : inplace_abn_grad + extra : + attrs : [bool use_mkldnn = false, bool fuse_with_relu = false] + - api : lgamma inputs : x : X @@ -132,6 +196,11 @@ outputs : out : Out +- api : sync_batch_norm + backward : sync_batch_norm_grad + extra : + attrs : [bool use_mkldnn = false, bool fuse_with_relu = false] + - api : trace inputs : x : Input -- GitLab