From 93b53f866c97aa86c46c353855117b26cf6fc1e2 Mon Sep 17 00:00:00 2001
From: Wangzheee <634486483@qq.com>
Date: Fri, 2 Jul 2021 13:56:38 +0800
Subject: [PATCH] [pass_enhance] depthwise_conv_bn_fuse_pass (#33896)

---
 .../fluid/framework/ir/conv_bn_fuse_pass.cc   |  86 ++++++++-
 paddle/fluid/framework/ir/conv_bn_fuse_pass.h |   8 +-
 .../ir/fc_elementwise_layernorm_fuse_pass.cc  |   2 +-
 .../ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc   |   7 +-
 .../ir/quant_conv2d_dequant_fuse_pass.cc      |  17 +-
 .../operators/compat/depthwise_conv2d.pbtxt   | 177 ++++++++++++++++++
 6 files changed, 279 insertions(+), 18 deletions(-)
 create mode 100644 paddle/fluid/operators/compat/depthwise_conv2d.pbtxt

diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
index 03a78ec3a21..c362eec34b0 100644
--- a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
@@ -149,17 +149,21 @@ ConvBNFusePass::ConvBNFusePass() {
       .IsTensor()
       .End()
       .AddInput("Bias")
+      .IsTensor()
       .IsOptional()
       .End()
       .AddInput("ResidualData")
+      .IsTensor()
       .IsOptional()
       .End()
       .AddOutput("Output")
       .IsTensor()
       .End()
       .AddAttr("strides")
+      .IsType<std::vector<int>>()
       .End()
       .AddAttr("paddings")
+      .IsType<std::vector<int>>()
       .End()
       .AddAttr("padding_algorithm")
       .IsOptional()
@@ -169,6 +173,7 @@ ConvBNFusePass::ConvBNFusePass() {
       .IsNumGE(1)
       .End()
       .AddAttr("dilations")
+      .IsType<std::vector<int>>()
       .End()
       .AddAttr("data_format")
       .IsStringIn({"NCHW", "NHWC", "AnyLayout"})
@@ -205,6 +210,10 @@ ConvBNFusePass::ConvBNFusePass() {
       .AddOutput("Y")
       .IsTensor()
       .End()
+      .AddOutput("ReserveSpace")
+      .IsTensor()
+      .IsOptional()
+      .End()
       .AddAttr("epsilon")
       .IsNumLE(0.001f)
       .IsNumGE(0.0f)
@@ -375,17 +384,21 @@ ConvEltwiseAddBNFusePass::ConvEltwiseAddBNFusePass() {
       .IsTensor()
       .End()
       .AddInput("Bias")
+      .IsTensor()
       .IsOptional()
       .End()
       .AddInput("ResidualData")
+      .IsTensor()
       .IsOptional()
       .End()
       .AddOutput("Output")
       .IsTensor()
       .End()
       .AddAttr("strides")
+      .IsType<std::vector<int>>()
       .End()
       .AddAttr("paddings")
+      .IsType<std::vector<int>>()
       .End()
       .AddAttr("padding_algorithm")
       .IsStringIn({"EXPLICIT", "SAME", "VALID"})
@@ -395,6 +408,7 @@ ConvEltwiseAddBNFusePass::ConvEltwiseAddBNFusePass() {
       .IsNumGE(1)
       .End()
       .AddAttr("dilations")
+      .IsType<std::vector<int>>()
       .End()
       .AddAttr("data_format")
       .IsStringIn({"NCHW", "NHWC", "AnyLayout"})
@@ -431,6 +445,10 @@ ConvEltwiseAddBNFusePass::ConvEltwiseAddBNFusePass() {
       .AddOutput("Y")
       .IsTensor()
       .End()
+      .AddOutput("ReserveSpace")
+      .IsTensor()
+      .IsOptional()
+      .End()
       .AddAttr("epsilon")
       .IsNumLE(0.001f)
       .IsNumGE(0.0f)
@@ -575,31 +593,85 @@ ConvTransposeBNFusePass::ConvTransposeBNFusePass() {
       .IsTensor()
       .End()
       .AddInput("Bias")
+      .IsTensor()
       .IsOptional()
       .End()
       .AddOutput("Output")
       .IsTensor()
       .End()
+      .AddAttr("output_padding")
+      .IsType<std::vector<int>>()
+      .IsOptional()
+      .End()
+      .AddAttr("output_size")
+      .IsType<std::vector<int>>()
+      .IsOptional()
+      .End()
+      .AddAttr("groups")
+      .IsNumGE(1)
+      .End()
+      .AddAttr("dilations")
+      .IsType<std::vector<int>>()
+      .End()
       .AddAttr("strides")
+      .IsType<std::vector<int>>()
       .End()
       .AddAttr("paddings")
+      .IsType<std::vector<int>>()
       .End()
       .AddAttr("padding_algorithm")
       .IsStringIn({"EXPLICIT", "SAME", "VALID"})
+      .End()
+      .AddAttr("data_format")
+      .IsStringIn({"NCHW", "NHWC", "AnyLayout"})
+      .End();
+}
+
+ConvTransposeEltwiseAddBNFusePass::ConvTransposeEltwiseAddBNFusePass() {
+  AddOpCompat(OpCompat("conv2d_transpose"))
+      .AddInput("Input")
+      .IsTensor()
+      .End()
+      .AddInput("Filter")
+      .IsTensor()
+      .End()
+      .AddInput("Bias")
+      .IsTensor()
+      .IsOptional()
+      .End()
+      .AddOutput("Output")
+      .IsTensor()
+      .End()
+      .AddAttr("output_padding")
+      .IsType<std::vector<int>>()
+      .IsOptional()
+      .End()
+      .AddAttr("output_size")
+      .IsType<std::vector<int>>()
       .IsOptional()
       .End()
       .AddAttr("groups")
       .IsNumGE(1)
       .End()
       .AddAttr("dilations")
+      .IsType<std::vector<int>>()
+      .End()
+      .AddAttr("strides")
+      .IsType<std::vector<int>>()
+      .End()
+      .AddAttr("paddings")
+      .IsType<std::vector<int>>()
+      .End()
+      .AddAttr("padding_algorithm")
+      .IsStringIn({"EXPLICIT", "SAME", "VALID"})
       .End()
       .AddAttr("data_format")
       .IsStringIn({"NCHW", "NHWC", "AnyLayout"})
       .End();
 }
 
-ConvTransposeEltwiseAddBNFusePass::ConvTransposeEltwiseAddBNFusePass() {
-  AddOpCompat(OpCompat("conv2d_transpose"))
+DepthwiseConvBNFusePass::DepthwiseConvBNFusePass() {
+  AddOpCompat(OpCompat("depthwise_conv2d"))
       .AddInput("Input")
       .IsTensor()
       .End()
@@ -607,23 +679,31 @@ ConvTransposeEltwiseAddBNFusePass::ConvTransposeEltwiseAddBNFusePass() {
       .IsTensor()
       .End()
       .AddInput("Bias")
+      .IsTensor()
+      .IsOptional()
+      .End()
+      .AddInput("ResidualData")
+      .IsTensor()
       .IsOptional()
       .End()
       .AddOutput("Output")
       .IsTensor()
       .End()
       .AddAttr("strides")
+      .IsType<std::vector<int>>()
       .End()
       .AddAttr("paddings")
+      .IsType<std::vector<int>>()
       .End()
       .AddAttr("padding_algorithm")
-      .IsStringIn({"EXPLICIT", "SAME", "VALID"})
       .IsOptional()
+      .IsStringIn({"EXPLICIT", "SAME", "VALID"})
       .End()
       .AddAttr("groups")
       .IsNumGE(1)
       .End()
       .AddAttr("dilations")
+      .IsType<std::vector<int>>()
       .End()
       .AddAttr("data_format")
       .IsStringIn({"NCHW", "NHWC", "AnyLayout"})
diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass.h b/paddle/fluid/framework/ir/conv_bn_fuse_pass.h
index c78dfc2a487..b976aab0eea 100644
--- a/paddle/fluid/framework/ir/conv_bn_fuse_pass.h
+++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass.h
@@ -17,8 +17,6 @@
 #include <string>
 
 #include "paddle/fluid/framework/ir/fuse_pass_base.h"
-#include "paddle/fluid/framework/ir/graph.h"
-#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
 
 namespace paddle {
 namespace framework {
@@ -27,12 +25,10 @@ namespace ir {
 /*
  * Fuse the Conv and BatchNorm to a ConvBNMKLDNNOp.
  */
-class Graph;
 
 class ConvBNFusePass : public FusePassBase {
  public:
   ConvBNFusePass();
-  virtual ~ConvBNFusePass() {}
   virtual std::string conv_type() const { return "conv2d"; }
 
  protected:
@@ -43,7 +39,6 @@ class ConvBNFusePass : public FusePassBase {
 class ConvEltwiseAddBNFusePass : public FusePassBase {
  public:
   ConvEltwiseAddBNFusePass();
-  virtual ~ConvEltwiseAddBNFusePass() {}
   virtual std::string conv_type() const { return "conv2d"; }
 
  protected:
@@ -54,19 +49,18 @@ class ConvEltwiseAddBNFusePass : public FusePassBase {
 class ConvTransposeBNFusePass : public ConvBNFusePass {
  public:
   ConvTransposeBNFusePass();
-  virtual ~ConvTransposeBNFusePass() {}
   std::string conv_type() const { return "conv2d_transpose"; }
 };
 
 class ConvTransposeEltwiseAddBNFusePass : public ConvEltwiseAddBNFusePass {
  public:
   ConvTransposeEltwiseAddBNFusePass();
-  virtual ~ConvTransposeEltwiseAddBNFusePass() {}
   std::string conv_type() const { return "conv2d_transpose"; }
 };
 
 class DepthwiseConvBNFusePass : public ConvBNFusePass {
  public:
+  DepthwiseConvBNFusePass();
   std::string conv_type() const { return "depthwise_conv2d"; }
 };
 
diff --git a/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass.cc b/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass.cc
index 6f7a52fce59..d3cf3319adf 100644
--- a/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass.cc
@@ -196,7 +196,7 @@ FCElementwiseLayerNormFusePass::FCElementwiseLayerNormFusePass() {
       .IsTensor()
       .End()
       .AddAttr("axis")
-      .IsNumEQ(-1)
+      .IsIntIn({-1, 0})
       .End();
 }
 
diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
index c03d6a582e4..efad207e172 100644
--- a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
@@ -84,15 +84,18 @@ Conv2DTransposeBiasFusePass::Conv2DTransposeBiasFusePass() {
       .End()
       .AddInput("Bias")
       .IsTensor()
+      .IsOptional()
       .End()
       .AddOutput("Output")
       .IsTensor()
       .End()
       .AddAttr("output_padding")
       .IsType<std::vector<int>>()
+      .IsOptional()
       .End()
       .AddAttr("output_size")
-      .IsNumGE(1)
+      .IsType<std::vector<int>>()
+      .IsOptional()
       .End()
       .AddAttr("groups")
       .IsNumGE(1)
@@ -110,7 +113,7 @@ Conv2DTransposeBiasFusePass::Conv2DTransposeBiasFusePass() {
       .IsStringIn({"EXPLICIT", "SAME", "VALID"})
       .End()
       .AddAttr("data_format")
-      .IsStringIn({"NCHW", "NHWC"})
+      .IsStringIn({"NCHW", "NHWC", "AnyLayout"})
       .End();
 }
 
diff --git a/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc b/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc
index a092c894d9e..60675bf8488 100644
--- a/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc
@@ -200,14 +200,12 @@ QuantDequantFusePass::QuantDequantFusePass() {
       .AddOutput("Output")
       .IsTensor()
       .End()
-      .AddAttr("strides")
+      .AddAttr("output_padding")
       .IsType<std::vector<int>>()
+      .IsOptional()
       .End()
-      .AddAttr("paddings")
+      .AddAttr("output_size")
       .IsType<std::vector<int>>()
-      .End()
-      .AddAttr("padding_algorithm")
-      .IsStringIn({"EXPLICIT", "SAME", "VALID"})
       .IsOptional()
       .End()
       .AddAttr("groups")
@@ -216,6 +214,15 @@ QuantDequantFusePass::QuantDequantFusePass() {
       .AddAttr("dilations")
       .IsType<std::vector<int>>()
       .End()
+      .AddAttr("strides")
+      .IsType<std::vector<int>>()
+      .End()
+      .AddAttr("paddings")
+      .IsType<std::vector<int>>()
+      .End()
+      .AddAttr("padding_algorithm")
+      .IsStringIn({"EXPLICIT", "SAME", "VALID"})
+      .End()
       .AddAttr("data_format")
       .IsStringIn({"NCHW", "NHWC", "AnyLayout"})
       .End();
diff --git a/paddle/fluid/operators/compat/depthwise_conv2d.pbtxt b/paddle/fluid/operators/compat/depthwise_conv2d.pbtxt
new file mode 100644
index 00000000000..901ed164608
--- /dev/null
+++ b/paddle/fluid/operators/compat/depthwise_conv2d.pbtxt
@@ -0,0 +1,177 @@
+type: "depthwise_conv2d"
+def {
+  inputs {
+    name: "Input"
+  }
+  inputs {
+    name: "Filter"
+  }
+  inputs {
+    name: "Bias"
+  }
+  inputs {
+    name: "ResidualData"
+  }
+  outputs {
+    name: "Output"
+  }
+  attrs {
+    name: "strides"
+    type: INTS
+  }
+  attrs {
+    name: "paddings"
+    type: INTS
+  }
+  attrs {
+    name: "padding_algorithm"
+    type: STRING
+  }
+  attrs {
+    name: "groups"
+    type: INT
+  }
+  attrs {
+    name: "dilations"
+    type: INTS
+  }
+  attrs {
+    name: "data_format"
+    type: STRING
+  }
+}
+extra {
+  attrs {
+    name: "Input_scale"
+    type: FLOAT
+  }
+  attrs {
+    name: "quantization_type"
+    type: STRING
+  }
+  attrs {
+    name: "bit_length"
+    type: INT
+  }
+  attrs {
+    name: "out_threshold"
+    type: FLOAT
+  }
+  attrs {
+    name: "@ENABLE_CACHE_RUNTIME_CONTEXT@"
+    type: BOOLEAN
+  }
+  attrs {
+    name: "skip_quant"
+    type: BOOLEAN
+  }
+  attrs {
+    name: "is_test"
+    type: BOOLEAN
+  }
+  attrs {
+    name: "name"
+    type: STRING
+  }
+  attrs {
+    name: "use_cudnn"
+    type: BOOLEAN
+  }
+  attrs {
+    name: "fuse_relu_before_depthwise_conv"
+    type: BOOLEAN
+  }
+  attrs {
+    name: "use_mkldnn"
+    type: BOOLEAN
+  }
+  attrs {
+    name: "use_quantizer"
+    type: BOOLEAN
+  }
+  attrs {
+    name: "mkldnn_data_type"
+    type: STRING
+  }
+  attrs {
+    name: "fuse_relu"
+    type: BOOLEAN
+  }
+  attrs {
+    name: "fuse_brelu"
+    type: BOOLEAN
+  }
+  attrs {
+    name: "fuse_brelu_threshold"
+    type: FLOAT
+  }
+  attrs {
+    name: "fuse_activation"
+    type: STRING
+  }
+  attrs {
+    name: "fuse_alpha"
+    type: FLOAT
+  }
+  attrs {
+    name: "fuse_beta"
+    type: FLOAT
+  }
+  attrs {
+    name: "use_addto"
+    type: BOOLEAN
+  }
+  attrs {
+    name: "fuse_residual_connection"
+    type: BOOLEAN
+  }
+  attrs {
+    name: "Scale_in"
+    type: FLOAT
+  }
+  attrs {
+    name: "Scale_out"
+    type: FLOAT
+  }
+  attrs {
+    name: "Scale_in_eltwise"
+    type: FLOAT
+  }
+  attrs {
+    name: "Scale_weights"
+    type: FLOATS
+  }
+  attrs {
+    name: "force_fp32_output"
+    type: BOOLEAN
+  }
+  attrs {
+    name: "workspace_size_MB"
+    type: INT
+  }
+  attrs {
+    name: "exhaustive_search"
+    type: BOOLEAN
+  }
+  attrs {
+    name: "op_role"
+    type: INT
+  }
+  attrs {
+    name: "op_role_var"
+    type: STRINGS
+  }
+  attrs {
+    name: "op_namescope"
+    type: STRING
+  }
+  attrs {
+    name: "op_callstack"
+    type: STRINGS
+  }
+  attrs {
+    name: "op_device"
+    type: STRING
+  }
+}
+
-- 
GitLab