From b33aaea86ccaf6f41eca018377e36ed91b64dd27 Mon Sep 17 00:00:00 2001 From: wawltor Date: Wed, 30 Dec 2020 16:58:25 +0800 Subject: [PATCH] add the op version check for the elementwise ops, test=op_version (#30010) * add the op version check for the elementwise ops, test=op_version * add the support check for elementwise_ops, test=op_version --- .../ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc | 4 +- .../conv_elementwise_add_mkldnn_fuse_pass.cc | 69 +++++++++---------- .../ir/mkldnn/mkldnn_inplace_pass.cc | 2 +- .../ir_passes/tensorrt_subgraph_pass.cc | 4 +- paddle/fluid/operators/arg_max_op.cc | 8 +-- paddle/fluid/operators/arg_min_op.cc | 8 +-- .../operators/elementwise/CMakeLists.txt | 2 +- .../elementwise/elementwise_add_op.cc | 11 ++- .../elementwise/elementwise_div_op.cc | 10 +++ .../elementwise/elementwise_floordiv_op.cc | 9 +++ .../elementwise/elementwise_max_op.cc | 9 +++ .../elementwise/elementwise_min_op.cc | 9 +++ .../elementwise/elementwise_mod_op.cc | 9 +++ .../elementwise/elementwise_mul_op.cc | 9 +++ .../operators/elementwise/elementwise_op.h | 1 + .../elementwise/elementwise_pow_op.cc | 9 +++ .../elementwise/elementwise_sub_op.cc | 9 +++ 17 files changed, 131 insertions(+), 51 deletions(-) diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc index b0849d74b61..10691ded668 100644 --- a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc @@ -153,7 +153,7 @@ REGISTER_PASS_CAPABILITY(conv_bias_mkldnn_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0)); + .LE("elementwise_add", 1)); REGISTER_PASS(conv_transpose_bias_mkldnn_fuse_pass, paddle::framework::ir::Conv2DTransposeBiasFusePass); @@ -161,7 +161,7 @@ REGISTER_PASS_CAPABILITY(conv_transpose_bias_mkldnn_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d_transpose", 1) - .EQ("elementwise_add", 0)); + .LE("elementwise_add", 1)); REGISTER_PASS(conv3d_bias_mkldnn_fuse_pass, paddle::framework::ir::Conv3DBiasFusePass); diff --git a/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc index a837b42b3ea..fa1544f780a 100644 --- a/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -228,20 +228,19 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseConvAsX( pattern->NewNode(elementwise_add_pattern.elementwise_add_y_repr())); conv_output->AsIntermediate(); - auto get_node_from_elementwise_add = - [&elementwise_add_pattern]( - const GraphPatternDetector::subgraph_t& subgraph) + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) -> std::tuple { - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_y, elementwise_add_y, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, - elementwise_add_pattern); - - return std::make_tuple(elementwise_add_op, elementwise_add_y, - elementwise_add_out); - }; + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_y, 
elementwise_add_y, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); + + return std::make_tuple(elementwise_add_op, elementwise_add_y, + elementwise_add_out); + }; return ExecuteHandleOnGraph( &gpd, graph_with_stats, @@ -266,20 +265,19 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseConvAsY( conv_output); conv_output->AsIntermediate(); - auto get_node_from_elementwise_add = - [&elementwise_add_pattern]( - const GraphPatternDetector::subgraph_t& subgraph) + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) -> std::tuple { - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, - elementwise_add_pattern); - - return std::make_tuple(elementwise_add_op, elementwise_add_x, - elementwise_add_out); - }; + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); + + return std::make_tuple(elementwise_add_op, elementwise_add_x, + elementwise_add_out); + }; return ExecuteHandleOnGraph( &gpd, graph_with_stats, @@ -306,17 +304,16 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseProjectionConv( conv_x_output->AsIntermediate(); conv_y_output->AsIntermediate(); - auto get_node_from_elementwise_add = - [&elementwise_add_pattern]( - const GraphPatternDetector::subgraph_t& subgraph) + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) -> std::tuple { - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, - elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); - return std::make_tuple(elementwise_add_op, elementwise_add_out); - }; + return std::make_tuple(elementwise_add_op, elementwise_add_out); + }; return ExecuteHandleOnGraph( &gpd, graph_with_stats, @@ -351,4 +348,4 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add_mkldnn_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0)); + .LE("elementwise_add", 1)); diff --git a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc b/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc index d655837f743..d2763bd6a6d 100644 --- a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc @@ -221,5 +221,5 @@ REGISTER_PASS_CAPABILITY(mkldnn_inplace_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("softmax", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("tanh", 0)); diff --git a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc index 158c834c256..a67908fe7f2 100644 --- a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc +++ 
b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc @@ -383,8 +383,8 @@ REGISTER_PASS_CAPABILITY(tensorrt_subgraph_pass) .EQ("concat", 0) .EQ("tanh", 0) .EQ("pad", 0) - .EQ("elementwise_add", 0) - .EQ("elementwise_mul", 0) + .LE("elementwise_add", 1) + .LE("elementwise_mul", 1) .EQ("prelu", 0) .LE("conv2d_transpose", 1) .LE("leaky_relu", 1) diff --git a/paddle/fluid/operators/arg_max_op.cc b/paddle/fluid/operators/arg_max_op.cc index a82134921ef..0f5c048b6be 100644 --- a/paddle/fluid/operators/arg_max_op.cc +++ b/paddle/fluid/operators/arg_max_op.cc @@ -44,8 +44,8 @@ REGISTER_OP_VERSION(arg_max) false) .ModifyAttr( "dtype", - "change the default value of dtype, the older version " - "is -1, means return the int64 indices." - "The new version is 3, return the int64 indices directly." - "And supporting the dtype of -1 in new version.", + "Change the default value of dtype from -1 to 3" + ", which means the int64 indices are returned directly. The reason for " + "changing the default value is that the int64 value in " + "VarType is 3 in framework.proto.", 3)); diff --git a/paddle/fluid/operators/arg_min_op.cc b/paddle/fluid/operators/arg_min_op.cc index 23ed7d727c5..0a4ba6fb0bf 100644 --- a/paddle/fluid/operators/arg_min_op.cc +++ b/paddle/fluid/operators/arg_min_op.cc @@ -44,8 +44,8 @@ REGISTER_OP_VERSION(arg_min) false) .ModifyAttr( "dtype", - "change the default value of dtype, the older version " - "is -1, means return the int64 indices." - "The new version is 3, return the int64 indices directly." - "And supporting the dtype of -1 in new version.", + "Change the default value of dtype from -1 to 3" + ", which means the int64 indices are returned directly. The reason for " + "changing the default value is that the int64 value in " + "VarType is 3 in framework.proto.", 3)); diff --git a/paddle/fluid/operators/elementwise/CMakeLists.txt b/paddle/fluid/operators/elementwise/CMakeLists.txt index 96125e45566..06ca98e526e 100644 --- a/paddle/fluid/operators/elementwise/CMakeLists.txt +++ b/paddle/fluid/operators/elementwise/CMakeLists.txt @@ -3,7 +3,7 @@ if(WITH_UNITY_BUILD) # Load Unity Build rules for operators in paddle/fluid/operators/elementwise. include(unity_build_rule.cmake) endif() -register_operators() +register_operators(DEPS op_version_registry) cc_test(test_elementwise_add_op_inplace SRCS test_elementwise_add_op_inplace.cc DEPS op_registry elementwise_add_op scope device_context enforce executor) cc_test(test_elementwise_div_grad_grad SRCS test_elementwise_div_grad_grad.cc DEPS op_registry elementwise_div_op scope device_context enforce executor) diff --git a/paddle/fluid/operators/elementwise/elementwise_add_op.cc b/paddle/fluid/operators/elementwise/elementwise_add_op.cc index 9885e9c0954..29aa5df27c2 100644 --- a/paddle/fluid/operators/elementwise/elementwise_add_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_add_op.cc @@ -17,7 +17,6 @@ limitations under the License.
*/ #include #include -#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/operators/elementwise/elementwise_op.h" #include "paddle/fluid/platform/complex128.h" #include "paddle/fluid/platform/complex64.h" @@ -178,3 +177,13 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseAddKernel); + +REGISTER_OP_VERSION(elementwise_add) + .AddCheckpoint( + R"ROC(Register elementwise_add for adding the attribute of + Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_add.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_div_op.cc b/paddle/fluid/operators/elementwise/elementwise_div_op.cc index f14aee8e499..0252e6dfff5 100644 --- a/paddle/fluid/operators/elementwise/elementwise_div_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_div_op.cc @@ -15,6 +15,7 @@ limitations under the License. */ #include "paddle/fluid/operators/elementwise/elementwise_div_op.h" #include #include + #include "paddle/fluid/operators/elementwise/elementwise_op.h" #include "paddle/fluid/platform/complex128.h" #include "paddle/fluid/platform/complex64.h" @@ -162,3 +163,12 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseDivDoubleGradKernel); + +REGISTER_OP_VERSION(elementwise_div) + .AddCheckpoint( + R"ROC(Register elementwise_div for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_div.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc b/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc index ddd69203fd3..b28f7132565 100644 --- a/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc @@ -69,3 +69,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseFloorDivKernel, ops::ElementwiseFloorDivKernel); + +REGISTER_OP_VERSION(elementwise_floordiv) + .AddCheckpoint( + R"ROC(Register elementwise_floordiv for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_floordiv.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_max_op.cc b/paddle/fluid/operators/elementwise/elementwise_max_op.cc index 38607d4558f..dde65c81996 100644 --- a/paddle/fluid/operators/elementwise/elementwise_max_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_max_op.cc @@ -94,3 +94,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseMaxGradKernel, ops::ElementwiseMaxGradKernel, ops::ElementwiseMaxGradKernel); + +REGISTER_OP_VERSION(elementwise_max) + .AddCheckpoint( + R"ROC(Register elementwise_max for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_max.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_min_op.cc b/paddle/fluid/operators/elementwise/elementwise_min_op.cc index 8f544c78658..174684e3c84 100644 --- a/paddle/fluid/operators/elementwise/elementwise_min_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_min_op.cc @@ -94,3 +94,12 @@ REGISTER_OP_CPU_KERNEL( 
ops::ElementwiseMinGradKernel, ops::ElementwiseMinGradKernel, ops::ElementwiseMinGradKernel); + +REGISTER_OP_VERSION(elementwise_min) + .AddCheckpoint( + R"ROC(Register elementwise_min for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_min.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_mod_op.cc b/paddle/fluid/operators/elementwise/elementwise_mod_op.cc index d8ad0a353c9..2ac3aa6ebd3 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mod_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_mod_op.cc @@ -69,3 +69,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseModKernel, ops::ElementwiseModFPKernel, ops::ElementwiseModFPKernel); + +REGISTER_OP_VERSION(elementwise_mod) + .AddCheckpoint( + R"ROC(Register elementwise_mod for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_mod.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_mul_op.cc b/paddle/fluid/operators/elementwise/elementwise_mul_op.cc index 28b131e729c..6bf296f0e0b 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mul_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.cc @@ -161,3 +161,12 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseMulDoubleGradKernel); + +REGISTER_OP_VERSION(elementwise_mul) + .AddCheckpoint( + R"ROC(Register elementwise_mul for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_mul.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h index d799abf92d9..7f692d61649 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_op.h @@ -22,6 +22,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/operators/common_infer_shape_functions.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" diff --git a/paddle/fluid/operators/elementwise/elementwise_pow_op.cc b/paddle/fluid/operators/elementwise/elementwise_pow_op.cc index ea0e8e7c013..d564cc3717f 100644 --- a/paddle/fluid/operators/elementwise/elementwise_pow_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_pow_op.cc @@ -83,3 +83,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwisePowGradKernel, ops::ElementwisePowGradKernel, ops::ElementwisePowGradKernel); + +REGISTER_OP_VERSION(elementwise_pow) + .AddCheckpoint( + R"ROC(Register elementwise_pow for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_pow.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc index d72eacbfd44..80ce42109ae 100644 --- a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc @@ -156,3 +156,12 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseSubDoubleGradKernel); + +REGISTER_OP_VERSION(elementwise_sub) + .AddCheckpoint( + R"ROC(Register elementwise_sub for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_sub.", + 1.0f)); -- GitLab
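
Note on the pattern used above: every elementwise_* operator touched by this patch receives the same op-version checkpoint, recording that a new attribute Scale_y (default 1.0f) was added and bumping the op from version 0 to 1. A minimal sketch of that registration is shown below; it uses a hypothetical operator name, elementwise_example, and assumes only the op_version_registry.h header that the patch already pulls in through elementwise_op.h.

    // Sketch only: "elementwise_example" is a placeholder name, not an op in this patch.
    #include "paddle/fluid/framework/op_version_registry.h"

    REGISTER_OP_VERSION(elementwise_example)
        .AddCheckpoint(
            R"ROC(Register elementwise_example for adding the attribute of Scale_y)ROC",
            paddle::framework::compatible::OpVersionDesc().NewAttr(
                "Scale_y",
                "Scale factor applied to the input Y of elementwise_example.",
                1.0f));

Registering the checkpoint is what forces the EQ -> LE relaxations in the pass capability declarations earlier in the patch, since those passes must now accept both version 0 and version 1 of the ops.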
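For the same reason, each graph pass that matches these ops declares the highest op version it understands with LE rather than pinning an exact version with EQ. The declaration below restates the conv_bias_mkldnn_fuse_pass hunk from this patch to show the shape of that check.

    // The pass accepts conv2d and elementwise_add up to and including version 1.
    REGISTER_PASS_CAPABILITY(conv_bias_mkldnn_fuse_pass)
        .AddCombination(
            paddle::framework::compatible::OpVersionComparatorCombination()
                .LE("conv2d", 1)
                .LE("elementwise_add", 1));  // previously .EQ("elementwise_add", 0)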