diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc index b0849d74b6153ff00689a86f9c2f1c58cbca62f3..10691ded668f8a2d05f75658f8734772d217b5ce 100644 --- a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc @@ -153,7 +153,7 @@ REGISTER_PASS_CAPABILITY(conv_bias_mkldnn_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0)); + .LE("elementwise_add", 1)); REGISTER_PASS(conv_transpose_bias_mkldnn_fuse_pass, paddle::framework::ir::Conv2DTransposeBiasFusePass); @@ -161,7 +161,7 @@ REGISTER_PASS_CAPABILITY(conv_transpose_bias_mkldnn_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d_transpose", 1) - .EQ("elementwise_add", 0)); + .LE("elementwise_add", 1)); REGISTER_PASS(conv3d_bias_mkldnn_fuse_pass, paddle::framework::ir::Conv3DBiasFusePass); diff --git a/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc index a837b42b3ead48d8f852c09ed97dda1c7b0f08d2..fa1544f780ac1a549fa2119d552aa844345abfe7 100644 --- a/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -228,20 +228,19 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseConvAsX( pattern->NewNode(elementwise_add_pattern.elementwise_add_y_repr())); conv_output->AsIntermediate(); - auto get_node_from_elementwise_add = - [&elementwise_add_pattern]( - const GraphPatternDetector::subgraph_t& subgraph) + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) -> std::tuple { - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, - 
elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_y, elementwise_add_y, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, - elementwise_add_pattern); - - return std::make_tuple(elementwise_add_op, elementwise_add_y, - elementwise_add_out); - }; + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_y, elementwise_add_y, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); + + return std::make_tuple(elementwise_add_op, elementwise_add_y, + elementwise_add_out); + }; return ExecuteHandleOnGraph( &gpd, graph_with_stats, @@ -266,20 +265,19 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseConvAsY( conv_output); conv_output->AsIntermediate(); - auto get_node_from_elementwise_add = - [&elementwise_add_pattern]( - const GraphPatternDetector::subgraph_t& subgraph) + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) -> std::tuple { - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, - elementwise_add_pattern); - - return std::make_tuple(elementwise_add_op, elementwise_add_x, - elementwise_add_out); - }; + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); + + return std::make_tuple(elementwise_add_op, elementwise_add_x, + elementwise_add_out); + }; return ExecuteHandleOnGraph( &gpd, graph_with_stats, @@ -306,17 +304,16 @@ GraphWithStats 
ResidualConnectionMKLDNNFusePass::FuseProjectionConv( conv_x_output->AsIntermediate(); conv_y_output->AsIntermediate(); - auto get_node_from_elementwise_add = - [&elementwise_add_pattern]( - const GraphPatternDetector::subgraph_t& subgraph) + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) -> std::tuple { - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, - elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); - return std::make_tuple(elementwise_add_op, elementwise_add_out); - }; + return std::make_tuple(elementwise_add_op, elementwise_add_out); + }; return ExecuteHandleOnGraph( &gpd, graph_with_stats, @@ -351,4 +348,4 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add_mkldnn_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0)); + .LE("elementwise_add", 1)); diff --git a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc b/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc index d655837f7433696203c04fc25a1553538c995292..d2763bd6a6dc0fe9fac9cdc6b8abdf7ee0853c4b 100644 --- a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc @@ -221,5 +221,5 @@ REGISTER_PASS_CAPABILITY(mkldnn_inplace_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("softmax", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("tanh", 0)); diff --git a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc index 
158c834c256f59bbb89c2938b268d077181285eb..a67908fe7f22f2e20c445d363499b3ab7d2af4bd 100644 --- a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc +++ b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc @@ -383,8 +383,8 @@ REGISTER_PASS_CAPABILITY(tensorrt_subgraph_pass) .EQ("concat", 0) .EQ("tanh", 0) .EQ("pad", 0) - .EQ("elementwise_add", 0) - .EQ("elementwise_mul", 0) + .LE("elementwise_add", 1) + .LE("elementwise_mul", 1) .EQ("prelu", 0) .LE("conv2d_transpose", 1) .LE("leaky_relu", 1) diff --git a/paddle/fluid/operators/arg_max_op.cc b/paddle/fluid/operators/arg_max_op.cc index a82134921ef64f89151eb9c521ea3cbb6f83ee7b..0f5c048b6be9c73ae98181685269592f409196cd 100644 --- a/paddle/fluid/operators/arg_max_op.cc +++ b/paddle/fluid/operators/arg_max_op.cc @@ -44,8 +44,8 @@ REGISTER_OP_VERSION(arg_max) false) .ModifyAttr( "dtype", - "change the default value of dtype, the older version " - "is -1, means return the int64 indices." - "The new version is 3, return the int64 indices directly." - "And supporting the dtype of -1 in new version.", + "Change the default value of dtype from -1 to 3" + ", means return the int64 indices directly. The reason for " + "changing the default value is that the int64 value in " + "VarType is 3 in the framework.proto.", 3)); diff --git a/paddle/fluid/operators/arg_min_op.cc b/paddle/fluid/operators/arg_min_op.cc index 23ed7d727c536225a98a1ea9e6e3af723b4352c3..0a4ba6fb0bfdfccfc4eae99da730e96fe5f0a540 100644 --- a/paddle/fluid/operators/arg_min_op.cc +++ b/paddle/fluid/operators/arg_min_op.cc @@ -44,8 +44,8 @@ REGISTER_OP_VERSION(arg_min) false) .ModifyAttr( "dtype", - "change the default value of dtype, the older version " - "is -1, means return the int64 indices." - "The new version is 3, return the int64 indices directly." - "And supporting the dtype of -1 in new version.", + "Change the default value of dtype from -1 to 3" + ", means return the int64 indices directly. 
The reason for " + "changing the default value is that the int64 value in " + "VarType is 3 in the framework.proto.", 3)); diff --git a/paddle/fluid/operators/elementwise/CMakeLists.txt b/paddle/fluid/operators/elementwise/CMakeLists.txt index 96125e455665a837ec91f8e01a85272364fdb638..06ca98e526e95b414584f9634a3d42f84d6b369f 100644 --- a/paddle/fluid/operators/elementwise/CMakeLists.txt +++ b/paddle/fluid/operators/elementwise/CMakeLists.txt @@ -3,7 +3,7 @@ if(WITH_UNITY_BUILD) # Load Unity Build rules for operators in paddle/fluid/operators/elementwise. include(unity_build_rule.cmake) endif() -register_operators() +register_operators(DEPS op_version_registry) cc_test(test_elementwise_add_op_inplace SRCS test_elementwise_add_op_inplace.cc DEPS op_registry elementwise_add_op scope device_context enforce executor) cc_test(test_elementwise_div_grad_grad SRCS test_elementwise_div_grad_grad.cc DEPS op_registry elementwise_div_op scope device_context enforce executor) diff --git a/paddle/fluid/operators/elementwise/elementwise_add_op.cc b/paddle/fluid/operators/elementwise/elementwise_add_op.cc index 9885e9c0954ea21668e964a20881248cf7ce589c..29aa5df27c28a70c72561da965a112d646b196fc 100644 --- a/paddle/fluid/operators/elementwise/elementwise_add_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_add_op.cc @@ -17,7 +17,6 @@ limitations under the License. 
*/ #include #include -#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/operators/elementwise/elementwise_op.h" #include "paddle/fluid/platform/complex128.h" #include "paddle/fluid/platform/complex64.h" @@ -178,3 +177,13 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseAddKernel); + +REGISTER_OP_VERSION(elementwise_add) + .AddCheckpoint( + R"ROC(Register elementwise_add for adding the attribute of + Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_add.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_div_op.cc b/paddle/fluid/operators/elementwise/elementwise_div_op.cc index f14aee8e49927e33a5e14327b7ef1716496f3c42..0252e6dfff5d755cdc9ded56df4dc77f1c542fc0 100644 --- a/paddle/fluid/operators/elementwise/elementwise_div_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_div_op.cc @@ -15,6 +15,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/elementwise/elementwise_div_op.h" #include #include + #include "paddle/fluid/operators/elementwise/elementwise_op.h" #include "paddle/fluid/platform/complex128.h" #include "paddle/fluid/platform/complex64.h" @@ -162,3 +163,12 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseDivDoubleGradKernel); + +REGISTER_OP_VERSION(elementwise_div) + .AddCheckpoint( + R"ROC(Register elementwise_div for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_div.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc b/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc index ddd69203fd316b41cd4b2b3077df6f4ac12f1a64..b28f71325652617b0e1fcfea01b0a877c9a6cb98 100644 --- a/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc @@ -69,3 +69,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseFloorDivKernel, ops::ElementwiseFloorDivKernel); + +REGISTER_OP_VERSION(elementwise_floordiv) + .AddCheckpoint( + R"ROC(Register elementwise_floordiv for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_floordiv.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_max_op.cc b/paddle/fluid/operators/elementwise/elementwise_max_op.cc index 38607d4558f90df3d0896a5352c9114348ee99c0..dde65c8199626bc7f9d72b44a0e46a7fa628ebef 100644 --- a/paddle/fluid/operators/elementwise/elementwise_max_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_max_op.cc @@ -94,3 +94,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseMaxGradKernel, ops::ElementwiseMaxGradKernel, ops::ElementwiseMaxGradKernel); + 
+REGISTER_OP_VERSION(elementwise_max) + .AddCheckpoint( + R"ROC(Register elementwise_max for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_max.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_min_op.cc b/paddle/fluid/operators/elementwise/elementwise_min_op.cc index 8f544c786586a1ed7c9112a5e81b3270a9ebbab3..174684e3c8476f7d887688cfa38750e03b08a1c4 100644 --- a/paddle/fluid/operators/elementwise/elementwise_min_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_min_op.cc @@ -94,3 +94,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseMinGradKernel, ops::ElementwiseMinGradKernel, ops::ElementwiseMinGradKernel); + +REGISTER_OP_VERSION(elementwise_min) + .AddCheckpoint( + R"ROC(Register elementwise_min for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_min.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_mod_op.cc b/paddle/fluid/operators/elementwise/elementwise_mod_op.cc index d8ad0a353c9cbebe7b40e70e5e2f293f38931376..2ac3aa6ebd3e33a432273796ac47d78f14e4065d 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mod_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_mod_op.cc @@ -69,3 +69,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseModKernel, ops::ElementwiseModFPKernel, ops::ElementwiseModFPKernel); + +REGISTER_OP_VERSION(elementwise_mod) + .AddCheckpoint( + R"ROC(Register elementwise_mod for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_mod.", + 1.0f)); diff --git 
a/paddle/fluid/operators/elementwise/elementwise_mul_op.cc b/paddle/fluid/operators/elementwise/elementwise_mul_op.cc index 28b131e729ca5b9b427c74657e582723902b01a4..6bf296f0e0b57aaab6e16083a35eab5ec80613ef 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mul_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.cc @@ -161,3 +161,12 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseMulDoubleGradKernel); + +REGISTER_OP_VERSION(elementwise_mul) + .AddCheckpoint( + R"ROC(Register elementwise_mul for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_mul.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h index d799abf92d997b830f788f0f6c6b62a7c4d146be..7f692d61649f873aa6e5e4337cb4db8b30fbb451 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_op.h @@ -22,6 +22,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/operators/common_infer_shape_functions.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" diff --git a/paddle/fluid/operators/elementwise/elementwise_pow_op.cc b/paddle/fluid/operators/elementwise/elementwise_pow_op.cc index ea0e8e7c0138770fb45b12411f9acac9ce97777e..d564cc3717f5e8385c8dd5c8437fdc89627be4ef 100644 --- a/paddle/fluid/operators/elementwise/elementwise_pow_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_pow_op.cc @@ -83,3 +83,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwisePowGradKernel, ops::ElementwisePowGradKernel, ops::ElementwisePowGradKernel); + +REGISTER_OP_VERSION(elementwise_pow) + .AddCheckpoint( + R"ROC(Register elementwise_pow for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_pow.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc index d72eacbfd44da0845d3552d3b3de600c286e57d3..80ce42109aedec1e291a24d1118d19f56feca1c5 100644 --- a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc @@ -156,3 +156,12 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseSubDoubleGradKernel); + +REGISTER_OP_VERSION(elementwise_sub) + .AddCheckpoint( + R"ROC(Register elementwise_sub for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_sub.", + 1.0f));