diff --git a/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc b/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc index c0ebf6de9de23bf7074bcdb5e6f669a059b4d720..407ef0958e1ef89862fa4200d3d4a3fa1d74f31f 100644 --- a/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc @@ -244,5 +244,5 @@ REGISTER_PASS_CAPABILITY(conv_eltwiseadd_affine_channel_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("affine_channel", 0)); diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc index 6f8591fd82543a28493b7273cb9845a2c50eb17d..a232f7ebb890a8c6af8346cddaca88d470b438e2 100644 --- a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc @@ -389,5 +389,5 @@ REGISTER_PASS_CAPABILITY(conv_eltwiseadd_bn_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("batch_norm", 0)); diff --git a/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc index 545beb34e78df521b6469f952063f83c5ee52e33..e7656171700b4ff7dda665b985521902518d7720 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc @@ -122,6 +122,6 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add2_act_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("relu", 0) .EQ("identity", 0)); diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc index 
d01a2f2622347c37d889ed19ad78e5afbd60c007..24263e66320948fba134bee6d85967639042a27c 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc @@ -109,6 +109,6 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add_act_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("relu", 0) .EQ("identity", 0)); diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc index e34a2d96581531001678de3dd4e326f70d8e035c..9121047d2fa53d2d8207359603eb9ef3e4a46322 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc @@ -95,4 +95,4 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0)); + .LE("elementwise_add", 1)); diff --git a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc index 02e3e2542f6e8dea47c53fd298c7ae7512a72c36..855ac2eb619b2c53ca679cf6388d9b8a236037f2 100644 --- a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc @@ -263,6 +263,6 @@ REGISTER_PASS_CAPABILITY(embedding_fc_lstm_fuse_pass) paddle::framework::compatible::OpVersionComparatorCombination() .EQ("lookup_table_v2", 0) .EQ("mul", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("lstm", 0) .EQ("fused_embedding_fc_lstm", 0)); diff --git a/paddle/fluid/framework/ir/fc_fuse_pass.cc b/paddle/fluid/framework/ir/fc_fuse_pass.cc index 0248aeedd0afeda155c19fb9ff59baf1d9197ea6..103fa0f5faf841c78c14dbb1679d7296e0913ba7 100644 --- a/paddle/fluid/framework/ir/fc_fuse_pass.cc +++ 
b/paddle/fluid/framework/ir/fc_fuse_pass.cc @@ -187,6 +187,6 @@ REGISTER_PASS_CAPABILITY(fc_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("mul", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("relu", 0) .EQ("fc", 0)); diff --git a/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc b/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc index c4515bbc45538ca211382aa119bbec5721c56c5a..f0e1beeae85c8ee634c9dfdb0b9a29c8e17adaf7 100644 --- a/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc +++ b/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc @@ -203,11 +203,11 @@ REGISTER_PASS_CAPABILITY(mul_gru_fuse_pass) paddle::framework::compatible::OpVersionComparatorCombination() .EQ("mul", 0) .EQ("gru", 0) - .EQ("fusion_gru", 0)); + .LE("fusion_gru", 1)); REGISTER_PASS_CAPABILITY(fc_gru_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("mul", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("gru", 0) - .EQ("fusion_gru", 0)); + .LE("fusion_gru", 1)); diff --git a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc index 9dca4d1b29f9f3ef51559383efa3e0a18965ef05..d515e5e4d95b5c1232399d28ad851c7e26b37501 100644 --- a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc @@ -202,7 +202,7 @@ REGISTER_PASS_CAPABILITY(fc_lstm_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("mul", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("lstm", 0) .EQ("fusion_lstm", 0)); REGISTER_PASS_CAPABILITY(mul_lstm_fuse_pass) diff --git a/paddle/fluid/framework/ir/map_matmul_to_mul_pass.cc b/paddle/fluid/framework/ir/map_matmul_to_mul_pass.cc index 76148a90074c1650946d02492b8664007fe7e6b3..8c4e6f330587773226caee1779fbf31eb80d3137 100644 --- a/paddle/fluid/framework/ir/map_matmul_to_mul_pass.cc +++ b/paddle/fluid/framework/ir/map_matmul_to_mul_pass.cc 
@@ -227,7 +227,7 @@ REGISTER_PASS(map_matmul_to_mul_pass, paddle::framework::ir::MapMatmul2MulPass); REGISTER_PASS_CAPABILITY(map_matmul_to_mul_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() - .EQ("matmul", 0) + .LE("matmul", 1) .EQ("mul", 0)); REGISTER_PASS(squeeze2_matmul_fuse_pass, @@ -235,7 +235,7 @@ REGISTER_PASS(squeeze2_matmul_fuse_pass, REGISTER_PASS_CAPABILITY(squeeze2_matmul_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() - .EQ("matmul", 0) + .LE("matmul", 1) .EQ("squeeze2", 0) .EQ("mul", 0)); @@ -244,6 +244,6 @@ REGISTER_PASS(reshape2_matmul_fuse_pass, REGISTER_PASS_CAPABILITY(reshape2_matmul_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() - .EQ("matmul", 0) + .LE("matmul", 1) .EQ("reshape2", 0) .EQ("mul", 0)); diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc index b0849d74b6153ff00689a86f9c2f1c58cbca62f3..10691ded668f8a2d05f75658f8734772d217b5ce 100644 --- a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc @@ -153,7 +153,7 @@ REGISTER_PASS_CAPABILITY(conv_bias_mkldnn_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0)); + .LE("elementwise_add", 1)); REGISTER_PASS(conv_transpose_bias_mkldnn_fuse_pass, paddle::framework::ir::Conv2DTransposeBiasFusePass); @@ -161,7 +161,7 @@ REGISTER_PASS_CAPABILITY(conv_transpose_bias_mkldnn_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d_transpose", 1) - .EQ("elementwise_add", 0)); + .LE("elementwise_add", 1)); REGISTER_PASS(conv3d_bias_mkldnn_fuse_pass, paddle::framework::ir::Conv3DBiasFusePass); diff --git a/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc 
b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc index a837b42b3ead48d8f852c09ed97dda1c7b0f08d2..fa1544f780ac1a549fa2119d552aa844345abfe7 100644 --- a/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -228,20 +228,19 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseConvAsX( pattern->NewNode(elementwise_add_pattern.elementwise_add_y_repr())); conv_output->AsIntermediate(); - auto get_node_from_elementwise_add = - [&elementwise_add_pattern]( - const GraphPatternDetector::subgraph_t& subgraph) + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) -> std::tuple { - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_y, elementwise_add_y, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, - elementwise_add_pattern); - - return std::make_tuple(elementwise_add_op, elementwise_add_y, - elementwise_add_out); - }; + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_y, elementwise_add_y, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); + + return std::make_tuple(elementwise_add_op, elementwise_add_y, + elementwise_add_out); + }; return ExecuteHandleOnGraph( &gpd, graph_with_stats, @@ -266,20 +265,19 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseConvAsY( conv_output); conv_output->AsIntermediate(); - auto get_node_from_elementwise_add = - [&elementwise_add_pattern]( - const GraphPatternDetector::subgraph_t& subgraph) + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) -> std::tuple { - 
GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, - elementwise_add_pattern); - - return std::make_tuple(elementwise_add_op, elementwise_add_x, - elementwise_add_out); - }; + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); + + return std::make_tuple(elementwise_add_op, elementwise_add_x, + elementwise_add_out); + }; return ExecuteHandleOnGraph( &gpd, graph_with_stats, @@ -306,17 +304,16 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseProjectionConv( conv_x_output->AsIntermediate(); conv_y_output->AsIntermediate(); - auto get_node_from_elementwise_add = - [&elementwise_add_pattern]( - const GraphPatternDetector::subgraph_t& subgraph) + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) -> std::tuple { - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, - elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); - return std::make_tuple(elementwise_add_op, elementwise_add_out); - }; + return std::make_tuple(elementwise_add_op, elementwise_add_out); + }; return ExecuteHandleOnGraph( &gpd, graph_with_stats, @@ -351,4 +348,4 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add_mkldnn_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - 
.EQ("elementwise_add", 0)); + .LE("elementwise_add", 1)); diff --git a/paddle/fluid/framework/ir/mkldnn/matmul_transpose_reshape_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/matmul_transpose_reshape_fuse_pass.cc index 41b859f0af665eae6d9ccb6a08cd29db5ce67fdf..fbc97a0a929c48c4eba3baa881061654dd802b62 100644 --- a/paddle/fluid/framework/ir/mkldnn/matmul_transpose_reshape_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/matmul_transpose_reshape_fuse_pass.cc @@ -103,6 +103,6 @@ REGISTER_PASS(matmul_transpose_reshape_fuse_pass, REGISTER_PASS_CAPABILITY(matmul_transpose_reshape_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() - .EQ("matmul", 0) + .LE("matmul", 1) .EQ("transpose", 0) .EQ("reshape", 0)); diff --git a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc b/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc index d655837f7433696203c04fc25a1553538c995292..d2763bd6a6dc0fe9fac9cdc6b8abdf7ee0853c4b 100644 --- a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc @@ -221,5 +221,5 @@ REGISTER_PASS_CAPABILITY(mkldnn_inplace_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("softmax", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("tanh", 0)); diff --git a/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc index 0784a1a024cfd31cfb2d2a3ea205518416c2ad13..a552e42619f368c2e8e2a51213ac10d9317151cf 100644 --- a/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc @@ -96,4 +96,4 @@ REGISTER_PASS_CAPABILITY(scale_matmul_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("scale", 0) - .EQ("matmul", 0)); + .LE("matmul", 1)); diff --git a/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc 
b/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc index cd6d1d57034d7ca5e849c98884c6435d6394eebd..224272a5a039fccd331ab050d25b8fa2d00bc6d9 100644 --- a/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc +++ b/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc @@ -716,9 +716,9 @@ REGISTER_PASS_CAPABILITY(multihead_matmul_fuse_pass_v2) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("mul", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("reshape2", 0) .EQ("transpose2", 0) .EQ("scale", 0) - .EQ("matmul", 0) + .LE("matmul", 1) .EQ("softmax", 0)); diff --git a/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc b/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc index 4101d593086cdbf8848034cd478e068c95d8f790..dfbf97c69b33d8fb55ec16cc0187dde71f38bbf1 100644 --- a/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc +++ b/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc @@ -262,7 +262,7 @@ REGISTER_PASS_CAPABILITY(seq_concat_fc_fuse_pass) .EQ("sequence_expand", 0) .EQ("concat", 0) .EQ("mul", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("sigmoid", 0) .EQ("tanh", 0) .EQ("relu", 0) diff --git a/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.cc b/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.cc index d9a1348e05a1291cb1bf8e398c187afaebecfca6..c2e18ca1efb01f461744fac26793014dda04909d 100644 --- a/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.cc +++ b/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.cc @@ -106,5 +106,5 @@ REGISTER_PASS_CAPABILITY(seqconv_eltadd_relu_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("sequence_conv", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("relu", 0)); diff --git a/paddle/fluid/framework/ir/skip_layernorm_fuse_pass.cc b/paddle/fluid/framework/ir/skip_layernorm_fuse_pass.cc index 
b708f2eff10e7506a08a7bfefc4bc84cd1b937cf..69bf3eda614ce7ca303cc42266f77d701105ac00 100644 --- a/paddle/fluid/framework/ir/skip_layernorm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/skip_layernorm_fuse_pass.cc @@ -193,5 +193,5 @@ REGISTER_PASS(skip_layernorm_fuse_pass, REGISTER_PASS_CAPABILITY(skip_layernorm_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("layer_norm", 0)); diff --git a/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.cc b/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.cc index 542aadbe53d5e5da367fdd0433b9c1b570e52900..c0420e6b5f3c212721b278ce04bf7ece090a5cc5 100644 --- a/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.cc +++ b/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.cc @@ -389,10 +389,10 @@ REGISTER_PASS(squared_mat_sub_fuse_pass, REGISTER_PASS_CAPABILITY(squared_mat_sub_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() - .EQ("matmul", 0) + .LE("matmul", 1) .EQ("matmul_v2", 0) .EQ("square", 0) - .EQ("elementwise_mul", 0) - .EQ("elementwise_sub", 0) + .LE("elementwise_mul", 1) + .LE("elementwise_sub", 1) .EQ("fill_constant", 1) .EQ("fusion_squared_mat_sub", 0)); diff --git a/paddle/fluid/framework/ir/unsqueeze2_eltwise_fuse_pass.cc b/paddle/fluid/framework/ir/unsqueeze2_eltwise_fuse_pass.cc index f984744532fcc458d8fc08ab419bd3a675c43faa..d4d3c41e658a8a048947dd90e26a746d25675280 100644 --- a/paddle/fluid/framework/ir/unsqueeze2_eltwise_fuse_pass.cc +++ b/paddle/fluid/framework/ir/unsqueeze2_eltwise_fuse_pass.cc @@ -131,4 +131,4 @@ REGISTER_PASS_CAPABILITY(unsqueeze2_eltwise_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("unsqueeze2", 0) - .EQ("elementwise_mul", 0)); + .LE("elementwise_mul", 1)); diff --git a/paddle/fluid/framework/op_version_registry.cc b/paddle/fluid/framework/op_version_registry.cc index 
bab1f20079c5ba2b9d8686a8f9c536bc42e54883..bc9963b392574ddd574894b6eb7de3e67dc31ef5 100644 --- a/paddle/fluid/framework/op_version_registry.cc +++ b/paddle/fluid/framework/op_version_registry.cc @@ -18,29 +18,6 @@ namespace paddle { namespace framework { namespace compatible { -namespace { -template -OpUpdate* new_update(InfoType&& info) { - return new OpUpdate(info); -} -} - -OpVersionDesc&& OpVersionDesc::ModifyAttr(const std::string& name, - const std::string& remark, - const OpAttrVariantT& default_value) { - infos_.emplace_back(new_update( - OpAttrInfo(name, remark, default_value))); - return std::move(*this); -} - -OpVersionDesc&& OpVersionDesc::NewAttr(const std::string& name, - const std::string& remark, - const OpAttrVariantT& default_value) { - infos_.emplace_back(new_update( - OpAttrInfo(name, remark, default_value))); - return std::move(*this); -} - OpVersionDesc&& OpVersionDesc::NewInput(const std::string& name, const std::string& remark) { infos_.emplace_back( diff --git a/paddle/fluid/framework/op_version_registry.h b/paddle/fluid/framework/op_version_registry.h index d8321939f6c61d55c68055dc0d03fa0153489379..83557d5572cd38491c2cce3990338e87de0f92ea 100644 --- a/paddle/fluid/framework/op_version_registry.h +++ b/paddle/fluid/framework/op_version_registry.h @@ -118,13 +118,44 @@ class OpUpdate : public OpUpdateBase { OpUpdateType type_; }; +template +OpUpdate* new_update(InfoType&& info) { + return new OpUpdate(info); +} + +template +OpAttrVariantT op_attr_wrapper(const T& val) { + return OpAttrVariantT{val}; +} + +template +OpAttrVariantT op_attr_wrapper(const char (&val)[N]) { + PADDLE_ENFORCE_EQ( + val[N - 1], 0, + platform::errors::InvalidArgument( + "The argument of operator register %c is illegal.", val[N - 1])); + return OpAttrVariantT{std::string{val}}; +} + class OpVersionDesc { public: /* Compatibility upgrade */ + template OpVersionDesc&& ModifyAttr(const std::string& name, const std::string& remark, - const OpAttrVariantT& default_value); + 
const T& default_value) { + infos_.emplace_back(new_update( + OpAttrInfo(name, remark, op_attr_wrapper(default_value)))); + return std::move(*this); + } + + template OpVersionDesc&& NewAttr(const std::string& name, const std::string& remark, - const OpAttrVariantT& default_value); + const T& default_value) { + infos_.emplace_back(new_update( + OpAttrInfo(name, remark, op_attr_wrapper(default_value)))); + return std::move(*this); + } + OpVersionDesc&& NewInput(const std::string& name, const std::string& remark); OpVersionDesc&& NewOutput(const std::string& name, const std::string& remark); OpVersionDesc&& BugfixWithBehaviorChanged(const std::string& remark); diff --git a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc index 10204271c42d6dbf1a01adfdb8bc60f20ee2baf7..61117cc6032ba08d307bedf91eb2600b60cea483 100644 --- a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc +++ b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc @@ -390,8 +390,8 @@ REGISTER_PASS_CAPABILITY(tensorrt_subgraph_pass) .EQ("concat", 0) .EQ("tanh", 0) .EQ("pad", 0) - .EQ("elementwise_add", 0) - .EQ("elementwise_mul", 0) + .LE("elementwise_add", 1) + .LE("elementwise_mul", 1) .EQ("prelu", 0) .LE("conv2d_transpose", 1) .LE("leaky_relu", 1) @@ -399,8 +399,8 @@ REGISTER_PASS_CAPABILITY(tensorrt_subgraph_pass) .EQ("shuffle_channel", 0) .EQ("swish", 0) .EQ("split", 0) - .EQ("instance_norm", 0) + .LE("instance_norm", 1) .EQ("gelu", 0) .EQ("layer_norm", 0) .EQ("scale", 0) - .EQ("matmul", 0)); + .LE("matmul", 1)); diff --git a/paddle/fluid/operators/allclose_op.cc b/paddle/fluid/operators/allclose_op.cc index e452d3c21b8e0b892d528f5f830ce14e5debafa6..edd626449c6ea38bf4712087066f22d65090a7eb 100644 --- a/paddle/fluid/operators/allclose_op.cc +++ b/paddle/fluid/operators/allclose_op.cc @@ -160,12 +160,37 @@ REGISTER_OPERATOR( REGISTER_OP_CPU_KERNEL(allclose, ops::AllcloseKernel, 
ops::AllcloseKernel); +/* ========================== register checkpoint ===========================*/ REGISTER_OP_VERSION(allclose) .AddCheckpoint( - R"ROC( - Upgrade allclose add 2 attributes [atol, rtol]. - )ROC", + R"ROC(Upgrade allclose, add two new inputs [Rtol] and [Atol].)ROC", paddle::framework::compatible::OpVersionDesc() + .NewInput("Rtol", + "The added input 'Rtol' is not " + "dispensable.") + .NewInput("Atol", + "The added input 'Atol' is not " + "dispensable.")) + .AddCheckpoint( + R"ROC(Delete two float attributes [rtol] and [atol], + then add 2 string attributes [atol, rtol]. Don't be surprised. + This is because float cannot represent high-precision + floating-point values, and our framework doesn't support + the use of double attributes. As a result, string instead + of double is used here to represent high-precision + floating-point values. + )ROC", + paddle::framework::compatible::OpVersionDesc() + .DeleteAttr("rtol", + "The attribute 'rtol' is deleted. " + "The reason why it is deleted is that " + "attributes do not support a float64 value " + "and it is changed to a tensor.") + .DeleteAttr("atol", + "The attribute 'atol' is deleted. " + "The reason why it is deleted is that " + "attributes do not support a float64 value " + "and it is changed to a tensor.") .NewAttr("rtol", "(string) The relative tolerance. Default: :math:`1e-5` .", std::string("1e-5")) diff --git a/paddle/fluid/operators/arg_max_op.cc b/paddle/fluid/operators/arg_max_op.cc index a82134921ef64f89151eb9c521ea3cbb6f83ee7b..0f5c048b6be9c73ae98181685269592f409196cd 100644 --- a/paddle/fluid/operators/arg_max_op.cc +++ b/paddle/fluid/operators/arg_max_op.cc @@ -44,8 +44,8 @@ REGISTER_OP_VERSION(arg_max) false) .ModifyAttr( "dtype", - "change the default value of dtype, the older version " - "is -1, means return the int64 indices." - "The new version is 3, return the int64 indices directly." 
- "And supporting the dtype of -1 in new version.", + "Change the default value of dtype from -1 to 3" + ", means return the int64 indices directly. The reason why " + "changing the default value is that the int64 value in " + "VarType is 3 in the framework.proto.", 3)); diff --git a/paddle/fluid/operators/arg_min_op.cc b/paddle/fluid/operators/arg_min_op.cc index 23ed7d727c536225a98a1ea9e6e3af723b4352c3..0a4ba6fb0bfdfccfc4eae99da730e96fe5f0a540 100644 --- a/paddle/fluid/operators/arg_min_op.cc +++ b/paddle/fluid/operators/arg_min_op.cc @@ -44,8 +44,8 @@ REGISTER_OP_VERSION(arg_min) false) .ModifyAttr( "dtype", - "change the default value of dtype, the older version " - "is -1, means return the int64 indices." - "The new version is 3, return the int64 indices directly." - "And supporting the dtype of -1 in new version.", + "Change the default value of dtype from -1 to 3" + ", means return the int64 indices directly. The reason why " + "changing the default value is that the int64 value in " + "VarType is 3 in the framework.proto.", 3)); diff --git a/paddle/fluid/operators/coalesce_tensor_op.cc b/paddle/fluid/operators/coalesce_tensor_op.cc index 1b133db73350c7aa2f88d94d07d33c5526d17cc3..464d8c8d56f5c425706c01ea01d14f7ac2aed0ab 100644 --- a/paddle/fluid/operators/coalesce_tensor_op.cc +++ b/paddle/fluid/operators/coalesce_tensor_op.cc @@ -15,6 +15,7 @@ #include #include #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/var_type.h" #include "paddle/fluid/operators/math/math_function.h" @@ -297,3 +298,14 @@ REGISTER_OP_CUDA_KERNEL( ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel); #endif + +REGISTER_OP_VERSION(coalesce_tensor) + .AddCheckpoint( + R"ROC( + Upgrade coalesce_tensor: add a new attribute [use_align].)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "use_align", + "In order to optionally take memory 
alignment into account when " + "coalescing tensors. The default value is true to be compatible " + "with before.", + true)); diff --git a/paddle/fluid/operators/controlflow/compare_op.cc b/paddle/fluid/operators/controlflow/compare_op.cc index 21c28f9818b51e5b52ca2d89684f95d672a2d913..3cad86d96c26a0e25fcbaeb02405315895744e50 100644 --- a/paddle/fluid/operators/controlflow/compare_op.cc +++ b/paddle/fluid/operators/controlflow/compare_op.cc @@ -133,9 +133,9 @@ class CompareOp : public framework::OperatorWithKernel { REGISTER_OP_VERSION(op_type) \ .AddCheckpoint( \ R"ROC(Upgrade compare ops, add a new attribute [force_cpu])ROC", \ - paddle::framework::compatible::OpVersionDesc().NewAttr( \ + paddle::framework::compatible::OpVersionDesc().ModifyAttr( \ "force_cpu", \ - "In order to force fill output variable to cpu memory.", \ + "In order to force fill output variable to gpu memory.", \ false)); #define REGISTER_COMPARE_OP(op_type, _equation) \ diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc index 6c4844855591911c025230822768d091826cb794..d6bdd848bad117d3b6315c6a4368c808ab735026 100644 --- a/paddle/fluid/operators/conv_transpose_op.cc +++ b/paddle/fluid/operators/conv_transpose_op.cc @@ -578,4 +578,37 @@ REGISTER_OP_VERSION(conv_transpose) "output_padding", "In order to add additional size to one side of each dimension " "in the output", - {})); + std::vector{})); + +REGISTER_OP_VERSION(conv2d_transpose) + .AddCheckpoint( + R"ROC( + Upgrade conv2d transpose to add a new attribute [output_padding]. + )ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "output_padding", + "In order to add additional size to one side of each dimension " + "in the output", + std::vector{})); + +REGISTER_OP_VERSION(conv3d_transpose) + .AddCheckpoint( + R"ROC( + Upgrade conv3d transpose to add a new attribute [output_padding]. 
+ )ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "output_padding", + "In order to add additional size to one side of each dimension " + "in the output", + std::vector{})); + +REGISTER_OP_VERSION(depthwise_conv2d_transpose) + .AddCheckpoint( + R"ROC( + Upgrade depthwise conv2d transpose to add a new attribute [output_padding]. + )ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "output_padding", + "In order to add additional size to one side of each dimension " + "in the output", + std::vector{})); diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cc b/paddle/fluid/operators/detection/generate_proposals_op.cc index 2bf5e6c5e04da539622a072dae35948492a43243..805ab8aad031887ea9d3d0d269840810b716c9e9 100644 --- a/paddle/fluid/operators/detection/generate_proposals_op.cc +++ b/paddle/fluid/operators/detection/generate_proposals_op.cc @@ -303,6 +303,13 @@ REGISTER_OPERATOR( REGISTER_OP_CPU_KERNEL(generate_proposals, ops::GenerateProposalsKernel, ops::GenerateProposalsKernel); REGISTER_OP_VERSION(generate_proposals) + .AddCheckpoint( + R"ROC( + Incompatible upgrade of output [RpnRoisLod])ROC", + paddle::framework::compatible::OpVersionDesc().DeleteOutput( + "RpnRoisLod", + "Delete RpnRoisLod due to incorrect output name and " + "it is not used in object detection models yet.")) .AddCheckpoint( R"ROC( Upgrade generate_proposals add a new output [RpnRoisNum])ROC", diff --git a/paddle/fluid/operators/elementwise/CMakeLists.txt b/paddle/fluid/operators/elementwise/CMakeLists.txt index 94886066ca59aad291c711a0fbb027854e908bf5..06ca98e526e95b414584f9634a3d42f84d6b369f 100644 --- a/paddle/fluid/operators/elementwise/CMakeLists.txt +++ b/paddle/fluid/operators/elementwise/CMakeLists.txt @@ -1,5 +1,9 @@ include(operators) -register_operators() +if(WITH_UNITY_BUILD) + # Load Unity Build rules for operators in paddle/fluid/operators/elementwise. 
+ include(unity_build_rule.cmake) +endif() +register_operators(DEPS op_version_registry) cc_test(test_elementwise_add_op_inplace SRCS test_elementwise_add_op_inplace.cc DEPS op_registry elementwise_add_op scope device_context enforce executor) cc_test(test_elementwise_div_grad_grad SRCS test_elementwise_div_grad_grad.cc DEPS op_registry elementwise_div_op scope device_context enforce executor) diff --git a/paddle/fluid/operators/elementwise/elementwise_add_op.cc b/paddle/fluid/operators/elementwise/elementwise_add_op.cc index 9885e9c0954ea21668e964a20881248cf7ce589c..29aa5df27c28a70c72561da965a112d646b196fc 100644 --- a/paddle/fluid/operators/elementwise/elementwise_add_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_add_op.cc @@ -17,7 +17,6 @@ limitations under the License. */ #include #include -#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/operators/elementwise/elementwise_op.h" #include "paddle/fluid/platform/complex128.h" #include "paddle/fluid/platform/complex64.h" @@ -178,3 +177,13 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseAddKernel); + +REGISTER_OP_VERSION(elementwise_add) + .AddCheckpoint( + R"ROC(Register elementwise_add for adding the attribute of + Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_add.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_div_op.cc b/paddle/fluid/operators/elementwise/elementwise_div_op.cc index f14aee8e49927e33a5e14327b7ef1716496f3c42..0252e6dfff5d755cdc9ded56df4dc77f1c542fc0 100644 --- a/paddle/fluid/operators/elementwise/elementwise_div_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_div_op.cc @@ -15,6 +15,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/elementwise/elementwise_div_op.h" #include #include + #include "paddle/fluid/operators/elementwise/elementwise_op.h" #include "paddle/fluid/platform/complex128.h" #include "paddle/fluid/platform/complex64.h" @@ -162,3 +163,12 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseDivDoubleGradKernel); + +REGISTER_OP_VERSION(elementwise_div) + .AddCheckpoint( + R"ROC(Register elementwise_div for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_div.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc b/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc index ddd69203fd316b41cd4b2b3077df6f4ac12f1a64..b28f71325652617b0e1fcfea01b0a877c9a6cb98 100644 --- a/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc @@ -69,3 +69,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseFloorDivKernel, ops::ElementwiseFloorDivKernel); + +REGISTER_OP_VERSION(elementwise_floordiv) + .AddCheckpoint( + R"ROC(Register elementwise_floordiv for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_floordiv.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_max_op.cc b/paddle/fluid/operators/elementwise/elementwise_max_op.cc index 38607d4558f90df3d0896a5352c9114348ee99c0..dde65c8199626bc7f9d72b44a0e46a7fa628ebef 100644 --- a/paddle/fluid/operators/elementwise/elementwise_max_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_max_op.cc @@ -94,3 +94,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseMaxGradKernel, ops::ElementwiseMaxGradKernel, ops::ElementwiseMaxGradKernel); + 
+REGISTER_OP_VERSION(elementwise_max) + .AddCheckpoint( + R"ROC(Register elementwise_max for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_max.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_min_op.cc b/paddle/fluid/operators/elementwise/elementwise_min_op.cc index 8f544c786586a1ed7c9112a5e81b3270a9ebbab3..174684e3c8476f7d887688cfa38750e03b08a1c4 100644 --- a/paddle/fluid/operators/elementwise/elementwise_min_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_min_op.cc @@ -94,3 +94,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseMinGradKernel, ops::ElementwiseMinGradKernel, ops::ElementwiseMinGradKernel); + +REGISTER_OP_VERSION(elementwise_min) + .AddCheckpoint( + R"ROC(Register elementwise_min for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_min.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_mod_op.cc b/paddle/fluid/operators/elementwise/elementwise_mod_op.cc index d8ad0a353c9cbebe7b40e70e5e2f293f38931376..2ac3aa6ebd3e33a432273796ac47d78f14e4065d 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mod_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_mod_op.cc @@ -69,3 +69,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseModKernel, ops::ElementwiseModFPKernel, ops::ElementwiseModFPKernel); + +REGISTER_OP_VERSION(elementwise_mod) + .AddCheckpoint( + R"ROC(Register elementwise_mod for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_mod.", + 1.0f)); diff --git 
a/paddle/fluid/operators/elementwise/elementwise_mul_op.cc b/paddle/fluid/operators/elementwise/elementwise_mul_op.cc index 28b131e729ca5b9b427c74657e582723902b01a4..6bf296f0e0b57aaab6e16083a35eab5ec80613ef 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mul_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.cc @@ -161,3 +161,12 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseMulDoubleGradKernel); + +REGISTER_OP_VERSION(elementwise_mul) + .AddCheckpoint( + R"ROC(Register elementwise_mul for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_mul.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h index f426a54f79491d442c16dc1401aca8e2532b94a2..be10376f6111579377586a04a2cd8212cdcbd2e3 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_op.h @@ -22,6 +22,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/operators/common_infer_shape_functions.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" diff --git a/paddle/fluid/operators/elementwise/elementwise_pow_op.cc b/paddle/fluid/operators/elementwise/elementwise_pow_op.cc index ea0e8e7c0138770fb45b12411f9acac9ce97777e..d564cc3717f5e8385c8dd5c8437fdc89627be4ef 100644 --- a/paddle/fluid/operators/elementwise/elementwise_pow_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_pow_op.cc @@ -83,3 +83,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwisePowGradKernel, ops::ElementwisePowGradKernel, ops::ElementwisePowGradKernel); + +REGISTER_OP_VERSION(elementwise_pow) + .AddCheckpoint( + R"ROC(Register elementwise_pow for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_pow.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc index d72eacbfd44da0845d3552d3b3de600c286e57d3..80ce42109aedec1e291a24d1118d19f56feca1c5 100644 --- a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc @@ -156,3 +156,12 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseSubDoubleGradKernel); + +REGISTER_OP_VERSION(elementwise_sub) + .AddCheckpoint( + R"ROC(Register elementwise_sub for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_sub.", + 1.0f)); diff --git a/paddle/fluid/operators/fake_dequantize_op.cc 
b/paddle/fluid/operators/fake_dequantize_op.cc index 9b0328b0945ba9b57cb9ab27233656e3b0af4f5f..b70fe78e1a528279e4ba6369c04b5771dfa287a0 100644 --- a/paddle/fluid/operators/fake_dequantize_op.cc +++ b/paddle/fluid/operators/fake_dequantize_op.cc @@ -15,6 +15,7 @@ limitations under the License. */ #include "paddle/fluid/operators/fake_dequantize_op.h" #include #include +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace operators { @@ -238,3 +239,10 @@ REGISTER_OPERATOR( REGISTER_OP_CPU_KERNEL(fake_channel_wise_dequantize_max_abs, ops::FakeChannelWiseDequantizeMaxAbsKernel, ops::FakeChannelWiseDequantizeMaxAbsKernel); + +REGISTER_OP_VERSION(fake_channel_wise_dequantize_max_abs) + .AddCheckpoint( + R"ROC(add new attributes [quant_axis] for applying per-channel " + "dequantization to conv2d_transpose and mul ops.)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "quant_axis", "The axis for dequantization.", 0)); diff --git a/paddle/fluid/operators/fake_quantize_op.cc b/paddle/fluid/operators/fake_quantize_op.cc index 04fa8db9a5a6fa4b3b49b6432a564d472f130841..df4debb62033268d9cccdf908794d1c163469a03 100644 --- a/paddle/fluid/operators/fake_quantize_op.cc +++ b/paddle/fluid/operators/fake_quantize_op.cc @@ -16,6 +16,7 @@ limitations under the License.
*/ #include #include #include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/operators/clip_op.h" #include "paddle/fluid/platform/transform.h" @@ -805,3 +806,10 @@ REGISTER_OPERATOR(fake_channel_wise_quantize_dequantize_abs_max, REGISTER_OP_CPU_KERNEL( fake_channel_wise_quantize_dequantize_abs_max, ops::FakeChannelWiseQuantizeDequantizeAbsMaxKernel); + +REGISTER_OP_VERSION(fake_channel_wise_quantize_abs_max) + .AddCheckpoint( + R"ROC(add new attributes [quant_axis] for applying per-channel " + "quantization to conv2d_transpose and mul ops.)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "quant_axis", "The axis for quantization.", 0)); diff --git a/paddle/fluid/operators/flip_op.cc b/paddle/fluid/operators/flip_op.cc index fc17657594b7a88d15bd2d9f184bc1bf71a71bc2..d7ed5fb767cd9ac9ee9e5024ae1f29c9f12abdcd 100644 --- a/paddle/fluid/operators/flip_op.cc +++ b/paddle/fluid/operators/flip_op.cc @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License.
*/ #include "paddle/fluid/operators/flip_op.h" - #include #include #include +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace operators { @@ -154,3 +154,12 @@ REGISTER_OP_CPU_KERNEL( ops::FlipKernel, ops::FlipKernel, ops::FlipKernel); + +/* ========================== register checkpoint ===========================*/ +REGISTER_OP_VERSION(flip) + .AddCheckpoint( + R"ROC(Upgrade flip, add new attr [axis] and delete attr [dims].)ROC", + paddle::framework::compatible::OpVersionDesc() + .NewAttr("axis", "The added attr 'axis' doesn't set default value.", + boost::none) + .DeleteAttr("dims", "The attr 'dims' is deleted.")); diff --git a/paddle/fluid/operators/fused/fusion_gru_op.cc b/paddle/fluid/operators/fused/fusion_gru_op.cc index f5904039d4b6ef9794991687c535a0989864e9f6..71dccad0b581b0f7f043c989ca9e7854243590f7 100644 --- a/paddle/fluid/operators/fused/fusion_gru_op.cc +++ b/paddle/fluid/operators/fused/fusion_gru_op.cc @@ -16,6 +16,7 @@ limitations under the License. 
*/ #include // for memcpy #include #include +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/operators/jit/kernels.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/fc.h" @@ -479,3 +480,13 @@ REGISTER_OPERATOR(fusion_gru, ops::FusionGRUOp, ops::FusionGRUOpMaker); REGISTER_OP_CPU_KERNEL(fusion_gru, ops::FusionGRUKernel, ops::FusionGRUKernel); + +/* ========================== register checkpoint ===========================*/ +REGISTER_OP_VERSION(fusion_gru) + .AddCheckpoint( + R"ROC(Upgrade fusion_gru add a new attribute [Scale_weights])ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_weights", + "The added attribute 'Scale_weights' is not yet " + "registered.", + std::vector{1.0f})); diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc index 840975f754f5afca3ad76251ac65cef35714a1b8..3ae9f6bdae78cca4445fa186530f8b18a9b7ee6d 100644 --- a/paddle/fluid/operators/gaussian_random_op.cc +++ b/paddle/fluid/operators/gaussian_random_op.cc @@ -213,4 +213,4 @@ REGISTER_OP_VERSION(gaussian_random) .ModifyAttr( "shape", "Add the default value of shape, the default value is {}.", - {})); + std::vector{})); diff --git a/paddle/fluid/operators/grid_sampler_op.cc b/paddle/fluid/operators/grid_sampler_op.cc index 3d34a3d15c1ddd944dd205def278beeeef3efdeb..e357133be440d0d396a1cdc580127bf47dce6841 100644 --- a/paddle/fluid/operators/grid_sampler_op.cc +++ b/paddle/fluid/operators/grid_sampler_op.cc @@ -16,6 +16,7 @@ limitations under the License. 
*/ #include #include #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/op_version_registry.h" #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/cudnn_helper.h" #endif @@ -237,3 +238,11 @@ REGISTER_OP_CPU_KERNEL( grid_sampler_grad, ops::GridSampleGradOpKernel, ops::GridSampleGradOpKernel); + +REGISTER_OP_VERSION(grid_sampler) + .AddCheckpoint( + R"ROC( + Upgrade grid_sampler add a new attribute [mode]. + )ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "mode", "In order to specify interpolation mode", "bilinear")); diff --git a/paddle/fluid/operators/instance_norm_op.cc b/paddle/fluid/operators/instance_norm_op.cc index 1018adcd930a4a81fbb46afb35c035f6b4c8df9b..28643ac1c0d832dd9550c036f0a08383b256b5f6 100644 --- a/paddle/fluid/operators/instance_norm_op.cc +++ b/paddle/fluid/operators/instance_norm_op.cc @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/data_layout.h" +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/operators/math/math_function.h" namespace paddle { @@ -701,3 +702,20 @@ REGISTER_OP_CPU_KERNEL( float>, ops::InstanceNormDoubleGradKernel); + +REGISTER_OP_VERSION(instance_norm) + .AddCheckpoint( + R"ROC( + Change dispensable of attribute from False to True in instance_norm. + )ROC", + paddle::framework::compatible::OpVersionDesc() + .ModifyAttr( + "Bias", + "The arg 'dispensable' of Input 'Bias' is changed: from " + "'False' to 'True'.", + true) + .ModifyAttr( + "Scale", + "The arg 'dispensable' of Input 'Scale' is changed: from " + "'False' to 'True'.", + true)); diff --git a/paddle/fluid/operators/linspace_op.cc b/paddle/fluid/operators/linspace_op.cc index 7cc07383bfa5f67a2404b220cb481d9017b40fd8..fe271fa5e893a750bdbbdc05ac4b7835205ebe66 100644 --- a/paddle/fluid/operators/linspace_op.cc +++ b/paddle/fluid/operators/linspace_op.cc @@ -14,6 +14,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/linspace_op.h" #include +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace operators { @@ -92,3 +93,11 @@ REGISTER_OP_CPU_KERNEL(linspace, ops::CPULinspaceKernel, ops::CPULinspaceKernel, ops::CPULinspaceKernel, ops::CPULinspaceKernel); + +REGISTER_OP_VERSION(linspace) + .AddCheckpoint( + R"ROC( + Upgrade linspace to add a new attribute [dtype]. + )ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "dtype", "In order to change output data type ", 5)); diff --git a/paddle/fluid/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc index d45669a9f075b5dfcbd9df27df9868758891ae4d..668445d2429e2977f26c569e01a50da66f136130 100644 --- a/paddle/fluid/operators/matmul_op.cc +++ b/paddle/fluid/operators/matmul_op.cc @@ -16,6 +16,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/operators/math/blas.h" #ifdef PADDLE_WITH_MKLDNN #include "paddle/fluid/platform/mkldnn_helper.h" @@ -932,3 +933,14 @@ REGISTER_OP_CUDA_KERNEL( ops::MatMulDoubleGradKernel, ops::MatMulDoubleGradKernel); #endif + +REGISTER_OP_VERSION(matmul) + .AddCheckpoint( + R"ROC(Register matmul for adding the attribute of + fused_reshape_Y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "fused_reshape_Y", + "In order to support the function of fused the input Y " + " and input X into the input X when " + "using the operator of matmul, and get raw shape of input Y.", + std::vector{})); diff --git a/paddle/fluid/operators/pixel_shuffle_op.cc b/paddle/fluid/operators/pixel_shuffle_op.cc index 111a82c6cce78b0b6fa462c332d9bd3dd6ce6e43..cb9bbe727de5c014ebfe9ea93f6fe279a897569b 100644 --- a/paddle/fluid/operators/pixel_shuffle_op.cc +++ b/paddle/fluid/operators/pixel_shuffle_op.cc @@ -11,6 +11,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/pixel_shuffle_op.h" #include +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace operators { @@ -185,3 +186,10 @@ REGISTER_OP_CPU_KERNEL( pixel_shuffle_grad, ops::PixelShuffleGradOpKernel, ops::PixelShuffleGradOpKernel); + +REGISTER_OP_VERSION(pixel_shuffle) + .AddCheckpoint( + R"ROC( + Compatible upgrade of pixel_shuffle, add a new attribute [data_format])ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "data_format", "Specify the data format of the input data", true)); diff --git a/paddle/fluid/operators/print_op.cc b/paddle/fluid/operators/print_op.cc index 80faf833be591d0f4e2e7ee365d0163869672c02..c558f1852f54c3b91cff024cf78eece491843907 100644 --- a/paddle/fluid/operators/print_op.cc +++ b/paddle/fluid/operators/print_op.cc @@ -13,6 +13,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/operators/tensor_formatter.h" namespace paddle { @@ -173,3 +174,11 @@ REGISTER_OPERATOR(print, ops::PrintOp, ops::PrintOpProtoAndCheckMaker, ops::PrintOpGradientMaker, ops::PrintOpGradientMaker, ops::PrintOpInferShape, ops::PrintOpVarTypeInference); + +REGISTER_OP_VERSION(print) + .AddCheckpoint( + R"ROC(Upgrade print add a new attribute [print_tensor_layout] to " + "control whether to print tensor's layout.)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "print_tensor_layout", "Whether to print the tensor's layout.", + true)); diff --git a/paddle/fluid/operators/rank_attention_op.cc b/paddle/fluid/operators/rank_attention_op.cc index 7c2b4a8b48927aed2c7f369eb902a20311ecff9e..e5332da6475d79be600d82b68604753f8ce32d5c 100644 --- a/paddle/fluid/operators/rank_attention_op.cc +++ b/paddle/fluid/operators/rank_attention_op.cc @@ -13,6 +13,7 @@ limitations under the License.
*/ #include #include #include +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace operators { @@ -178,3 +179,18 @@ REGISTER_OP_CPU_KERNEL( rank_attention, ops::RankAttentionKernel, ops::RankAttentionKernel); + +REGISTER_OP_VERSION(rank_attention) + .AddCheckpoint( + R"ROC( + Upgrade rank_attention, add 1 outputs [InputHelp] and 1 attribute + [MaxSize]. + )ROC", + paddle::framework::compatible::OpVersionDesc() + .NewOutput("InputHelp", + "Output tensor of rank_attention_Op operator " + "in order to assist calculation in the reverse process.") + .NewAttr( + "MaxSize", + "Forward calculation to set the pre-applied video memory size", + 0)); diff --git a/paddle/fluid/operators/roi_align_op.cc b/paddle/fluid/operators/roi_align_op.cc index 0eeb7e0bb24f512aa6859e92de9f490e491543aa..6a4a88a004586daf83716b3e3c2cd3ea2b4fa376 100644 --- a/paddle/fluid/operators/roi_align_op.cc +++ b/paddle/fluid/operators/roi_align_op.cc @@ -233,6 +233,13 @@ REGISTER_OP_CPU_KERNEL( ops::CPUROIAlignGradOpKernel, ops::CPUROIAlignGradOpKernel); REGISTER_OP_VERSION(roi_align) + .AddCheckpoint( + R"ROC( + Incompatible upgrade of input [RpnRoisLod])ROC", + paddle::framework::compatible::OpVersionDesc().DeleteInput( + "RpnRoisLod", + "Delete RpnRoisLod due to incorrect input name and " + "it is not used in object detection models yet.")) .AddCheckpoint( R"ROC( Upgrade roi_align add a new input [RoisNum])ROC", diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc index be3187b7513144f583458f3d7902a102e531a981..a512e7dcd682b517f64e3b14e2f35c4c539ec8b4 100644 --- a/paddle/fluid/operators/roi_pool_op.cc +++ b/paddle/fluid/operators/roi_pool_op.cc @@ -227,6 +227,13 @@ REGISTER_OP_CPU_KERNEL( ops::CPUROIPoolGradOpKernel, ops::CPUROIPoolGradOpKernel); REGISTER_OP_VERSION(roi_pool) + .AddCheckpoint( + R"ROC( + Incompatible upgrade of input [RpnRoisLod])ROC", + paddle::framework::compatible::OpVersionDesc().DeleteInput( + "RpnRoisLod", + 
"Delete RpnRoisLod due to incorrect input name and " + "it is not used in object detection models yet.")) .AddCheckpoint( R"ROC( Upgrade roi_pool add a new input [RoisNum])ROC", diff --git a/paddle/fluid/operators/trace_op.cc b/paddle/fluid/operators/trace_op.cc index 1b9e7c10eb27ae647864548baa80314f0ca1d5e4..623d4c7fc23ba2477d720c46697760efb1dd1429 100644 --- a/paddle/fluid/operators/trace_op.cc +++ b/paddle/fluid/operators/trace_op.cc @@ -13,6 +13,7 @@ // limitations under the License. #include "paddle/fluid/operators/trace_op.h" +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace operators { @@ -89,13 +90,13 @@ class TraceOpMaker : public framework::OpProtoAndCheckerMaker { R"DOC((int, default 0), the first axis of the 2-D planes from which the diagonals should be taken. Can be either positive or negative. Default: 0. )DOC") - .SetDefault(-2); + .SetDefault(0); AddAttr( "axis2", R"DOC((int, default 1), the second axis of the 2-D planes from which the diagonals should be taken. Can be either positive or negative. Default: 1. )DOC") - .SetDefault(-1); + .SetDefault(1); AddComment(R"DOC( Trace Operator. Return the sum along diagonals of the input tensor. 
@@ -178,3 +179,21 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::TraceGradKernel); + +/* ========================== register checkpoint ===========================*/ +REGISTER_OP_VERSION(trace) + .AddCheckpoint( + R"ROC(Upgrade trace add a new attribute [axis2])ROC", + paddle::framework::compatible::OpVersionDesc() + .NewAttr("axis1", + "The added attribute 'axis1' is not yet registered.", + std::vector{0.0f}) + .NewAttr("axis2", + "The added attribute 'axis2' is not yet registered.", + std::vector{1.0f}) + .DeleteAttr("dim1", + "The attribute 'dim1' is not recommend according to " + "the specification 2.0.") + .DeleteAttr("dim2", + "The attribute 'dim2' is not recommend according to " + "the specification 2.0.")); diff --git a/paddle/fluid/operators/unique_op.cc b/paddle/fluid/operators/unique_op.cc index aed919e996161fd2800f67b1f51819fe233a4a63..82f894a3a3a3dc476c26c14406d162bb89c5ad04 100644 --- a/paddle/fluid/operators/unique_op.cc +++ b/paddle/fluid/operators/unique_op.cc @@ -184,7 +184,7 @@ REGISTER_OP_VERSION(unique) .NewAttr("axis", "The axis to apply unique. If None, the input will be " "flattened.", - {}) + std::vector{}) .NewAttr("is_sorted", "If True, the unique elements of X are in ascending order." "Otherwise, the unique elements are not sorted.",