From 5eab1a3879dd257b2d650c5464e719e9a0570ed6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=9F=B3=E6=99=93=E4=BC=9F?= <39303645+Shixiaowei02@users.noreply.github.com> Date: Wed, 13 Jan 2021 11:19:18 +0800 Subject: [PATCH] git cherry-pick the commits of operator version registries, test=release/2.0 (#30292) * Register op version for grid_sampler, test=op_version (#29916) * add op version for fake_quant and fake_dequant ops, test=op_version (#29923) * Register op version for print, test=op_version (#29945) * add gru op_register_version; test=op_version; (#29931) * Register op version for coalesce_tensor. (#29940) * register op version for conv2d_transpose, conv3d_transpose and depthwise_conv2d_transpose, test=op_version (#29937) * add op_register_version for allclose op; test=op_version (#29968) * register ModifyAttr for instance_norm, test=op_version (#29938) * add op_version for flip op [test=op_version] (#30019) * add the op version check for the elementwise ops, test=op_version (#30010) * add the support the op version check for matmul, test=op_version (#30011) * Revert "register ModifyAttr for instance_norm, test=op_version (#29938)" * add REGISTER_OP_VERSION for generate_proposals, roi_align, roi_pool test=op_version (#30034) * Fix rank_attention op_version, test=op_version (#30006) * fix rank_attention, test=op_version * Register op version for linspace,test=op_version (#30025) * fix op_register_version for compare ops, test=op_version (#30007) Co-authored-by: zhoushunjie * register ModifyAttr for instance_norm, test=op_version (#30065) * register instance norm, test=op_version * add trace op_register_version and fix version bug; test=op_version (#30000) * fix a bug in op_version_registry, test=develop, test=op_version (#29994) * Add version checking, test=op_version (#30129) * fix a bug in gaussian_random_op version, test=release/2.0 Co-authored-by: LielinJiang <50691816+LielinJiang@users.noreply.github.com> Co-authored-by: cc <52520497+juncaipeng@users.noreply.github.com> Co-authored-by: Qi Li Co-authored-by: Jack Zhou Co-authored-by: Guo Sheng Co-authored-by: wangxinxin08 <69842442+wangxinxin08@users.noreply.github.com> Co-authored-by: wawltor Co-authored-by: FlyingQianMM <245467267@qq.com> Co-authored-by: ceci3 Co-authored-by: hutuxian Co-authored-by: chalsliu <45041955+chalsliu@users.noreply.github.com> Co-authored-by: wangguanzhong Co-authored-by: ShenLiang Co-authored-by: yinhaofeng <66763551+yinhaofeng@users.noreply.github.com> Co-authored-by: channings Co-authored-by: chentianyu03 Co-authored-by: ruri --- .../ir/conv_affine_channel_fuse_pass.cc | 2 +- .../fluid/framework/ir/conv_bn_fuse_pass.cc | 2 +- .../ir/conv_elementwise_add2_act_fuse_pass.cc | 2 +- .../ir/conv_elementwise_add_act_fuse_pass.cc | 2 +- .../ir/conv_elementwise_add_fuse_pass.cc | 2 +- .../ir/embedding_fc_lstm_fuse_pass.cc | 2 +- paddle/fluid/framework/ir/fc_fuse_pass.cc | 2 +- paddle/fluid/framework/ir/fc_gru_fuse_pass.cc | 6 +- .../fluid/framework/ir/fc_lstm_fuse_pass.cc | 2 +- .../framework/ir/map_matmul_to_mul_pass.cc | 6 +- .../ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc | 4 +- .../conv_elementwise_add_mkldnn_fuse_pass.cc | 69 +++++++++---------- .../matmul_transpose_reshape_fuse_pass.cc | 2 +- .../ir/mkldnn/mkldnn_inplace_pass.cc | 2 +- .../ir/mkldnn/scale_matmul_fuse_pass.cc | 2 +- .../ir/multihead_matmul_fuse_pass.cc | 4 +- .../framework/ir/seq_concat_fc_fuse_pass.cc | 2 +- .../ir/seqconv_eltadd_relu_fuse_pass.cc | 2 +- .../framework/ir/skip_layernorm_fuse_pass.cc | 2 +- 
.../framework/ir/squared_mat_sub_fuse_pass.cc | 6 +- .../ir/unsqueeze2_eltwise_fuse_pass.cc | 2 +- paddle/fluid/framework/op_version_registry.cc | 23 ------- paddle/fluid/framework/op_version_registry.h | 35 +++++++++- .../ir_passes/tensorrt_subgraph_pass.cc | 8 +-- paddle/fluid/operators/allclose_op.cc | 31 ++++++++- paddle/fluid/operators/arg_max_op.cc | 8 +-- paddle/fluid/operators/arg_min_op.cc | 8 +-- paddle/fluid/operators/coalesce_tensor_op.cc | 12 ++++ .../fluid/operators/controlflow/compare_op.cc | 4 +- paddle/fluid/operators/conv_transpose_op.cc | 35 +++++++++- .../detection/generate_proposals_op.cc | 7 ++ .../operators/elementwise/CMakeLists.txt | 6 +- .../elementwise/elementwise_add_op.cc | 11 ++- .../elementwise/elementwise_div_op.cc | 10 +++ .../elementwise/elementwise_floordiv_op.cc | 9 +++ .../elementwise/elementwise_max_op.cc | 9 +++ .../elementwise/elementwise_min_op.cc | 9 +++ .../elementwise/elementwise_mod_op.cc | 9 +++ .../elementwise/elementwise_mul_op.cc | 9 +++ .../operators/elementwise/elementwise_op.h | 1 + .../elementwise/elementwise_pow_op.cc | 9 +++ .../elementwise/elementwise_sub_op.cc | 9 +++ paddle/fluid/operators/fake_dequantize_op.cc | 8 +++ paddle/fluid/operators/fake_quantize_op.cc | 8 +++ paddle/fluid/operators/flip_op.cc | 11 ++- paddle/fluid/operators/fused/fusion_gru_op.cc | 11 +++ paddle/fluid/operators/gaussian_random_op.cc | 2 +- paddle/fluid/operators/grid_sampler_op.cc | 9 +++ paddle/fluid/operators/instance_norm_op.cc | 18 +++++ paddle/fluid/operators/linspace_op.cc | 9 +++ paddle/fluid/operators/matmul_op.cc | 12 ++++ paddle/fluid/operators/pixel_shuffle_op.cc | 8 +++ paddle/fluid/operators/print_op.cc | 9 +++ paddle/fluid/operators/rank_attention_op.cc | 16 +++++ paddle/fluid/operators/roi_align_op.cc | 7 ++ paddle/fluid/operators/roi_pool_op.cc | 7 ++ paddle/fluid/operators/trace_op.cc | 23 ++++++- paddle/fluid/operators/unique_op.cc | 2 +- 58 files changed, 433 insertions(+), 114 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc b/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc index c0ebf6de9d..407ef0958e 100644 --- a/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc @@ -244,5 +244,5 @@ REGISTER_PASS_CAPABILITY(conv_eltwiseadd_affine_channel_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("affine_channel", 0)); diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc index 6f8591fd82..a232f7ebb8 100644 --- a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc @@ -389,5 +389,5 @@ REGISTER_PASS_CAPABILITY(conv_eltwiseadd_bn_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("batch_norm", 0)); diff --git a/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc index 545beb34e7..e765617170 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc @@ -122,6 +122,6 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add2_act_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - 
.EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("relu", 0) .EQ("identity", 0)); diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc index d01a2f2622..24263e6632 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc @@ -109,6 +109,6 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add_act_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("relu", 0) .EQ("identity", 0)); diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc index e34a2d9658..9121047d2f 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc @@ -95,4 +95,4 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0)); + .LE("elementwise_add", 1)); diff --git a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc index 02e3e2542f..855ac2eb61 100644 --- a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc @@ -263,6 +263,6 @@ REGISTER_PASS_CAPABILITY(embedding_fc_lstm_fuse_pass) paddle::framework::compatible::OpVersionComparatorCombination() .EQ("lookup_table_v2", 0) .EQ("mul", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("lstm", 0) .EQ("fused_embedding_fc_lstm", 0)); diff --git a/paddle/fluid/framework/ir/fc_fuse_pass.cc b/paddle/fluid/framework/ir/fc_fuse_pass.cc index 0248aeedd0..103fa0f5fa 100644 --- a/paddle/fluid/framework/ir/fc_fuse_pass.cc +++ b/paddle/fluid/framework/ir/fc_fuse_pass.cc @@ -187,6 +187,6 @@ REGISTER_PASS_CAPABILITY(fc_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("mul", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("relu", 0) .EQ("fc", 0)); diff --git a/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc b/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc index c4515bbc45..f0e1beeae8 100644 --- a/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc +++ b/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc @@ -203,11 +203,11 @@ REGISTER_PASS_CAPABILITY(mul_gru_fuse_pass) paddle::framework::compatible::OpVersionComparatorCombination() .EQ("mul", 0) .EQ("gru", 0) - .EQ("fusion_gru", 0)); + .LE("fusion_gru", 1)); REGISTER_PASS_CAPABILITY(fc_gru_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("mul", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("gru", 0) - .EQ("fusion_gru", 0)); + .LE("fusion_gru", 1)); diff --git a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc index 9dca4d1b29..d515e5e4d9 100644 --- a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc @@ -202,7 +202,7 @@ REGISTER_PASS_CAPABILITY(fc_lstm_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("mul", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("lstm", 0) .EQ("fusion_lstm", 0)); REGISTER_PASS_CAPABILITY(mul_lstm_fuse_pass) diff --git 
a/paddle/fluid/framework/ir/map_matmul_to_mul_pass.cc b/paddle/fluid/framework/ir/map_matmul_to_mul_pass.cc index 76148a9007..8c4e6f3305 100644 --- a/paddle/fluid/framework/ir/map_matmul_to_mul_pass.cc +++ b/paddle/fluid/framework/ir/map_matmul_to_mul_pass.cc @@ -227,7 +227,7 @@ REGISTER_PASS(map_matmul_to_mul_pass, paddle::framework::ir::MapMatmul2MulPass); REGISTER_PASS_CAPABILITY(map_matmul_to_mul_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() - .EQ("matmul", 0) + .LE("matmul", 1) .EQ("mul", 0)); REGISTER_PASS(squeeze2_matmul_fuse_pass, @@ -235,7 +235,7 @@ REGISTER_PASS(squeeze2_matmul_fuse_pass, REGISTER_PASS_CAPABILITY(squeeze2_matmul_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() - .EQ("matmul", 0) + .LE("matmul", 1) .EQ("squeeze2", 0) .EQ("mul", 0)); @@ -244,6 +244,6 @@ REGISTER_PASS(reshape2_matmul_fuse_pass, REGISTER_PASS_CAPABILITY(reshape2_matmul_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() - .EQ("matmul", 0) + .LE("matmul", 1) .EQ("reshape2", 0) .EQ("mul", 0)); diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc index b0849d74b6..10691ded66 100644 --- a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc @@ -153,7 +153,7 @@ REGISTER_PASS_CAPABILITY(conv_bias_mkldnn_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0)); + .LE("elementwise_add", 1)); REGISTER_PASS(conv_transpose_bias_mkldnn_fuse_pass, paddle::framework::ir::Conv2DTransposeBiasFusePass); @@ -161,7 +161,7 @@ REGISTER_PASS_CAPABILITY(conv_transpose_bias_mkldnn_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d_transpose", 1) - .EQ("elementwise_add", 0)); + .LE("elementwise_add", 1)); REGISTER_PASS(conv3d_bias_mkldnn_fuse_pass, paddle::framework::ir::Conv3DBiasFusePass); diff --git a/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc index a837b42b3e..fa1544f780 100644 --- a/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -228,20 +228,19 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseConvAsX( pattern->NewNode(elementwise_add_pattern.elementwise_add_y_repr())); conv_output->AsIntermediate(); - auto get_node_from_elementwise_add = - [&elementwise_add_pattern]( - const GraphPatternDetector::subgraph_t& subgraph) + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) -> std::tuple<Node*, Node*, Node*> { - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_y, elementwise_add_y, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, - elementwise_add_pattern); - - return std::make_tuple(elementwise_add_op, elementwise_add_y, - elementwise_add_out); - }; + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_y, elementwise_add_y, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out,
elementwise_add_pattern); + + return std::make_tuple(elementwise_add_op, elementwise_add_y, + elementwise_add_out); + }; return ExecuteHandleOnGraph( &gpd, graph_with_stats, @@ -266,20 +265,19 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseConvAsY( conv_output); conv_output->AsIntermediate(); - auto get_node_from_elementwise_add = - [&elementwise_add_pattern]( - const GraphPatternDetector::subgraph_t& subgraph) + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) -> std::tuple<Node*, Node*, Node*> { - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, - elementwise_add_pattern); - - return std::make_tuple(elementwise_add_op, elementwise_add_x, - elementwise_add_out); - }; + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); + + return std::make_tuple(elementwise_add_op, elementwise_add_x, + elementwise_add_out); + }; return ExecuteHandleOnGraph( &gpd, graph_with_stats, @@ -306,17 +304,16 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseProjectionConv( conv_x_output->AsIntermediate(); conv_y_output->AsIntermediate(); - auto get_node_from_elementwise_add = - [&elementwise_add_pattern]( - const GraphPatternDetector::subgraph_t& subgraph) + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) -> std::tuple<Node*, Node*> { - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, - elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); - return std::make_tuple(elementwise_add_op, elementwise_add_out); - }; + return std::make_tuple(elementwise_add_op, elementwise_add_out); + }; return ExecuteHandleOnGraph( &gpd, graph_with_stats, @@ -351,4 +348,4 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add_mkldnn_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .LE("conv2d", 1) - .EQ("elementwise_add", 0)); + .LE("elementwise_add", 1)); diff --git a/paddle/fluid/framework/ir/mkldnn/matmul_transpose_reshape_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/matmul_transpose_reshape_fuse_pass.cc index 41b859f0af..fbc97a0a92 100644 --- a/paddle/fluid/framework/ir/mkldnn/matmul_transpose_reshape_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/matmul_transpose_reshape_fuse_pass.cc @@ -103,6 +103,6 @@ REGISTER_PASS(matmul_transpose_reshape_fuse_pass, REGISTER_PASS_CAPABILITY(matmul_transpose_reshape_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() - .EQ("matmul", 0) + .LE("matmul", 1) .EQ("transpose", 0) .EQ("reshape", 0)); diff --git a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc b/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc index d655837f74..d2763bd6a6 100644 --- a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc @@ -221,5 
+221,5 @@ REGISTER_PASS_CAPABILITY(mkldnn_inplace_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("softmax", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("tanh", 0)); diff --git a/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc index 0784a1a024..a552e42619 100644 --- a/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc @@ -96,4 +96,4 @@ REGISTER_PASS_CAPABILITY(scale_matmul_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("scale", 0) - .EQ("matmul", 0)); + .LE("matmul", 1)); diff --git a/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc b/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc index cd6d1d5703..224272a5a0 100644 --- a/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc +++ b/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc @@ -716,9 +716,9 @@ REGISTER_PASS_CAPABILITY(multihead_matmul_fuse_pass_v2) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("mul", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("reshape2", 0) .EQ("transpose2", 0) .EQ("scale", 0) - .EQ("matmul", 0) + .LE("matmul", 1) .EQ("softmax", 0)); diff --git a/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc b/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc index 4101d59308..dfbf97c69b 100644 --- a/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc +++ b/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc @@ -262,7 +262,7 @@ REGISTER_PASS_CAPABILITY(seq_concat_fc_fuse_pass) .EQ("sequence_expand", 0) .EQ("concat", 0) .EQ("mul", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("sigmoid", 0) .EQ("tanh", 0) .EQ("relu", 0) diff --git a/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.cc b/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.cc index d9a1348e05..c2e18ca1ef 100644 --- a/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.cc +++ b/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.cc @@ -106,5 +106,5 @@ REGISTER_PASS_CAPABILITY(seqconv_eltadd_relu_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("sequence_conv", 0) - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("relu", 0)); diff --git a/paddle/fluid/framework/ir/skip_layernorm_fuse_pass.cc b/paddle/fluid/framework/ir/skip_layernorm_fuse_pass.cc index b708f2eff1..69bf3eda61 100644 --- a/paddle/fluid/framework/ir/skip_layernorm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/skip_layernorm_fuse_pass.cc @@ -193,5 +193,5 @@ REGISTER_PASS(skip_layernorm_fuse_pass, REGISTER_PASS_CAPABILITY(skip_layernorm_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() - .EQ("elementwise_add", 0) + .LE("elementwise_add", 1) .EQ("layer_norm", 0)); diff --git a/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.cc b/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.cc index 542aadbe53..c0420e6b5f 100644 --- a/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.cc +++ b/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.cc @@ -389,10 +389,10 @@ REGISTER_PASS(squared_mat_sub_fuse_pass, REGISTER_PASS_CAPABILITY(squared_mat_sub_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() - .EQ("matmul", 0) + .LE("matmul", 1) .EQ("matmul_v2", 0) .EQ("square", 0) - .EQ("elementwise_mul", 0) - 
.EQ("elementwise_sub", 0) + .LE("elementwise_mul", 1) + .LE("elementwise_sub", 1) .EQ("fill_constant", 1) .EQ("fusion_squared_mat_sub", 0)); diff --git a/paddle/fluid/framework/ir/unsqueeze2_eltwise_fuse_pass.cc b/paddle/fluid/framework/ir/unsqueeze2_eltwise_fuse_pass.cc index f984744532..d4d3c41e65 100644 --- a/paddle/fluid/framework/ir/unsqueeze2_eltwise_fuse_pass.cc +++ b/paddle/fluid/framework/ir/unsqueeze2_eltwise_fuse_pass.cc @@ -131,4 +131,4 @@ REGISTER_PASS_CAPABILITY(unsqueeze2_eltwise_fuse_pass) .AddCombination( paddle::framework::compatible::OpVersionComparatorCombination() .EQ("unsqueeze2", 0) - .EQ("elementwise_mul", 0)); + .LE("elementwise_mul", 1)); diff --git a/paddle/fluid/framework/op_version_registry.cc b/paddle/fluid/framework/op_version_registry.cc index bab1f20079..bc9963b392 100644 --- a/paddle/fluid/framework/op_version_registry.cc +++ b/paddle/fluid/framework/op_version_registry.cc @@ -18,29 +18,6 @@ namespace paddle { namespace framework { namespace compatible { -namespace { -template <typename InfoType, OpUpdateType type__> -OpUpdate<InfoType, type__>* new_update(InfoType&& info) { - return new OpUpdate<InfoType, type__>(info); -} -} - -OpVersionDesc&& OpVersionDesc::ModifyAttr(const std::string& name, - const std::string& remark, - const OpAttrVariantT& default_value) { - infos_.emplace_back(new_update<OpAttrInfo, OpUpdateType::kModifyAttr>( - OpAttrInfo(name, remark, default_value))); - return std::move(*this); -} - -OpVersionDesc&& OpVersionDesc::NewAttr(const std::string& name, - const std::string& remark, - const OpAttrVariantT& default_value) { - infos_.emplace_back(new_update<OpAttrInfo, OpUpdateType::kNewAttr>( - OpAttrInfo(name, remark, default_value))); - return std::move(*this); -} - OpVersionDesc&& OpVersionDesc::NewInput(const std::string& name, const std::string& remark) { infos_.emplace_back( diff --git a/paddle/fluid/framework/op_version_registry.h b/paddle/fluid/framework/op_version_registry.h index d8321939f6..83557d5572 100644 --- a/paddle/fluid/framework/op_version_registry.h +++ b/paddle/fluid/framework/op_version_registry.h @@ -118,13 +118,44 @@ class OpUpdate : public OpUpdateBase { OpUpdateType type_; }; +template <typename InfoType, OpUpdateType type__> +OpUpdate<InfoType, type__>* new_update(InfoType&& info) { + return new OpUpdate<InfoType, type__>(info); +} + +template <typename T> +OpAttrVariantT op_attr_wrapper(const T& val) { + return OpAttrVariantT{val}; +} + +template <size_t N> +OpAttrVariantT op_attr_wrapper(const char (&val)[N]) { + PADDLE_ENFORCE_EQ( + val[N - 1], 0, + platform::errors::InvalidArgument( + "The argument of operator register %c is illegal.", val[N - 1])); + return OpAttrVariantT{std::string{val}}; +} + class OpVersionDesc { public: /* Compatibility upgrade */ + template <typename T> OpVersionDesc&& ModifyAttr(const std::string& name, const std::string& remark, - const OpAttrVariantT& default_value); + const T& default_value) { + infos_.emplace_back(new_update<OpAttrInfo, OpUpdateType::kModifyAttr>( + OpAttrInfo(name, remark, op_attr_wrapper(default_value)))); + return std::move(*this); + } + + template <typename T> OpVersionDesc&& NewAttr(const std::string& name, const std::string& remark, - const OpAttrVariantT& default_value); + const T& default_value) { + infos_.emplace_back(new_update<OpAttrInfo, OpUpdateType::kNewAttr>( + OpAttrInfo(name, remark, op_attr_wrapper(default_value)))); + return std::move(*this); + } + OpVersionDesc&& NewInput(const std::string& name, const std::string& remark); OpVersionDesc&& NewOutput(const std::string& name, const std::string& remark); OpVersionDesc&& BugfixWithBehaviorChanged(const std::string& remark); diff --git a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc index 10204271c4..61117cc603 100644 --- 
a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc +++ b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc @@ -390,8 +390,8 @@ REGISTER_PASS_CAPABILITY(tensorrt_subgraph_pass) .EQ("concat", 0) .EQ("tanh", 0) .EQ("pad", 0) - .EQ("elementwise_add", 0) - .EQ("elementwise_mul", 0) + .LE("elementwise_add", 1) + .LE("elementwise_mul", 1) .EQ("prelu", 0) .LE("conv2d_transpose", 1) .LE("leaky_relu", 1) @@ -399,8 +399,8 @@ .EQ("shuffle_channel", 0) .EQ("swish", 0) .EQ("split", 0) - .EQ("instance_norm", 0) + .LE("instance_norm", 1) .EQ("gelu", 0) .EQ("layer_norm", 0) .EQ("scale", 0) - .EQ("matmul", 0)); + .LE("matmul", 1)); diff --git a/paddle/fluid/operators/allclose_op.cc b/paddle/fluid/operators/allclose_op.cc index e452d3c21b..edd626449c 100644 --- a/paddle/fluid/operators/allclose_op.cc +++ b/paddle/fluid/operators/allclose_op.cc @@ -160,12 +160,37 @@ REGISTER_OPERATOR( REGISTER_OP_CPU_KERNEL(allclose, ops::AllcloseKernel, ops::AllcloseKernel); +/* ========================== register checkpoint ===========================*/ REGISTER_OP_VERSION(allclose) .AddCheckpoint( - R"ROC( - Upgrade allclose add 2 attributes [atol, rtol]. - )ROC", + R"ROC(Upgrade allclose, add two new inputs [Rtol] and [Atol].)ROC", paddle::framework::compatible::OpVersionDesc() + .NewInput("Rtol", + "The added input 'Rtol' is not " + "dispensable.") + .NewInput("Atol", + "The added input 'Atol' is not " + "dispensable.")) + .AddCheckpoint( + R"ROC(Delete two float attributes [rtol] and [atol], + then add two string attributes [rtol, atol]. A float attribute + cannot represent high-precision floating-point values, and the + framework does not support double attributes, so a string + instead of a double is used here to represent high-precision + floating-point values. + )ROC", + paddle::framework::compatible::OpVersionDesc() + .DeleteAttr("rtol", + "The attribute 'rtol' is deleted. " + "The reason why it is deleted is that " + "attributes do not support a float64 value " + "and it is changed to a tensor.") + .DeleteAttr("atol", + "The attribute 'atol' is deleted. " + "The reason why it is deleted is that " + "attributes do not support a float64 value " + "and it is changed to a tensor.") .NewAttr("rtol", "(string) The relative tolerance. Default: :math:`1e-5` .", std::string("1e-5")) diff --git a/paddle/fluid/operators/arg_max_op.cc b/paddle/fluid/operators/arg_max_op.cc index a82134921e..0f5c048b6b 100644 --- a/paddle/fluid/operators/arg_max_op.cc +++ b/paddle/fluid/operators/arg_max_op.cc @@ -44,8 +44,8 @@ REGISTER_OP_VERSION(arg_max) false) .ModifyAttr( "dtype", - "change the default value of dtype, the older version " - "is -1, means return the int64 indices." - "The new version is 3, return the int64 indices directly." - "And supporting the dtype of -1 in new version.", + "Change the default value of dtype from -1 to 3, " "which means the int64 indices are returned directly. " "The reason for changing the default value is that " "int64 corresponds to the value 3 of VarType in framework.proto.", 3)); diff --git a/paddle/fluid/operators/arg_min_op.cc b/paddle/fluid/operators/arg_min_op.cc index 23ed7d727c..0a4ba6fb0b 100644 --- a/paddle/fluid/operators/arg_min_op.cc +++ b/paddle/fluid/operators/arg_min_op.cc @@ -44,8 +44,8 @@ REGISTER_OP_VERSION(arg_min) false) .ModifyAttr( "dtype", - "change the default value of dtype, the older version " - "is -1, means return the int64 indices."
- "The new version is 3, return the int64 indices directly." - "And supporting the dtype of -1 in new version.", + "Change the default value of dtype from -1 to 3, " "which means the int64 indices are returned directly. " "The reason for changing the default value is that " "int64 corresponds to the value 3 of VarType in framework.proto.", 3)); diff --git a/paddle/fluid/operators/coalesce_tensor_op.cc b/paddle/fluid/operators/coalesce_tensor_op.cc index 1b133db733..464d8c8d56 100644 --- a/paddle/fluid/operators/coalesce_tensor_op.cc +++ b/paddle/fluid/operators/coalesce_tensor_op.cc @@ -15,6 +15,7 @@ #include #include #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/var_type.h" #include "paddle/fluid/operators/math/math_function.h" @@ -297,3 +298,14 @@ REGISTER_OP_CUDA_KERNEL( ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel); #endif + +REGISTER_OP_VERSION(coalesce_tensor) + .AddCheckpoint( + R"ROC( + Upgrade coalesce_tensor: add a new attribute [use_align].)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "use_align", + "In order to optionally take memory alignment into account when " + "coalescing tensors. The default value is true to be compatible " + "with earlier versions.", + true)); diff --git a/paddle/fluid/operators/controlflow/compare_op.cc b/paddle/fluid/operators/controlflow/compare_op.cc index 21c28f9818..3cad86d96c 100644 --- a/paddle/fluid/operators/controlflow/compare_op.cc +++ b/paddle/fluid/operators/controlflow/compare_op.cc @@ -133,9 +133,9 @@ class CompareOp : public framework::OperatorWithKernel { REGISTER_OP_VERSION(op_type) \ .AddCheckpoint( \ R"ROC(Upgrade compare ops, add a new attribute [force_cpu])ROC", \ - paddle::framework::compatible::OpVersionDesc().NewAttr( \ + paddle::framework::compatible::OpVersionDesc().ModifyAttr( \ "force_cpu", \ - "In order to force fill output variable to cpu memory.", \ + "In order to force fill output variable to gpu memory.", \ false)); #define REGISTER_COMPARE_OP(op_type, _equation) \ diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc index 6c48448555..d6bdd848ba 100644 --- a/paddle/fluid/operators/conv_transpose_op.cc +++ b/paddle/fluid/operators/conv_transpose_op.cc @@ -578,4 +578,37 @@ REGISTER_OP_VERSION(conv_transpose) "output_padding", "In order to add additional size to one side of each dimension " "in the output", - {})); + std::vector<int>{})); + +REGISTER_OP_VERSION(conv2d_transpose) + .AddCheckpoint( + R"ROC( + Upgrade conv2d transpose to add a new attribute [output_padding]. + )ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "output_padding", + "In order to add additional size to one side of each dimension " + "in the output", + std::vector<int>{})); + +REGISTER_OP_VERSION(conv3d_transpose) + .AddCheckpoint( + R"ROC( + Upgrade conv3d transpose to add a new attribute [output_padding]. + )ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "output_padding", + "In order to add additional size to one side of each dimension " + "in the output", + std::vector<int>{})); + +REGISTER_OP_VERSION(depthwise_conv2d_transpose) + .AddCheckpoint( + R"ROC( + Upgrade depthwise conv2d transpose to add a new attribute [output_padding]. 
+ )ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "output_padding", + "In order to add additional size to one side of each dimension " + "in the output", + std::vector<int>{})); diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cc b/paddle/fluid/operators/detection/generate_proposals_op.cc index 2bf5e6c5e0..805ab8aad0 100644 --- a/paddle/fluid/operators/detection/generate_proposals_op.cc +++ b/paddle/fluid/operators/detection/generate_proposals_op.cc @@ -303,6 +303,13 @@ REGISTER_OPERATOR( REGISTER_OP_CPU_KERNEL(generate_proposals, ops::GenerateProposalsKernel, ops::GenerateProposalsKernel); REGISTER_OP_VERSION(generate_proposals) + .AddCheckpoint( + R"ROC( + Incompatible upgrade of output [RpnRoisLod])ROC", + paddle::framework::compatible::OpVersionDesc().DeleteOutput( + "RpnRoisLod", + "Delete RpnRoisLod due to incorrect output name and " + "it is not used in object detection models yet.")) .AddCheckpoint( R"ROC( + Upgrade generate_proposals add a new output [RpnRoisNum])ROC", diff --git a/paddle/fluid/operators/elementwise/CMakeLists.txt b/paddle/fluid/operators/elementwise/CMakeLists.txt index 94886066ca..06ca98e526 100644 --- a/paddle/fluid/operators/elementwise/CMakeLists.txt +++ b/paddle/fluid/operators/elementwise/CMakeLists.txt @@ -1,5 +1,9 @@ include(operators) -register_operators() +if(WITH_UNITY_BUILD) + # Load Unity Build rules for operators in paddle/fluid/operators/elementwise. + include(unity_build_rule.cmake) +endif() +register_operators(DEPS op_version_registry) cc_test(test_elementwise_add_op_inplace SRCS test_elementwise_add_op_inplace.cc DEPS op_registry elementwise_add_op scope device_context enforce executor) cc_test(test_elementwise_div_grad_grad SRCS test_elementwise_div_grad_grad.cc DEPS op_registry elementwise_div_op scope device_context enforce executor) diff --git a/paddle/fluid/operators/elementwise/elementwise_add_op.cc b/paddle/fluid/operators/elementwise/elementwise_add_op.cc index 9885e9c095..29aa5df27c 100644 --- a/paddle/fluid/operators/elementwise/elementwise_add_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_add_op.cc @@ -17,7 +17,6 @@ limitations under the License. */ #include #include -#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/operators/elementwise/elementwise_op.h" #include "paddle/fluid/platform/complex128.h" #include "paddle/fluid/platform/complex64.h" @@ -178,3 +177,13 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseAddKernel); + +REGISTER_OP_VERSION(elementwise_add) + .AddCheckpoint( + R"ROC(Register elementwise_add for adding the attribute of + Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_add.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_div_op.cc b/paddle/fluid/operators/elementwise/elementwise_div_op.cc index f14aee8e49..0252e6dfff 100644 --- a/paddle/fluid/operators/elementwise/elementwise_div_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_div_op.cc @@ -15,6 +15,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/elementwise/elementwise_div_op.h" #include #include + #include "paddle/fluid/operators/elementwise/elementwise_op.h" #include "paddle/fluid/platform/complex128.h" #include "paddle/fluid/platform/complex64.h" @@ -162,3 +163,12 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseDivDoubleGradKernel); + +REGISTER_OP_VERSION(elementwise_div) + .AddCheckpoint( + R"ROC(Register elementwise_div for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_div.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc b/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc index ddd69203fd..b28f713256 100644 --- a/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc @@ -69,3 +69,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseFloorDivKernel, ops::ElementwiseFloorDivKernel); + +REGISTER_OP_VERSION(elementwise_floordiv) + .AddCheckpoint( + R"ROC(Register elementwise_floordiv for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_floordiv.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_max_op.cc b/paddle/fluid/operators/elementwise/elementwise_max_op.cc index 38607d4558..dde65c8199 100644 --- a/paddle/fluid/operators/elementwise/elementwise_max_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_max_op.cc @@ -94,3 +94,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseMaxGradKernel, ops::ElementwiseMaxGradKernel, ops::ElementwiseMaxGradKernel); + +REGISTER_OP_VERSION(elementwise_max) + .AddCheckpoint( + R"ROC(Register elementwise_max for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_max.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_min_op.cc b/paddle/fluid/operators/elementwise/elementwise_min_op.cc index 8f544c7865..174684e3c8 100644 --- a/paddle/fluid/operators/elementwise/elementwise_min_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_min_op.cc @@ -94,3 +94,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseMinGradKernel, ops::ElementwiseMinGradKernel, ops::ElementwiseMinGradKernel); + +REGISTER_OP_VERSION(elementwise_min) + .AddCheckpoint( + R"ROC(Register elementwise_min for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_min.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_mod_op.cc b/paddle/fluid/operators/elementwise/elementwise_mod_op.cc index d8ad0a353c..2ac3aa6ebd 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mod_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_mod_op.cc @@ -69,3 +69,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwiseModKernel, ops::ElementwiseModFPKernel, ops::ElementwiseModFPKernel); + +REGISTER_OP_VERSION(elementwise_mod) + .AddCheckpoint( + R"ROC(Register elementwise_mod for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( 
+ "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_mod.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_mul_op.cc b/paddle/fluid/operators/elementwise/elementwise_mul_op.cc index 28b131e729..6bf296f0e0 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mul_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.cc @@ -161,3 +161,12 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseMulDoubleGradKernel); + +REGISTER_OP_VERSION(elementwise_mul) + .AddCheckpoint( + R"ROC(Register elementwise_mul for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_mul.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h index f426a54f79..be10376f61 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_op.h @@ -22,6 +22,7 @@ limitations under the License. */ #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/operators/common_infer_shape_functions.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" diff --git a/paddle/fluid/operators/elementwise/elementwise_pow_op.cc b/paddle/fluid/operators/elementwise/elementwise_pow_op.cc index ea0e8e7c01..d564cc3717 100644 --- a/paddle/fluid/operators/elementwise/elementwise_pow_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_pow_op.cc @@ -83,3 +83,12 @@ REGISTER_OP_CPU_KERNEL( ops::ElementwisePowGradKernel, ops::ElementwisePowGradKernel, ops::ElementwisePowGradKernel); + +REGISTER_OP_VERSION(elementwise_pow) + .AddCheckpoint( + R"ROC(Register elementwise_pow for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_pow.", + 1.0f)); diff --git a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc index d72eacbfd4..80ce42109a 100644 --- a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc @@ -156,3 +156,12 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::ElementwiseSubDoubleGradKernel); + +REGISTER_OP_VERSION(elementwise_sub) + .AddCheckpoint( + R"ROC(Register elementwise_sub for adding the attribute of Scale_y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_y", + "In order to support the function of scaling the input Y when " + "using the operator of elementwise_sub.", + 1.0f)); diff --git a/paddle/fluid/operators/fake_dequantize_op.cc b/paddle/fluid/operators/fake_dequantize_op.cc index 9b0328b094..b70fe78e1a 100644 --- a/paddle/fluid/operators/fake_dequantize_op.cc +++ b/paddle/fluid/operators/fake_dequantize_op.cc @@ -15,6 +15,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/fake_dequantize_op.h" #include #include +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace operators { @@ -238,3 +239,10 @@ REGISTER_OPERATOR( REGISTER_OP_CPU_KERNEL(fake_channel_wise_dequantize_max_abs, ops::FakeChannelWiseDequantizeMaxAbsKernel, ops::FakeChannelWiseDequantizeMaxAbsKernel); + +REGISTER_OP_VERSION(fake_channel_wise_dequantize_max_abs) + .AddCheckpoint( + R"ROC(Add a new attribute [quant_axis] for applying per-channel + dequantization to conv2d_transpose and mul ops.)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "quant_axis", "The axis for dequantization.", 0)); diff --git a/paddle/fluid/operators/fake_quantize_op.cc b/paddle/fluid/operators/fake_quantize_op.cc index 04fa8db9a5..df4debb620 100644 --- a/paddle/fluid/operators/fake_quantize_op.cc +++ b/paddle/fluid/operators/fake_quantize_op.cc @@ -16,6 +16,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/operators/clip_op.h" #include "paddle/fluid/platform/transform.h" @@ -805,3 +806,10 @@ REGISTER_OPERATOR(fake_channel_wise_quantize_dequantize_abs_max, REGISTER_OP_CPU_KERNEL( fake_channel_wise_quantize_dequantize_abs_max, ops::FakeChannelWiseQuantizeDequantizeAbsMaxKernel); + +REGISTER_OP_VERSION(fake_channel_wise_quantize_abs_max) + .AddCheckpoint( + R"ROC(Add a new attribute [quant_axis] for applying per-channel + quantization to conv2d_transpose and mul ops.)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "quant_axis", "The axis for quantization.", 0)); diff --git a/paddle/fluid/operators/flip_op.cc b/paddle/fluid/operators/flip_op.cc index fc17657594..d7ed5fb767 100644 --- a/paddle/fluid/operators/flip_op.cc +++ b/paddle/fluid/operators/flip_op.cc @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/flip_op.h" - #include #include #include +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace operators { @@ -154,3 +154,12 @@ REGISTER_OP_CPU_KERNEL( ops::FlipKernel, ops::FlipKernel, ops::FlipKernel); + +/* ========================== register checkpoint ===========================*/ +REGISTER_OP_VERSION(flip) + .AddCheckpoint( + R"ROC(Upgrade flip, add new attr [axis] and delete attr [dims].)ROC", + paddle::framework::compatible::OpVersionDesc() + .NewAttr("axis", "The added attr 'axis' does not set a default value.", + boost::none) + .DeleteAttr("dims", "The attr 'dims' is deleted.")); diff --git a/paddle/fluid/operators/fused/fusion_gru_op.cc b/paddle/fluid/operators/fused/fusion_gru_op.cc index f5904039d4..71dccad0b5 100644 --- a/paddle/fluid/operators/fused/fusion_gru_op.cc +++ b/paddle/fluid/operators/fused/fusion_gru_op.cc @@ -16,6 +16,7 @@ limitations under the License. 
*/ #include // for memcpy #include #include +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/operators/jit/kernels.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/fc.h" @@ -479,3 +480,13 @@ REGISTER_OPERATOR(fusion_gru, ops::FusionGRUOp, ops::FusionGRUOpMaker); REGISTER_OP_CPU_KERNEL(fusion_gru, ops::FusionGRUKernel, ops::FusionGRUKernel); + +/* ========================== register checkpoint ===========================*/ +REGISTER_OP_VERSION(fusion_gru) + .AddCheckpoint( + R"ROC(Upgrade fusion_gru add a new attribute [Scale_weights])ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "Scale_weights", + "The added attribute 'Scale_weights' is not yet " + "registered.", + std::vector<float>{1.0f})); diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc index 840975f754..3ae9f6bdae 100644 --- a/paddle/fluid/operators/gaussian_random_op.cc +++ b/paddle/fluid/operators/gaussian_random_op.cc @@ -213,4 +213,4 @@ REGISTER_OP_VERSION(gaussian_random) .ModifyAttr( "shape", "Add the default value of shape, the default value is {}.", - {})); + std::vector<int64_t>{})); diff --git a/paddle/fluid/operators/grid_sampler_op.cc b/paddle/fluid/operators/grid_sampler_op.cc index 3d34a3d15c..e357133be4 100644 --- a/paddle/fluid/operators/grid_sampler_op.cc +++ b/paddle/fluid/operators/grid_sampler_op.cc @@ -16,6 +16,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/op_version_registry.h" #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/cudnn_helper.h" #endif @@ -237,3 +238,11 @@ REGISTER_OP_CPU_KERNEL( grid_sampler_grad, ops::GridSampleGradOpKernel, ops::GridSampleGradOpKernel); + +REGISTER_OP_VERSION(grid_sampler) + .AddCheckpoint( + R"ROC( + Upgrade grid_sampler add a new attribute [mode]. + )ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "mode", "In order to specify the interpolation mode.", "bilinear")); diff --git a/paddle/fluid/operators/instance_norm_op.cc b/paddle/fluid/operators/instance_norm_op.cc index 1018adcd93..28643ac1c0 100644 --- a/paddle/fluid/operators/instance_norm_op.cc +++ b/paddle/fluid/operators/instance_norm_op.cc @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/data_layout.h" +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/operators/math/math_function.h" namespace paddle { @@ -701,3 +702,20 @@ REGISTER_OP_CPU_KERNEL( float>, ops::InstanceNormDoubleGradKernel); + +REGISTER_OP_VERSION(instance_norm) + .AddCheckpoint( + R"ROC( + Change 'dispensable' of the inputs 'Scale' and 'Bias' from False to True in instance_norm. + )ROC", + paddle::framework::compatible::OpVersionDesc() + .ModifyAttr( + "Bias", + "The arg 'dispensable' of Input 'Bias' is changed: from " + "'False' to 'True'.", + true) + .ModifyAttr( + "Scale", + "The arg 'dispensable' of Input 'Scale' is changed: from " + "'False' to 'True'.", + true)); diff --git a/paddle/fluid/operators/linspace_op.cc b/paddle/fluid/operators/linspace_op.cc index 7cc07383bf..fe271fa5e8 100644 --- a/paddle/fluid/operators/linspace_op.cc +++ b/paddle/fluid/operators/linspace_op.cc @@ -14,6 +14,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/linspace_op.h" #include +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace operators { @@ -92,3 +93,11 @@ REGISTER_OP_CPU_KERNEL(linspace, ops::CPULinspaceKernel, ops::CPULinspaceKernel, ops::CPULinspaceKernel, ops::CPULinspaceKernel); + +REGISTER_OP_VERSION(linspace) + .AddCheckpoint( + R"ROC( + Upgrade linspace to add a new attribute [dtype]. + )ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "dtype", "In order to change the output data type.", 5)); diff --git a/paddle/fluid/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc index d45669a9f0..668445d242 100644 --- a/paddle/fluid/operators/matmul_op.cc +++ b/paddle/fluid/operators/matmul_op.cc @@ -16,6 +16,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/operators/math/blas.h" #ifdef PADDLE_WITH_MKLDNN #include "paddle/fluid/platform/mkldnn_helper.h" #endif @@ -932,3 +933,14 @@ REGISTER_OP_CUDA_KERNEL( ops::MatMulDoubleGradKernel, ops::MatMulDoubleGradKernel); #endif + +REGISTER_OP_VERSION(matmul) + .AddCheckpoint( + R"ROC(Register matmul for adding the attribute of + fused_reshape_Y)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "fused_reshape_Y", + "In order to support fusing a reshape of the input Y into the " + "matmul operator, while keeping the raw shape of the input Y.", + std::vector<int>{})); diff --git a/paddle/fluid/operators/pixel_shuffle_op.cc b/paddle/fluid/operators/pixel_shuffle_op.cc index 111a82c6cc..cb9bbe727d 100644 --- a/paddle/fluid/operators/pixel_shuffle_op.cc +++ b/paddle/fluid/operators/pixel_shuffle_op.cc @@ -11,6 +11,7 @@ limitations under the License. */ #include "paddle/fluid/operators/pixel_shuffle_op.h" #include +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace operators { @@ -185,3 +186,10 @@ REGISTER_OP_CPU_KERNEL( pixel_shuffle_grad, ops::PixelShuffleGradOpKernel, ops::PixelShuffleGradOpKernel); + +REGISTER_OP_VERSION(pixel_shuffle) + .AddCheckpoint( + R"ROC( + Compatible upgrade of pixel_shuffle, add a new attribute [data_format])ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "data_format", "Specify the data format of the input data", true)); diff --git a/paddle/fluid/operators/print_op.cc b/paddle/fluid/operators/print_op.cc index 80faf833be..c558f1852f 100644 --- a/paddle/fluid/operators/print_op.cc +++ b/paddle/fluid/operators/print_op.cc @@ -13,6 +13,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/operators/tensor_formatter.h" namespace paddle { @@ -173,3 +174,11 @@ REGISTER_OPERATOR(print, ops::PrintOp, ops::PrintOpProtoAndCheckMaker, ops::PrintOpGradientMaker, ops::PrintOpGradientMaker, ops::PrintOpInferShape, ops::PrintOpVarTypeInference); + +REGISTER_OP_VERSION(print) + .AddCheckpoint( + R"ROC(Upgrade print, add a new attribute [print_tensor_layout] to + control whether to print the tensor's layout.)ROC", + paddle::framework::compatible::OpVersionDesc().NewAttr( + "print_tensor_layout", "Whether to print the tensor's layout.", + true)); diff --git a/paddle/fluid/operators/rank_attention_op.cc b/paddle/fluid/operators/rank_attention_op.cc index 7c2b4a8b48..e5332da647 100644 --- a/paddle/fluid/operators/rank_attention_op.cc +++ b/paddle/fluid/operators/rank_attention_op.cc @@ -13,6 +13,7 @@ limitations under the License. */ #include #include #include +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace operators { @@ -178,3 +179,18 @@ REGISTER_OP_CPU_KERNEL( rank_attention, ops::RankAttentionKernel, ops::RankAttentionKernel); + +REGISTER_OP_VERSION(rank_attention) + .AddCheckpoint( + R"ROC( + Upgrade rank_attention, add 1 output [InputHelp] and 1 attribute + [MaxSize]. + )ROC", + paddle::framework::compatible::OpVersionDesc() + .NewOutput("InputHelp", + "Output tensor of the rank_attention operator, " + "used to assist calculation in the backward process.") + .NewAttr( + "MaxSize", + "The pre-allocated GPU memory size set for the forward calculation.", + 0)); diff --git a/paddle/fluid/operators/roi_align_op.cc b/paddle/fluid/operators/roi_align_op.cc index 0eeb7e0bb2..6a4a88a004 100644 --- a/paddle/fluid/operators/roi_align_op.cc +++ b/paddle/fluid/operators/roi_align_op.cc @@ -233,6 +233,13 @@ REGISTER_OP_CPU_KERNEL( ops::CPUROIAlignGradOpKernel, ops::CPUROIAlignGradOpKernel); REGISTER_OP_VERSION(roi_align) + .AddCheckpoint( + R"ROC( + Incompatible upgrade of input [RpnRoisLod])ROC", + paddle::framework::compatible::OpVersionDesc().DeleteInput( + "RpnRoisLod", + "Delete RpnRoisLod due to incorrect input name and " + "it is not used in object detection models yet.")) .AddCheckpoint( R"ROC( Upgrade roi_align add a new input [RoisNum])ROC", diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc index be3187b751..a512e7dcd6 100644 --- a/paddle/fluid/operators/roi_pool_op.cc +++ b/paddle/fluid/operators/roi_pool_op.cc @@ -227,6 +227,13 @@ REGISTER_OP_CPU_KERNEL( ops::CPUROIPoolGradOpKernel, ops::CPUROIPoolGradOpKernel); REGISTER_OP_VERSION(roi_pool) + .AddCheckpoint( + R"ROC( + Incompatible upgrade of input [RpnRoisLod])ROC", + paddle::framework::compatible::OpVersionDesc().DeleteInput( + "RpnRoisLod", + "Delete RpnRoisLod due to incorrect input name and " + "it is not used in object detection models yet.")) .AddCheckpoint( R"ROC( Upgrade roi_pool add a new input [RoisNum])ROC", diff --git a/paddle/fluid/operators/trace_op.cc b/paddle/fluid/operators/trace_op.cc index 1b9e7c10eb..623d4c7fc2 100644 --- a/paddle/fluid/operators/trace_op.cc +++ b/paddle/fluid/operators/trace_op.cc @@ -13,6 +13,7 @@ // limitations under the License. 
#include "paddle/fluid/operators/trace_op.h" +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace operators { @@ -89,13 +90,13 @@ class TraceOpMaker : public framework::OpProtoAndCheckerMaker { R"DOC((int, default 0), the first axis of the 2-D planes from which the diagonals should be taken. Can be either positive or negative. Default: 0. )DOC") - .SetDefault(-2); + .SetDefault(0); AddAttr<int>( "axis2", R"DOC((int, default 1), the second axis of the 2-D planes from which the diagonals should be taken. Can be either positive or negative. Default: 1. )DOC") - .SetDefault(-1); + .SetDefault(1); AddComment(R"DOC( Trace Operator. Return the sum along diagonals of the input tensor. @@ -178,3 +179,21 @@ REGISTER_OP_CPU_KERNEL( paddle::platform::complex64>, ops::TraceGradKernel); + +/* ========================== register checkpoint ===========================*/ +REGISTER_OP_VERSION(trace) + .AddCheckpoint( + R"ROC(Upgrade trace, add new attributes [axis1, axis2] and delete attributes [dim1, dim2])ROC", + paddle::framework::compatible::OpVersionDesc() + .NewAttr("axis1", + "The added attribute 'axis1' is not yet registered.", + std::vector<float>{0.0f}) + .NewAttr("axis2", + "The added attribute 'axis2' is not yet registered.", + std::vector<float>{1.0f}) + .DeleteAttr("dim1", + "The attribute 'dim1' is not recommended according to " + "the 2.0 specification.") + .DeleteAttr("dim2", + "The attribute 'dim2' is not recommended according to " + "the 2.0 specification.")); diff --git a/paddle/fluid/operators/unique_op.cc b/paddle/fluid/operators/unique_op.cc index aed919e996..82f894a3a3 100644 --- a/paddle/fluid/operators/unique_op.cc +++ b/paddle/fluid/operators/unique_op.cc @@ -184,7 +184,7 @@ REGISTER_OP_VERSION(unique) .NewAttr("axis", "The axis to apply unique. If None, the input will be " "flattened.", - {}) + std::vector<int>{}) .NewAttr("is_sorted", "If True, the unique elements of X are in ascending order. " "Otherwise, the unique elements are not sorted.", -- GitLab
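Every hunk above follows the same two-part upgrade pattern: when an operator gains, loses, or changes an input, output, or attribute, a versioning checkpoint is registered so that programs saved against the old operator definition can still be loaded, and every fuse pass that matches the operator widens its capability bound from .EQ(op, 0) to .LE(op, 1). A minimal sketch of the pattern, using the headers shown in the patch but hypothetical names my_op and my_fuse_pass (not part of this patch):

#include "paddle/fluid/framework/op_version_registry.h"

// Bump my_op from version 0 to 1 by describing what changed; the framework
// uses this checkpoint to upgrade programs saved with the old definition.
REGISTER_OP_VERSION(my_op)
    .AddCheckpoint(
        R"ROC(Upgrade my_op, add a new attribute [use_align].)ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "use_align",
            "Whether to take memory alignment into account.",
            /*default_value=*/true));

// A pass that matches my_op must declare which operator versions it can
// handle; .LE("my_op", 1) accepts both the old (0) and the upgraded (1)
// operator, which is why this patch flips .EQ(..., 0) to .LE(..., 1)
// wherever an operator gained a checkpoint.
REGISTER_PASS_CAPABILITY(my_fuse_pass)
    .AddCombination(
        paddle::framework::compatible::OpVersionComparatorCombination()
            .LE("my_op", 1));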