Unverified · Commit 5eab1a38 · Authored by 石晓伟, committed by GitHub

git cherry-pick the commits of operator version registries, test=release/2.0 (#30292)

* Register op version for grid_sampler, test=op_version (#29916)

* add op version for fake_quant and fake_dequant ops, test=op_version (#29923)

* Register op version for print, test=op_version (#29945)

* add gru op_register_version; test=op_version; (#29931)

* Register op version for coalesce_tensor. (#29940)

* register op version for conv2d_transpose, conv3d_transpose and depthwise_conv2d_transpose, test=op_version (#29937)

* add op_register_version for allclose op; test=op_version (#29968)

* register ModifyAttr for instance_norm, test=op_version (#29938)

* add op_version for flip op [test=op_version] (#30019)

* add the op version check for the elementwise ops, test=op_version (#30010)

* add the support the op version check for matmul, test=op_version (#30011)

* Revert "register ModifyAttr for instance_norm, test=op_version (#29938)"

* add REGISTER_OP_VERSION for generate_proposals, roi_align, roi_pool test=op_version (#30034)

* Fix rank_attention op_version, test=op_version (#30006)

* fix rank_attention, test=op_version

* Register op version for linspace,test=op_version (#30025)

* fix op_register_version for compare ops, test=op_version (#30007)
Co-authored-by: zhoushunjie <zhoushunjie@baidu.com>

* register ModifyAttr for instance_norm, test=op_version (#30065)

* register instance norm, test=op_version

* add trace op_register_version and fix version bug; test=op_version (#30000)

* fix a bug in op_version_registry, test=develop, test=op_version (#29994)

* Add version checking, test=op_version (#30129)

* fix a bug in gaussian_random_op version, test=release/2.0
Co-authored-by: LielinJiang <50691816+LielinJiang@users.noreply.github.com>
Co-authored-by: cc <52520497+juncaipeng@users.noreply.github.com>
Co-authored-by: Qi Li <qili93@qq.com>
Co-authored-by: Jack Zhou <zhoushunjie@baidu.com>
Co-authored-by: Guo Sheng <whucsgs@163.com>
Co-authored-by: wangxinxin08 <69842442+wangxinxin08@users.noreply.github.com>
Co-authored-by: wawltor <fangzeyang0904@hotmail.com>
Co-authored-by: FlyingQianMM <245467267@qq.com>
Co-authored-by: ceci3 <ceci3@users.noreply.github.com>
Co-authored-by: hutuxian <hutuxian2011@sina.cn>
Co-authored-by: chalsliu <45041955+chalsliu@users.noreply.github.com>
Co-authored-by: wangguanzhong <jerrywgz@126.com>
Co-authored-by: ShenLiang <shenliang03@baidu.com>
Co-authored-by: yinhaofeng <66763551+yinhaofeng@users.noreply.github.com>
Co-authored-by: channings <chenlingchi@baidu.com>
Co-authored-by: chentianyu03 <chentianyu03@baidu.com>
Co-authored-by: ruri <shipeng1108@163.com>
Parent 0fbfbeac
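Every hunk below applies one pattern. On the operator side, REGISTER_OP_VERSION records a checkpoint for each behavior-affecting change (a new attribute, input, or output, or a changed default), and each AddCheckpoint raises the operator's version by one. On the pass side, the REGISTER_PASS_CAPABILITY comparators are relaxed from .EQ(op, 0) to .LE(op, n), so the fusion passes keep matching programs saved both before and after the upgrade. A condensed sketch assembled from the elementwise_add hunks below (not a stand-alone translation unit; the remark string is abbreviated):

// Operator side: a checkpoint for the new attribute moves elementwise_add
// from version 0 to version 1.
REGISTER_OP_VERSION(elementwise_add)
    .AddCheckpoint(
        R"ROC(Register elementwise_add for adding the attribute of Scale_y)ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "Scale_y", "Scale applied to input Y.", 1.0f));

// Pass side: accept elementwise_add at version 0 or 1 instead of exactly 0.
REGISTER_PASS_CAPABILITY(conv_elementwise_add_fuse_pass)
    .AddCombination(
        paddle::framework::compatible::OpVersionComparatorCombination()
            .LE("conv2d", 1)
            .LE("elementwise_add", 1));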
@@ -244,5 +244,5 @@ REGISTER_PASS_CAPABILITY(conv_eltwiseadd_affine_channel_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .LE("conv2d", 1)
-            .EQ("elementwise_add", 0)
+            .LE("elementwise_add", 1)
             .EQ("affine_channel", 0));
@@ -389,5 +389,5 @@ REGISTER_PASS_CAPABILITY(conv_eltwiseadd_bn_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .LE("conv2d", 1)
-            .EQ("elementwise_add", 0)
+            .LE("elementwise_add", 1)
             .EQ("batch_norm", 0));
@@ -122,6 +122,6 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add2_act_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .LE("conv2d", 1)
-            .EQ("elementwise_add", 0)
+            .LE("elementwise_add", 1)
             .EQ("relu", 0)
             .EQ("identity", 0));
@@ -109,6 +109,6 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add_act_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .LE("conv2d", 1)
-            .EQ("elementwise_add", 0)
+            .LE("elementwise_add", 1)
             .EQ("relu", 0)
             .EQ("identity", 0));
@@ -95,4 +95,4 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .LE("conv2d", 1)
-            .EQ("elementwise_add", 0));
+            .LE("elementwise_add", 1));
@@ -263,6 +263,6 @@ REGISTER_PASS_CAPABILITY(embedding_fc_lstm_fuse_pass)
         paddle::framework::compatible::OpVersionComparatorCombination()
             .EQ("lookup_table_v2", 0)
             .EQ("mul", 0)
-            .EQ("elementwise_add", 0)
+            .LE("elementwise_add", 1)
             .EQ("lstm", 0)
             .EQ("fused_embedding_fc_lstm", 0));
@@ -187,6 +187,6 @@ REGISTER_PASS_CAPABILITY(fc_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .EQ("mul", 0)
-            .EQ("elementwise_add", 0)
+            .LE("elementwise_add", 1)
             .EQ("relu", 0)
             .EQ("fc", 0));
@@ -203,11 +203,11 @@ REGISTER_PASS_CAPABILITY(mul_gru_fuse_pass)
         paddle::framework::compatible::OpVersionComparatorCombination()
             .EQ("mul", 0)
             .EQ("gru", 0)
-            .EQ("fusion_gru", 0));
+            .LE("fusion_gru", 1));
 REGISTER_PASS_CAPABILITY(fc_gru_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .EQ("mul", 0)
-            .EQ("elementwise_add", 0)
+            .LE("elementwise_add", 1)
             .EQ("gru", 0)
-            .EQ("fusion_gru", 0));
+            .LE("fusion_gru", 1));
@@ -202,7 +202,7 @@ REGISTER_PASS_CAPABILITY(fc_lstm_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .EQ("mul", 0)
-            .EQ("elementwise_add", 0)
+            .LE("elementwise_add", 1)
             .EQ("lstm", 0)
             .EQ("fusion_lstm", 0));
 REGISTER_PASS_CAPABILITY(mul_lstm_fuse_pass)
...
@@ -227,7 +227,7 @@ REGISTER_PASS(map_matmul_to_mul_pass, paddle::framework::ir::MapMatmul2MulPass);
 REGISTER_PASS_CAPABILITY(map_matmul_to_mul_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
-            .EQ("matmul", 0)
+            .LE("matmul", 1)
             .EQ("mul", 0));
 
 REGISTER_PASS(squeeze2_matmul_fuse_pass,
@@ -235,7 +235,7 @@ REGISTER_PASS(squeeze2_matmul_fuse_pass,
 REGISTER_PASS_CAPABILITY(squeeze2_matmul_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
-            .EQ("matmul", 0)
+            .LE("matmul", 1)
             .EQ("squeeze2", 0)
             .EQ("mul", 0));
@@ -244,6 +244,6 @@ REGISTER_PASS(reshape2_matmul_fuse_pass,
 REGISTER_PASS_CAPABILITY(reshape2_matmul_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
-            .EQ("matmul", 0)
+            .LE("matmul", 1)
             .EQ("reshape2", 0)
             .EQ("mul", 0));
@@ -153,7 +153,7 @@ REGISTER_PASS_CAPABILITY(conv_bias_mkldnn_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .LE("conv2d", 1)
-            .EQ("elementwise_add", 0));
+            .LE("elementwise_add", 1));
 REGISTER_PASS(conv_transpose_bias_mkldnn_fuse_pass,
               paddle::framework::ir::Conv2DTransposeBiasFusePass);
@@ -161,7 +161,7 @@ REGISTER_PASS_CAPABILITY(conv_transpose_bias_mkldnn_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .LE("conv2d_transpose", 1)
-            .EQ("elementwise_add", 0));
+            .LE("elementwise_add", 1));
 REGISTER_PASS(conv3d_bias_mkldnn_fuse_pass,
               paddle::framework::ir::Conv3DBiasFusePass);
@@ -228,20 +228,19 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseConvAsX(
       pattern->NewNode(elementwise_add_pattern.elementwise_add_y_repr()));
   conv_output->AsIntermediate();
 
-  auto get_node_from_elementwise_add =
-      [&elementwise_add_pattern](
-          const GraphPatternDetector::subgraph_t& subgraph)
+  auto get_node_from_elementwise_add = [&elementwise_add_pattern](
+      const GraphPatternDetector::subgraph_t& subgraph)
       -> std::tuple<Node*, Node*, Node*> {
     GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op,
                               elementwise_add_pattern);
     GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_y, elementwise_add_y,
                               elementwise_add_pattern);
     GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out,
                               elementwise_add_pattern);
     return std::make_tuple(elementwise_add_op, elementwise_add_y,
                            elementwise_add_out);
   };
 
   return ExecuteHandleOnGraph<IdentityFuseHandle>(
       &gpd, graph_with_stats,
@@ -266,20 +265,19 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseConvAsY(
       conv_output);
   conv_output->AsIntermediate();
 
-  auto get_node_from_elementwise_add =
-      [&elementwise_add_pattern](
-          const GraphPatternDetector::subgraph_t& subgraph)
+  auto get_node_from_elementwise_add = [&elementwise_add_pattern](
+      const GraphPatternDetector::subgraph_t& subgraph)
      -> std::tuple<Node*, Node*, Node*> {
     GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op,
                               elementwise_add_pattern);
     GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x,
                               elementwise_add_pattern);
     GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out,
                               elementwise_add_pattern);
     return std::make_tuple(elementwise_add_op, elementwise_add_x,
                            elementwise_add_out);
   };
 
   return ExecuteHandleOnGraph<IdentityFuseHandle>(
       &gpd, graph_with_stats,
@@ -306,17 +304,16 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseProjectionConv(
   conv_x_output->AsIntermediate();
   conv_y_output->AsIntermediate();
 
-  auto get_node_from_elementwise_add =
-      [&elementwise_add_pattern](
-          const GraphPatternDetector::subgraph_t& subgraph)
+  auto get_node_from_elementwise_add = [&elementwise_add_pattern](
+      const GraphPatternDetector::subgraph_t& subgraph)
       -> std::tuple<Node*, Node*> {
     GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op,
                               elementwise_add_pattern);
     GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out,
                               elementwise_add_pattern);
     return std::make_tuple(elementwise_add_op, elementwise_add_out);
   };
 
   return ExecuteHandleOnGraph<ProjectionFuseHandle>(
       &gpd, graph_with_stats,
@@ -351,4 +348,4 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add_mkldnn_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .LE("conv2d", 1)
-            .EQ("elementwise_add", 0));
+            .LE("elementwise_add", 1));
@@ -103,6 +103,6 @@ REGISTER_PASS(matmul_transpose_reshape_fuse_pass,
 REGISTER_PASS_CAPABILITY(matmul_transpose_reshape_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
-            .EQ("matmul", 0)
+            .LE("matmul", 1)
             .EQ("transpose", 0)
             .EQ("reshape", 0));
@@ -221,5 +221,5 @@ REGISTER_PASS_CAPABILITY(mkldnn_inplace_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .EQ("softmax", 0)
-            .EQ("elementwise_add", 0)
+            .LE("elementwise_add", 1)
             .EQ("tanh", 0));
@@ -96,4 +96,4 @@ REGISTER_PASS_CAPABILITY(scale_matmul_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .EQ("scale", 0)
-            .EQ("matmul", 0));
+            .LE("matmul", 1));
@@ -716,9 +716,9 @@ REGISTER_PASS_CAPABILITY(multihead_matmul_fuse_pass_v2)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .EQ("mul", 0)
-            .EQ("elementwise_add", 0)
+            .LE("elementwise_add", 1)
             .EQ("reshape2", 0)
             .EQ("transpose2", 0)
             .EQ("scale", 0)
-            .EQ("matmul", 0)
+            .LE("matmul", 1)
             .EQ("softmax", 0));
@@ -262,7 +262,7 @@ REGISTER_PASS_CAPABILITY(seq_concat_fc_fuse_pass)
             .EQ("sequence_expand", 0)
             .EQ("concat", 0)
             .EQ("mul", 0)
-            .EQ("elementwise_add", 0)
+            .LE("elementwise_add", 1)
             .EQ("sigmoid", 0)
             .EQ("tanh", 0)
             .EQ("relu", 0)
...
@@ -106,5 +106,5 @@ REGISTER_PASS_CAPABILITY(seqconv_eltadd_relu_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .EQ("sequence_conv", 0)
-            .EQ("elementwise_add", 0)
+            .LE("elementwise_add", 1)
             .EQ("relu", 0));
@@ -193,5 +193,5 @@ REGISTER_PASS(skip_layernorm_fuse_pass,
 REGISTER_PASS_CAPABILITY(skip_layernorm_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
-            .EQ("elementwise_add", 0)
+            .LE("elementwise_add", 1)
             .EQ("layer_norm", 0));
@@ -389,10 +389,10 @@ REGISTER_PASS(squared_mat_sub_fuse_pass,
 REGISTER_PASS_CAPABILITY(squared_mat_sub_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
-            .EQ("matmul", 0)
+            .LE("matmul", 1)
             .EQ("matmul_v2", 0)
             .EQ("square", 0)
-            .EQ("elementwise_mul", 0)
-            .EQ("elementwise_sub", 0)
+            .LE("elementwise_mul", 1)
+            .LE("elementwise_sub", 1)
             .EQ("fill_constant", 1)
             .EQ("fusion_squared_mat_sub", 0));
@@ -131,4 +131,4 @@ REGISTER_PASS_CAPABILITY(unsqueeze2_eltwise_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .EQ("unsqueeze2", 0)
-            .EQ("elementwise_mul", 0));
+            .LE("elementwise_mul", 1));
@@ -18,29 +18,6 @@ namespace paddle {
 namespace framework {
 namespace compatible {
 
-namespace {
-template <OpUpdateType type__, typename InfoType>
-OpUpdate<InfoType, type__>* new_update(InfoType&& info) {
-  return new OpUpdate<InfoType, type__>(info);
-}
-}
-
-OpVersionDesc&& OpVersionDesc::ModifyAttr(const std::string& name,
-                                          const std::string& remark,
-                                          const OpAttrVariantT& default_value) {
-  infos_.emplace_back(new_update<OpUpdateType::kModifyAttr>(
-      OpAttrInfo(name, remark, default_value)));
-  return std::move(*this);
-}
-
-OpVersionDesc&& OpVersionDesc::NewAttr(const std::string& name,
-                                       const std::string& remark,
-                                       const OpAttrVariantT& default_value) {
-  infos_.emplace_back(new_update<OpUpdateType::kNewAttr>(
-      OpAttrInfo(name, remark, default_value)));
-  return std::move(*this);
-}
-
 OpVersionDesc&& OpVersionDesc::NewInput(const std::string& name,
                                         const std::string& remark) {
   infos_.emplace_back(
...
@@ -118,13 +118,44 @@ class OpUpdate : public OpUpdateBase {
   OpUpdateType type_;
 };
 
+template <OpUpdateType type__, typename InfoType>
+OpUpdate<InfoType, type__>* new_update(InfoType&& info) {
+  return new OpUpdate<InfoType, type__>(info);
+}
+
+template <typename T>
+OpAttrVariantT op_attr_wrapper(const T& val) {
+  return OpAttrVariantT{val};
+}
+
+template <int N>
+OpAttrVariantT op_attr_wrapper(const char (&val)[N]) {
+  PADDLE_ENFORCE_EQ(
+      val[N - 1], 0,
+      platform::errors::InvalidArgument(
+          "The argument of operator register %c is illegal.", val[N - 1]));
+  return OpAttrVariantT{std::string{val}};
+}
+
 class OpVersionDesc {
  public:
   /* Compatibility upgrade */
-  OpVersionDesc&& ModifyAttr(const std::string& name, const std::string& remark,
-                             const OpAttrVariantT& default_value);
+  template <typename T>
+  OpVersionDesc&& ModifyAttr(const std::string& name, const std::string& remark,
+                             const T& default_value) {
+    infos_.emplace_back(new_update<OpUpdateType::kModifyAttr>(
+        OpAttrInfo(name, remark, op_attr_wrapper(default_value))));
+    return std::move(*this);
+  }
 
-  OpVersionDesc&& NewAttr(const std::string& name, const std::string& remark,
-                          const OpAttrVariantT& default_value);
+  template <typename T>
+  OpVersionDesc&& NewAttr(const std::string& name, const std::string& remark,
+                          const T& default_value) {
+    infos_.emplace_back(new_update<OpUpdateType::kNewAttr>(
+        OpAttrInfo(name, remark, op_attr_wrapper(default_value))));
+    return std::move(*this);
+  }
 
   OpVersionDesc&& NewInput(const std::string& name, const std::string& remark);
   OpVersionDesc&& NewOutput(const std::string& name, const std::string& remark);
   OpVersionDesc&& BugfixWithBehaviorChanged(const std::string& remark);
...
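A note on why ModifyAttr and NewAttr became templates with the op_attr_wrapper overloads above: passing a string literal straight into a variant-typed OpAttrVariantT parameter risks the wrong alternative being selected (the literal decays to const char*, which is not itself an alternative type and, depending on the variant implementation, may be ambiguous or convert to bool), so the char-array overload pins literals to std::string and rejects arrays that are not NUL-terminated. A minimal stand-alone illustration of the same dispatch, with OpAttrVariantT simplified to a std::variant (the real type is a boost::variant over Paddle's attribute types):

#include <cassert>
#include <iostream>
#include <string>
#include <variant>
#include <vector>

using AttrVariant = std::variant<bool, int, float, std::string, std::vector<int>>;

// Generic path: the value already has one of the variant's alternative types.
template <typename T>
AttrVariant attr_wrapper(const T& val) {
  return AttrVariant{val};
}

// String literals bind here as const char (&)[N]; partial ordering prefers this
// overload, so "bilinear" is stored as std::string instead of decaying.
template <int N>
AttrVariant attr_wrapper(const char (&val)[N]) {
  assert(val[N - 1] == '\0');  // the real code raises InvalidArgument instead
  return AttrVariant{std::string{val}};
}

int main() {
  auto a = attr_wrapper("bilinear");           // holds std::string
  auto b = attr_wrapper(true);                 // holds bool
  auto c = attr_wrapper(std::vector<int>{1});  // holds std::vector<int>
  std::cout << std::holds_alternative<std::string>(a)
            << std::holds_alternative<bool>(b)
            << std::holds_alternative<std::vector<int>>(c) << '\n';  // 111
}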
@@ -390,8 +390,8 @@ REGISTER_PASS_CAPABILITY(tensorrt_subgraph_pass)
             .EQ("concat", 0)
             .EQ("tanh", 0)
             .EQ("pad", 0)
-            .EQ("elementwise_add", 0)
-            .EQ("elementwise_mul", 0)
+            .LE("elementwise_add", 1)
+            .LE("elementwise_mul", 1)
             .EQ("prelu", 0)
             .LE("conv2d_transpose", 1)
             .LE("leaky_relu", 1)
@@ -399,8 +399,8 @@ REGISTER_PASS_CAPABILITY(tensorrt_subgraph_pass)
             .EQ("shuffle_channel", 0)
             .EQ("swish", 0)
             .EQ("split", 0)
-            .EQ("instance_norm", 0)
+            .LE("instance_norm", 1)
             .EQ("gelu", 0)
             .EQ("layer_norm", 0)
             .EQ("scale", 0)
-            .EQ("matmul", 0));
+            .LE("matmul", 1));
@@ -160,12 +160,37 @@ REGISTER_OPERATOR(
 REGISTER_OP_CPU_KERNEL(allclose, ops::AllcloseKernel<CPU, float>,
                        ops::AllcloseKernel<CPU, double>);
 
+/* ========================== register checkpoint ===========================*/
 REGISTER_OP_VERSION(allclose)
     .AddCheckpoint(
-        R"ROC(
-      Upgrade allclose add 2 attributes [atol, rtol].
-    )ROC",
+        R"ROC(Upgrade allclose, add two new inputs [Rtol] and [Atol].)ROC",
         paddle::framework::compatible::OpVersionDesc()
+            .NewInput("Rtol",
+                      "The added input 'Rtol' is not "
+                      "dispensable.")
+            .NewInput("Atol",
+                      "The added input 'Atol' is not "
+                      "dispensable."))
+    .AddCheckpoint(
+        R"ROC(Delete two float attributes [rtol] and [atol],
+        then add 2 string attributes [atol, rtol]. Don't be surprised.
+        This is because float cannot represent high-precision
+        floating-point values, and our framework doesn't support
+        the use of double attributes. As a result, string instead
+        of double is used here to represent high-precision
+        floating-point values.
+        )ROC",
+        paddle::framework::compatible::OpVersionDesc()
+            .DeleteAttr("rtol",
+                        "The attribute 'rtol' is deleted. "
+                        "The reason why it is deleted is that "
+                        "attributes do not support a float64 value "
+                        "and it is changed to a tensor.")
+            .DeleteAttr("atol",
+                        "The attribute 'atol' is deleted. "
+                        "The reason why it is deleted is that "
+                        "attributes do not support a float64 value "
+                        "and it is changed to a tensor.")
             .NewAttr("rtol",
                      "(string) The relative tolerance. Default: :math:`1e-5` .",
                      std::string("1e-5"))
...
@@ -44,8 +44,8 @@ REGISTER_OP_VERSION(arg_max)
                 false)
             .ModifyAttr(
                 "dtype",
-                "change the default value of dtype, the older version "
-                "is -1, means return the int64 indices."
-                "The new version is 3, return the int64 indices directly."
-                "And supporting the dtype of -1 in new version.",
+                "Change the default value of dtype from -1 to 3"
+                ", means return the int64 indices directly. The reason why "
+                "changing the default value is that the int64 value in "
+                "VarType is 3 in the framework.proto.",
                 3));
@@ -44,8 +44,8 @@ REGISTER_OP_VERSION(arg_min)
                 false)
             .ModifyAttr(
                 "dtype",
-                "change the default value of dtype, the older version "
-                "is -1, means return the int64 indices."
-                "The new version is 3, return the int64 indices directly."
-                "And supporting the dtype of -1 in new version.",
+                "Change the default value of dtype from -1 to 3"
+                ", means return the int64 indices directly. The reason why "
+                "changing the default value is that the int64 value in "
+                "VarType is 3 in the framework.proto.",
                 3));
@@ -15,6 +15,7 @@
 #include <sstream>
 #include <vector>
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/framework/var_type.h"
 #include "paddle/fluid/operators/math/math_function.h"
@@ -297,3 +298,14 @@ REGISTER_OP_CUDA_KERNEL(
     ops::CoalesceTensorOpKernel<paddle::platform::CUDADeviceContext, float>,
     ops::CoalesceTensorOpKernel<paddle::platform::CUDADeviceContext, double>);
 #endif
+
+REGISTER_OP_VERSION(coalesce_tensor)
+    .AddCheckpoint(
+        R"ROC(
+        Upgrade coalesce_tensor: add a new attribute [use_align].)ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "use_align",
+            "In order to optionally take memory alignment into account when "
+            "coalescing tensors. The default value is true to be compatible "
+            "with before.",
+            true));
@@ -133,9 +133,9 @@ class CompareOp : public framework::OperatorWithKernel {
   REGISTER_OP_VERSION(op_type)                                             \
       .AddCheckpoint(                                                      \
           R"ROC(Upgrade compare ops, add a new attribute [force_cpu])ROC", \
-          paddle::framework::compatible::OpVersionDesc().NewAttr(          \
+          paddle::framework::compatible::OpVersionDesc().ModifyAttr(       \
              "force_cpu",                                                  \
-              "In order to force fill output variable to cpu memory.",     \
+              "In order to force fill output variable to gpu memory.",     \
              false));
 
 #define REGISTER_COMPARE_OP(op_type, _equation)                       \
...
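The swap from NewAttr to ModifyAttr in this macro is the "fix op_register_version for compare ops" item from the commit message, and it matches how the two calls are used everywhere else in this diff: NewAttr declares an attribute that first appears at the checkpoint (use_align on coalesce_tensor above), while ModifyAttr declares an existing attribute whose default or meaning changed (dtype on arg_max); force_cpu already existed on the compare ops, so declaring it as new was wrong. A toy stand-in showing the intended distinction (ToyOpVersionDesc is illustrative only, not Paddle's class):

#include <iostream>
#include <string>

struct ToyOpVersionDesc {
  // NewAttr: the attribute is introduced at this checkpoint.
  ToyOpVersionDesc&& NewAttr(const std::string& name) {
    std::cout << "attr introduced at this version: " << name << '\n';
    return std::move(*this);
  }
  // ModifyAttr: the attribute existed before; its default or meaning changed.
  ToyOpVersionDesc&& ModifyAttr(const std::string& name) {
    std::cout << "existing attr changed: " << name << '\n';
    return std::move(*this);
  }
};

int main() {
  ToyOpVersionDesc().ModifyAttr("force_cpu");  // the corrected declaration
}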
@@ -578,4 +578,37 @@ REGISTER_OP_VERSION(conv_transpose)
             "output_padding",
             "In order to add additional size to one side of each dimension "
             "in the output",
-            {}));
+            std::vector<int>{}));
+
+REGISTER_OP_VERSION(conv2d_transpose)
+    .AddCheckpoint(
+        R"ROC(
+      Upgrade conv2d transpose to add a new attribute [output_padding].
+    )ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "output_padding",
+            "In order to add additional size to one side of each dimension "
+            "in the output",
+            std::vector<int>{}));
+
+REGISTER_OP_VERSION(conv3d_transpose)
+    .AddCheckpoint(
+        R"ROC(
+      Upgrade conv3d transpose to add a new attribute [output_padding].
+    )ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "output_padding",
+            "In order to add additional size to one side of each dimension "
+            "in the output",
+            std::vector<int>{}));
+
+REGISTER_OP_VERSION(depthwise_conv2d_transpose)
+    .AddCheckpoint(
+        R"ROC(
+      Upgrade depthwise conv2d transpose to add a new attribute [output_padding].
+    )ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "output_padding",
+            "In order to add additional size to one side of each dimension "
+            "in the output",
+            std::vector<int>{}));
@@ -303,6 +303,13 @@ REGISTER_OPERATOR(
 REGISTER_OP_CPU_KERNEL(generate_proposals, ops::GenerateProposalsKernel<float>,
                        ops::GenerateProposalsKernel<double>);
 REGISTER_OP_VERSION(generate_proposals)
+    .AddCheckpoint(
+        R"ROC(
+              Incompatible upgrade of output [RpnRoisLod])ROC",
+        paddle::framework::compatible::OpVersionDesc().DeleteOutput(
+            "RpnRoisLod",
+            "Delete RpnRoisLod due to incorrect output name and "
+            "it is not used in object detection models yet."))
     .AddCheckpoint(
         R"ROC(
              Upgrade generate_proposals add a new output [RpnRoisNum])ROC",
...
 include(operators)
-register_operators()
+if(WITH_UNITY_BUILD)
+    # Load Unity Build rules for operators in paddle/fluid/operators/elementwise.
+    include(unity_build_rule.cmake)
+endif()
+register_operators(DEPS op_version_registry)
 
 cc_test(test_elementwise_add_op_inplace SRCS test_elementwise_add_op_inplace.cc DEPS op_registry elementwise_add_op scope device_context enforce executor)
 cc_test(test_elementwise_div_grad_grad SRCS test_elementwise_div_grad_grad.cc DEPS op_registry elementwise_div_op scope device_context enforce executor)
...
@@ -17,7 +17,6 @@ limitations under the License. */
 #include <memory>
 #include <string>
 
-#include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/operators/elementwise/elementwise_op.h"
 #include "paddle/fluid/platform/complex128.h"
 #include "paddle/fluid/platform/complex64.h"
@@ -178,3 +177,13 @@ REGISTER_OP_CPU_KERNEL(
         paddle::platform::complex64>,
     ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext,
                               paddle::platform::complex128>);
+
+REGISTER_OP_VERSION(elementwise_add)
+    .AddCheckpoint(
+        R"ROC(Register elementwise_add for adding the attribute of
+       Scale_y)ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "Scale_y",
+            "In order to support the function of scaling the input Y when "
+            "using the operator of elementwise_add.",
+            1.0f));
@@ -15,6 +15,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/elementwise/elementwise_div_op.h"
 #include <memory>
 #include <string>
+
 #include "paddle/fluid/operators/elementwise/elementwise_op.h"
 #include "paddle/fluid/platform/complex128.h"
 #include "paddle/fluid/platform/complex64.h"
@@ -162,3 +163,12 @@ REGISTER_OP_CPU_KERNEL(
         paddle::platform::complex64>,
     ops::ElementwiseDivDoubleGradKernel<paddle::platform::CPUDeviceContext,
                                         paddle::platform::complex128>);
+
+REGISTER_OP_VERSION(elementwise_div)
+    .AddCheckpoint(
+        R"ROC(Register elementwise_div for adding the attribute of Scale_y)ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "Scale_y",
+            "In order to support the function of scaling the input Y when "
+            "using the operator of elementwise_div.",
+            1.0f));
@@ -69,3 +69,12 @@ REGISTER_OP_CPU_KERNEL(
     ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext, int>,
     ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext,
                                    int64_t>);
+
+REGISTER_OP_VERSION(elementwise_floordiv)
+    .AddCheckpoint(
+        R"ROC(Register elementwise_floordiv for adding the attribute of Scale_y)ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "Scale_y",
+            "In order to support the function of scaling the input Y when "
+            "using the operator of elementwise_floordiv.",
+            1.0f));
@@ -94,3 +94,12 @@ REGISTER_OP_CPU_KERNEL(
     ops::ElementwiseMaxGradKernel<paddle::platform::CPUDeviceContext, double>,
     ops::ElementwiseMaxGradKernel<paddle::platform::CPUDeviceContext, int>,
     ops::ElementwiseMaxGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
+
+REGISTER_OP_VERSION(elementwise_max)
+    .AddCheckpoint(
+        R"ROC(Register elementwise_max for adding the attribute of Scale_y)ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "Scale_y",
+            "In order to support the function of scaling the input Y when "
+            "using the operator of elementwise_max.",
+            1.0f));
@@ -94,3 +94,12 @@ REGISTER_OP_CPU_KERNEL(
     ops::ElementwiseMinGradKernel<paddle::platform::CPUDeviceContext, double>,
     ops::ElementwiseMinGradKernel<paddle::platform::CPUDeviceContext, int>,
     ops::ElementwiseMinGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
+
+REGISTER_OP_VERSION(elementwise_min)
+    .AddCheckpoint(
+        R"ROC(Register elementwise_min for adding the attribute of Scale_y)ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "Scale_y",
+            "In order to support the function of scaling the input Y when "
+            "using the operator of elementwise_min.",
+            1.0f));
@@ -69,3 +69,12 @@ REGISTER_OP_CPU_KERNEL(
     ops::ElementwiseModKernel<paddle::platform::CPUDeviceContext, int64_t>,
     ops::ElementwiseModFPKernel<paddle::platform::CPUDeviceContext, float>,
     ops::ElementwiseModFPKernel<paddle::platform::CPUDeviceContext, double>);
+
+REGISTER_OP_VERSION(elementwise_mod)
+    .AddCheckpoint(
+        R"ROC(Register elementwise_mod for adding the attribute of Scale_y)ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "Scale_y",
+            "In order to support the function of scaling the input Y when "
+            "using the operator of elementwise_mod.",
+            1.0f));
@@ -161,3 +161,12 @@ REGISTER_OP_CPU_KERNEL(
         paddle::platform::complex64>,
     ops::ElementwiseMulDoubleGradKernel<paddle::platform::CPUDeviceContext,
                                         paddle::platform::complex128>);
+
+REGISTER_OP_VERSION(elementwise_mul)
+    .AddCheckpoint(
+        R"ROC(Register elementwise_mul for adding the attribute of Scale_y)ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "Scale_y",
+            "In order to support the function of scaling the input Y when "
+            "using the operator of elementwise_mul.",
+            1.0f));
@@ -22,6 +22,7 @@ limitations under the License. */
 
 #include "paddle/fluid/framework/data_layout.h"
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/common_infer_shape_functions.h"
 #include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
...
@@ -83,3 +83,12 @@ REGISTER_OP_CPU_KERNEL(
     ops::ElementwisePowGradKernel<paddle::platform::CPUDeviceContext, double>,
     ops::ElementwisePowGradKernel<paddle::platform::CPUDeviceContext, int>,
     ops::ElementwisePowGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
+
+REGISTER_OP_VERSION(elementwise_pow)
+    .AddCheckpoint(
+        R"ROC(Register elementwise_pow for adding the attribute of Scale_y)ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "Scale_y",
+            "In order to support the function of scaling the input Y when "
+            "using the operator of elementwise_pow.",
+            1.0f));
@@ -156,3 +156,12 @@ REGISTER_OP_CPU_KERNEL(
         paddle::platform::complex64>,
     ops::ElementwiseSubDoubleGradKernel<paddle::platform::CPUDeviceContext,
                                         paddle::platform::complex128>);
+
+REGISTER_OP_VERSION(elementwise_sub)
+    .AddCheckpoint(
+        R"ROC(Register elementwise_sub for adding the attribute of Scale_y)ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "Scale_y",
+            "In order to support the function of scaling the input Y when "
+            "using the operator of elementwise_sub.",
+            1.0f));
@@ -15,6 +15,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/fake_dequantize_op.h"
 #include <string>
 #include <vector>
+#include "paddle/fluid/framework/op_version_registry.h"
 
 namespace paddle {
 namespace operators {
@@ -238,3 +239,10 @@ REGISTER_OPERATOR(
 REGISTER_OP_CPU_KERNEL(fake_channel_wise_dequantize_max_abs,
                        ops::FakeChannelWiseDequantizeMaxAbsKernel<CPU, float>,
                        ops::FakeChannelWiseDequantizeMaxAbsKernel<CPU, double>);
+
+REGISTER_OP_VERSION(fake_channel_wise_dequantize_max_abs)
+    .AddCheckpoint(
+        R"ROC(add new attributes [quant_axis] for applying per-channel "
+        "dequantization to conv2d_transpose and mul ops.)ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "quant_axis", "The axis for dequantization.", 0));
@@ -16,6 +16,7 @@ limitations under the License. */
 #include <algorithm>
 #include <string>
 #include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/operators/clip_op.h"
 #include "paddle/fluid/platform/transform.h"
 
@@ -805,3 +806,10 @@ REGISTER_OPERATOR(fake_channel_wise_quantize_dequantize_abs_max,
 REGISTER_OP_CPU_KERNEL(
     fake_channel_wise_quantize_dequantize_abs_max,
     ops::FakeChannelWiseQuantizeDequantizeAbsMaxKernel<CPU, float>);
+
+REGISTER_OP_VERSION(fake_channel_wise_quantize_abs_max)
+    .AddCheckpoint(
+        R"ROC(add new attributes [quant_axis] for applying per-channel "
+        "quantization to conv2d_transpose and mul ops.)ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "quant_axis", "The axis for quantization.", 0));
@@ -13,10 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/flip_op.h"
+
 #include <string>
 #include <unordered_map>
 #include <vector>
+
+#include "paddle/fluid/framework/op_version_registry.h"
 
 namespace paddle {
 namespace operators {
@@ -154,3 +154,12 @@ REGISTER_OP_CPU_KERNEL(
     ops::FlipKernel<paddle::platform::CPUDeviceContext, int32_t>,
     ops::FlipKernel<paddle::platform::CPUDeviceContext, int64_t>,
     ops::FlipKernel<paddle::platform::CPUDeviceContext, bool>);
+
+/* ========================== register checkpoint ===========================*/
+REGISTER_OP_VERSION(flip)
+    .AddCheckpoint(
+        R"ROC(Upgrade flip, add new attr [axis] and delete attr [dims].)ROC",
+        paddle::framework::compatible::OpVersionDesc()
+            .NewAttr("axis", "The added attr 'axis' doesn't set default value.",
+                     boost::none)
+            .DeleteAttr("dims", "The attr 'dims' is deleted."));
@@ -16,6 +16,7 @@ limitations under the License. */
 #include <cstring>  // for memcpy
 #include <string>
 #include <vector>
+#include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/operators/jit/kernels.h"
 #include "paddle/fluid/operators/math/blas.h"
 #include "paddle/fluid/operators/math/fc.h"
@@ -479,3 +480,13 @@ REGISTER_OPERATOR(fusion_gru, ops::FusionGRUOp, ops::FusionGRUOpMaker);
 REGISTER_OP_CPU_KERNEL(fusion_gru, ops::FusionGRUKernel<float>,
                        ops::FusionGRUKernel<double>);
+
+/* ========================== register checkpoint ===========================*/
+REGISTER_OP_VERSION(fusion_gru)
+    .AddCheckpoint(
+        R"ROC(Upgrade fusion_gru add a new attribute [Scale_weights])ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "Scale_weights",
+            "The added attribute 'Scale_weights' is not yet "
+            "registered.",
+            std::vector<float>{1.0f}));
@@ -213,4 +213,4 @@ REGISTER_OP_VERSION(gaussian_random)
         .ModifyAttr(
             "shape",
             "Add the default value of shape, the default value is {}.",
-            {}));
+            std::vector<int64_t>{}));
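This one-line change is the "fix a bug in gaussian_random_op version" item from the commit message, and the same fix appears in the conv_transpose and unique hunks: once ModifyAttr/NewAttr became templates (see the op_version_registry.h hunk above), a bare braced-init-list no longer compiles, because template argument deduction cannot infer T from {}. A minimal illustration (record_default is a hypothetical stand-in for the templated NewAttr/ModifyAttr):

#include <cstdint>
#include <string>
#include <vector>

template <typename T>
void record_default(const T& default_value) { /* store the attribute default */ }

int main() {
  // record_default({});                        // error: cannot deduce T from {}
  record_default(std::vector<std::int64_t>{});  // OK: T = std::vector<std::int64_t>
  record_default(std::string("1e-5"));          // OK: T = std::string
}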
@@ -16,6 +16,7 @@ limitations under the License. */
 #include <memory>
 #include <string>
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/op_version_registry.h"
 #ifdef PADDLE_WITH_CUDA
 #include "paddle/fluid/platform/cudnn_helper.h"
 #endif
@@ -237,3 +238,11 @@ REGISTER_OP_CPU_KERNEL(
     grid_sampler_grad,
     ops::GridSampleGradOpKernel<paddle::platform::CPUDeviceContext, float>,
     ops::GridSampleGradOpKernel<paddle::platform::CPUDeviceContext, double>);
+
+REGISTER_OP_VERSION(grid_sampler)
+    .AddCheckpoint(
+        R"ROC(
+      Upgrade grid_sampler add a new attribute [mode].
+    )ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "mode", "In order to specify interpolation mode", "bilinear"));
@@ -17,6 +17,7 @@ limitations under the License. */
 #include <string>
 #include <unordered_map>
 #include "paddle/fluid/framework/data_layout.h"
+#include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/operators/math/math_function.h"
 
 namespace paddle {
@@ -701,3 +702,20 @@ REGISTER_OP_CPU_KERNEL(
                                       float>,
     ops::InstanceNormDoubleGradKernel<paddle::platform::CPUDeviceContext,
                                       double>);
+
+REGISTER_OP_VERSION(instance_norm)
+    .AddCheckpoint(
+        R"ROC(
+      Change dispensable of attribute from False to True in instance_norm.
+    )ROC",
+        paddle::framework::compatible::OpVersionDesc()
+            .ModifyAttr(
+                "Bias",
+                "The arg 'dispensable' of Input 'Bias' is changed: from "
+                "'False' to 'True'.",
+                true)
+            .ModifyAttr(
+                "Scale",
+                "The arg 'dispensable' of Input 'Scale' is changed: from "
+                "'False' to 'True'.",
+                true));
@@ -14,6 +14,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/linspace_op.h"
 #include <string>
+#include "paddle/fluid/framework/op_version_registry.h"
 
 namespace paddle {
 namespace operators {
@@ -92,3 +93,11 @@ REGISTER_OP_CPU_KERNEL(linspace, ops::CPULinspaceKernel<float>,
                        ops::CPULinspaceKernel<int32_t>,
                        ops::CPULinspaceKernel<int64_t>,
                        ops::CPULinspaceKernel<double>);
+
+REGISTER_OP_VERSION(linspace)
+    .AddCheckpoint(
+        R"ROC(
+      Upgrade linspace to add a new attribute [dtype].
+    )ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "dtype", "In order to change output data type ", 5));
@@ -16,6 +16,7 @@ limitations under the License. */
 #include <utility>
 #include <vector>
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/operators/math/blas.h"
 #ifdef PADDLE_WITH_MKLDNN
 #include "paddle/fluid/platform/mkldnn_helper.h"
@@ -932,3 +933,14 @@ REGISTER_OP_CUDA_KERNEL(
     ops::MatMulDoubleGradKernel<paddle::platform::CUDADeviceContext, float>,
     ops::MatMulDoubleGradKernel<paddle::platform::CUDADeviceContext, double>);
 #endif
+
+REGISTER_OP_VERSION(matmul)
+    .AddCheckpoint(
+        R"ROC(Register matmul for adding the attribute of
+       fused_reshape_Y)ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "fused_reshape_Y",
+            "In order to support the function of fused the input Y "
+            " and input X into the input X when "
+            "using the operator of matmul, and get raw shape of input Y.",
+            std::vector<int>{}));
@@ -11,6 +11,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/pixel_shuffle_op.h"
 #include <memory>
+#include "paddle/fluid/framework/op_version_registry.h"
 
 namespace paddle {
 namespace operators {
@@ -185,3 +186,10 @@ REGISTER_OP_CPU_KERNEL(
     pixel_shuffle_grad,
     ops::PixelShuffleGradOpKernel<paddle::platform::CPUDeviceContext, float>,
     ops::PixelShuffleGradOpKernel<paddle::platform::CPUDeviceContext, double>);
+
+REGISTER_OP_VERSION(pixel_shuffle)
+    .AddCheckpoint(
+        R"ROC(
+      Compatible upgrade of pixel_shuffle, add a new attribute [data_format])ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "data_format", "Specify the data format of the input data", true));
@@ -13,6 +13,7 @@
 limitations under the License. */
 
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/operators/tensor_formatter.h"
 
 namespace paddle {
@@ -173,3 +174,11 @@ REGISTER_OPERATOR(print, ops::PrintOp, ops::PrintOpProtoAndCheckMaker,
                   ops::PrintOpGradientMaker<paddle::framework::OpDesc>,
                   ops::PrintOpGradientMaker<paddle::imperative::OpBase>,
                   ops::PrintOpInferShape, ops::PrintOpVarTypeInference);
+
+REGISTER_OP_VERSION(print)
+    .AddCheckpoint(
+        R"ROC(Upgrade print add a new attribute [print_tensor_layout] to "
+        "control whether to print tensor's layout.)ROC",
+        paddle::framework::compatible::OpVersionDesc().NewAttr(
+            "print_tensor_layout", "Whether to print the tensor's layout.",
+            true));
@@ -13,6 +13,7 @@ limitations under the License. */
 #include <memory>
 #include <string>
 #include <vector>
+#include "paddle/fluid/framework/op_version_registry.h"
 
 namespace paddle {
 namespace operators {
@@ -178,3 +179,18 @@ REGISTER_OP_CPU_KERNEL(
     rank_attention,
     ops::RankAttentionKernel<paddle::platform::CPUDeviceContext, float>,
     ops::RankAttentionKernel<paddle::platform::CPUDeviceContext, double>);
+
+REGISTER_OP_VERSION(rank_attention)
+    .AddCheckpoint(
+        R"ROC(
+        Upgrade rank_attention, add 1 outputs [InputHelp] and 1 attribute
+        [MaxSize].
+      )ROC",
+        paddle::framework::compatible::OpVersionDesc()
+            .NewOutput("InputHelp",
+                       "Output tensor of rank_attention_Op operator "
+                       "in order to assist calculation in the reverse process.")
+            .NewAttr(
+                "MaxSize",
+                "Forward calculation to set the pre-applied video memory size",
+                0));
@@ -233,6 +233,13 @@ REGISTER_OP_CPU_KERNEL(
     ops::CPUROIAlignGradOpKernel<paddle::platform::CPUDeviceContext, double>,
     ops::CPUROIAlignGradOpKernel<paddle::platform::CPUDeviceContext, int>);
 REGISTER_OP_VERSION(roi_align)
+    .AddCheckpoint(
+        R"ROC(
+              Incompatible upgrade of input [RpnRoisLod])ROC",
+        paddle::framework::compatible::OpVersionDesc().DeleteInput(
+            "RpnRoisLod",
+            "Delete RpnRoisLod due to incorrect input name and "
+            "it is not used in object detection models yet."))
     .AddCheckpoint(
         R"ROC(
              Upgrade roi_align add a new input [RoisNum])ROC",
...
@@ -227,6 +227,13 @@ REGISTER_OP_CPU_KERNEL(
     ops::CPUROIPoolGradOpKernel<paddle::platform::CPUDeviceContext, double>,
     ops::CPUROIPoolGradOpKernel<paddle::platform::CPUDeviceContext, int>);
 REGISTER_OP_VERSION(roi_pool)
+    .AddCheckpoint(
+        R"ROC(
+              Incompatible upgrade of input [RpnRoisLod])ROC",
+        paddle::framework::compatible::OpVersionDesc().DeleteInput(
+            "RpnRoisLod",
+            "Delete RpnRoisLod due to incorrect input name and "
+            "it is not used in object detection models yet."))
     .AddCheckpoint(
         R"ROC(
              Upgrade roi_pool add a new input [RoisNum])ROC",
...
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 #include "paddle/fluid/operators/trace_op.h"
+#include "paddle/fluid/framework/op_version_registry.h"
 
 namespace paddle {
 namespace operators {
@@ -89,13 +90,13 @@ class TraceOpMaker : public framework::OpProtoAndCheckerMaker {
         R"DOC((int, default 0), the first axis of the 2-D planes from which the diagonals should be taken.
         Can be either positive or negative. Default: 0.
         )DOC")
-        .SetDefault(-2);
+        .SetDefault(0);
     AddAttr<int>(
         "axis2",
         R"DOC((int, default 1), the second axis of the 2-D planes from which the diagonals should be taken.
         Can be either positive or negative. Default: 1.
         )DOC")
-        .SetDefault(-1);
+        .SetDefault(1);
     AddComment(R"DOC(
 Trace Operator.
 Return the sum along diagonals of the input tensor.
@@ -178,3 +179,21 @@ REGISTER_OP_CPU_KERNEL(
     ops::TraceGradKernel<paddle::platform::CPUDeviceContext,
                          paddle::platform::complex64>,
     ops::TraceGradKernel<paddle::platform::CPUDeviceContext,
                          paddle::platform::complex128>);
+
+/* ========================== register checkpoint ===========================*/
+REGISTER_OP_VERSION(trace)
+    .AddCheckpoint(
+        R"ROC(Upgrade trace add a new attribute [axis2])ROC",
+        paddle::framework::compatible::OpVersionDesc()
+            .NewAttr("axis1",
+                     "The added attribute 'axis1' is not yet registered.",
+                     std::vector<float>{0.0f})
+            .NewAttr("axis2",
+                     "The added attribute 'axis2' is not yet registered.",
+                     std::vector<float>{1.0f})
+            .DeleteAttr("dim1",
+                        "The attribute 'dim1' is not recommended according to "
+                        "the specification 2.0.")
+            .DeleteAttr("dim2",
+                        "The attribute 'dim2' is not recommended according to "
+                        "the specification 2.0."));
@@ -184,7 +184,7 @@ REGISTER_OP_VERSION(unique)
             .NewAttr("axis",
                      "The axis to apply unique. If None, the input will be "
                      "flattened.",
-                     {})
+                     std::vector<int>{})
             .NewAttr("is_sorted",
                      "If True, the unique elements of X are in ascending order."
                      "Otherwise, the unique elements are not sorted.",
...