Unverified · Commit 5eab1a38 authored by 石晓伟, committed by GitHub

git cherry-pick the commits of operator version registries, test=release/2.0 (#30292)

* Register op version for grid_sampler, test=op_version (#29916)

* add op version for fake_quant and fake_dequant ops, test=op_version (#29923)

* Register op version for print, test=op_version (#29945)

* add gru op_register_version; test=op_version; (#29931)

* Register op version for coalesce_tensor. (#29940)

* register op version for conv2d_transpose, conv3d_transpose and depthwise_conv2d_transpose, test=op_version (#29937)

* add op_register_version for allclose op; test=op_version (#29968)

* register ModifyAttr for instance_norm, test=op_version (#29938)

* add op_version for flip op [test=op_version] (#30019)

* add the op version check for the elementwise ops, test=op_version (#30010)

* add the support the op version check for matmul, test=op_version (#30011)

* Revert "register ModifyAttr for instance_norm, test=op_version (#29938)"

* add REGISTER_OP_VERSION for generate_proposals, roi_align, roi_pool test=op_version (#30034)

* Fix rank_attention op_version, test=op_version (#30006)

* fix rank_attention, test=op_version

* Register op version for linspace,test=op_version (#30025)

* fix op_register_version for compare ops, test=op_version (#30007)
Co-authored-by: zhoushunjie <zhoushunjie@baidu.com>

* register ModifyAttr for instance_norm, test=op_version (#30065)

* register instance norm, test=op_version

* add trace op_register_version and fix version bug; test=op_version (#30000)

* fix a bug in op_version_registry, test=develop, test=op_version (#29994)

* Add version checking, test=op_version (#30129)

* fix a bug in gaussian_random_op version, test=release/2.0
Co-authored-by: LielinJiang <50691816+LielinJiang@users.noreply.github.com>
Co-authored-by: cc <52520497+juncaipeng@users.noreply.github.com>
Co-authored-by: Qi Li <qili93@qq.com>
Co-authored-by: Jack Zhou <zhoushunjie@baidu.com>
Co-authored-by: Guo Sheng <whucsgs@163.com>
Co-authored-by: wangxinxin08 <69842442+wangxinxin08@users.noreply.github.com>
Co-authored-by: wawltor <fangzeyang0904@hotmail.com>
Co-authored-by: FlyingQianMM <245467267@qq.com>
Co-authored-by: ceci3 <ceci3@users.noreply.github.com>
Co-authored-by: hutuxian <hutuxian2011@sina.cn>
Co-authored-by: chalsliu <45041955+chalsliu@users.noreply.github.com>
Co-authored-by: wangguanzhong <jerrywgz@126.com>
Co-authored-by: ShenLiang <shenliang03@baidu.com>
Co-authored-by: yinhaofeng <66763551+yinhaofeng@users.noreply.github.com>
Co-authored-by: channings <chenlingchi@baidu.com>
Co-authored-by: chentianyu03 <chentianyu03@baidu.com>
Co-authored-by: ruri <shipeng1108@163.com>
Parent 0fbfbeac
......@@ -244,5 +244,5 @@ REGISTER_PASS_CAPABILITY(conv_eltwiseadd_affine_channel_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.LE("conv2d", 1)
.EQ("elementwise_add", 0)
.LE("elementwise_add", 1)
.EQ("affine_channel", 0));
......@@ -389,5 +389,5 @@ REGISTER_PASS_CAPABILITY(conv_eltwiseadd_bn_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.LE("conv2d", 1)
.EQ("elementwise_add", 0)
.LE("elementwise_add", 1)
.EQ("batch_norm", 0));
......@@ -122,6 +122,6 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add2_act_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.LE("conv2d", 1)
.EQ("elementwise_add", 0)
.LE("elementwise_add", 1)
.EQ("relu", 0)
.EQ("identity", 0));
......@@ -109,6 +109,6 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add_act_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.LE("conv2d", 1)
.EQ("elementwise_add", 0)
.LE("elementwise_add", 1)
.EQ("relu", 0)
.EQ("identity", 0));
......@@ -95,4 +95,4 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.LE("conv2d", 1)
.EQ("elementwise_add", 0));
.LE("elementwise_add", 1));
......@@ -263,6 +263,6 @@ REGISTER_PASS_CAPABILITY(embedding_fc_lstm_fuse_pass)
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("lookup_table_v2", 0)
.EQ("mul", 0)
.EQ("elementwise_add", 0)
.LE("elementwise_add", 1)
.EQ("lstm", 0)
.EQ("fused_embedding_fc_lstm", 0));
......@@ -187,6 +187,6 @@ REGISTER_PASS_CAPABILITY(fc_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("mul", 0)
.EQ("elementwise_add", 0)
.LE("elementwise_add", 1)
.EQ("relu", 0)
.EQ("fc", 0));
......@@ -203,11 +203,11 @@ REGISTER_PASS_CAPABILITY(mul_gru_fuse_pass)
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("mul", 0)
.EQ("gru", 0)
.EQ("fusion_gru", 0));
.LE("fusion_gru", 1));
REGISTER_PASS_CAPABILITY(fc_gru_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("mul", 0)
.EQ("elementwise_add", 0)
.LE("elementwise_add", 1)
.EQ("gru", 0)
.EQ("fusion_gru", 0));
.LE("fusion_gru", 1));
......@@ -202,7 +202,7 @@ REGISTER_PASS_CAPABILITY(fc_lstm_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("mul", 0)
.EQ("elementwise_add", 0)
.LE("elementwise_add", 1)
.EQ("lstm", 0)
.EQ("fusion_lstm", 0));
REGISTER_PASS_CAPABILITY(mul_lstm_fuse_pass)
......
......@@ -227,7 +227,7 @@ REGISTER_PASS(map_matmul_to_mul_pass, paddle::framework::ir::MapMatmul2MulPass);
REGISTER_PASS_CAPABILITY(map_matmul_to_mul_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("matmul", 0)
.LE("matmul", 1)
.EQ("mul", 0));
REGISTER_PASS(squeeze2_matmul_fuse_pass,
......@@ -235,7 +235,7 @@ REGISTER_PASS(squeeze2_matmul_fuse_pass,
REGISTER_PASS_CAPABILITY(squeeze2_matmul_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("matmul", 0)
.LE("matmul", 1)
.EQ("squeeze2", 0)
.EQ("mul", 0));
......@@ -244,6 +244,6 @@ REGISTER_PASS(reshape2_matmul_fuse_pass,
REGISTER_PASS_CAPABILITY(reshape2_matmul_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("matmul", 0)
.LE("matmul", 1)
.EQ("reshape2", 0)
.EQ("mul", 0));
......@@ -153,7 +153,7 @@ REGISTER_PASS_CAPABILITY(conv_bias_mkldnn_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.LE("conv2d", 1)
.EQ("elementwise_add", 0));
.LE("elementwise_add", 1));
REGISTER_PASS(conv_transpose_bias_mkldnn_fuse_pass,
paddle::framework::ir::Conv2DTransposeBiasFusePass);
......@@ -161,7 +161,7 @@ REGISTER_PASS_CAPABILITY(conv_transpose_bias_mkldnn_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.LE("conv2d_transpose", 1)
.EQ("elementwise_add", 0));
.LE("elementwise_add", 1));
REGISTER_PASS(conv3d_bias_mkldnn_fuse_pass,
paddle::framework::ir::Conv3DBiasFusePass);
......@@ -228,20 +228,19 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseConvAsX(
pattern->NewNode(elementwise_add_pattern.elementwise_add_y_repr()));
conv_output->AsIntermediate();
-  auto get_node_from_elementwise_add =
-      [&elementwise_add_pattern](
-          const GraphPatternDetector::subgraph_t& subgraph)
+  auto get_node_from_elementwise_add = [&elementwise_add_pattern](
+      const GraphPatternDetector::subgraph_t& subgraph)
       -> std::tuple<Node*, Node*, Node*> {
-    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op,
-                              elementwise_add_pattern);
-    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_y, elementwise_add_y,
-                              elementwise_add_pattern);
-    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out,
-                              elementwise_add_pattern);
-    return std::make_tuple(elementwise_add_op, elementwise_add_y,
-                           elementwise_add_out);
-  };
+    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op,
+                              elementwise_add_pattern);
+    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_y, elementwise_add_y,
+                              elementwise_add_pattern);
+    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out,
+                              elementwise_add_pattern);
+    return std::make_tuple(elementwise_add_op, elementwise_add_y,
+                           elementwise_add_out);
+  };
return ExecuteHandleOnGraph<IdentityFuseHandle>(
&gpd, graph_with_stats,
......@@ -266,20 +265,19 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseConvAsY(
conv_output);
conv_output->AsIntermediate();
-  auto get_node_from_elementwise_add =
-      [&elementwise_add_pattern](
-          const GraphPatternDetector::subgraph_t& subgraph)
+  auto get_node_from_elementwise_add = [&elementwise_add_pattern](
+      const GraphPatternDetector::subgraph_t& subgraph)
       -> std::tuple<Node*, Node*, Node*> {
-    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op,
-                              elementwise_add_pattern);
-    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x,
-                              elementwise_add_pattern);
-    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out,
-                              elementwise_add_pattern);
-    return std::make_tuple(elementwise_add_op, elementwise_add_x,
-                           elementwise_add_out);
-  };
+    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op,
+                              elementwise_add_pattern);
+    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x,
+                              elementwise_add_pattern);
+    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out,
+                              elementwise_add_pattern);
+    return std::make_tuple(elementwise_add_op, elementwise_add_x,
+                           elementwise_add_out);
+  };
return ExecuteHandleOnGraph<IdentityFuseHandle>(
&gpd, graph_with_stats,
......@@ -306,17 +304,16 @@ GraphWithStats ResidualConnectionMKLDNNFusePass::FuseProjectionConv(
conv_x_output->AsIntermediate();
conv_y_output->AsIntermediate();
-  auto get_node_from_elementwise_add =
-      [&elementwise_add_pattern](
-          const GraphPatternDetector::subgraph_t& subgraph)
+  auto get_node_from_elementwise_add = [&elementwise_add_pattern](
+      const GraphPatternDetector::subgraph_t& subgraph)
       -> std::tuple<Node*, Node*> {
-    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op,
-                              elementwise_add_pattern);
-    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out,
-                              elementwise_add_pattern);
-    return std::make_tuple(elementwise_add_op, elementwise_add_out);
-  };
+    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op,
+                              elementwise_add_pattern);
+    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out,
+                              elementwise_add_pattern);
+    return std::make_tuple(elementwise_add_op, elementwise_add_out);
+  };
return ExecuteHandleOnGraph<ProjectionFuseHandle>(
&gpd, graph_with_stats,
......@@ -351,4 +348,4 @@ REGISTER_PASS_CAPABILITY(conv_elementwise_add_mkldnn_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.LE("conv2d", 1)
.EQ("elementwise_add", 0));
.LE("elementwise_add", 1));
......@@ -103,6 +103,6 @@ REGISTER_PASS(matmul_transpose_reshape_fuse_pass,
REGISTER_PASS_CAPABILITY(matmul_transpose_reshape_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("matmul", 0)
.LE("matmul", 1)
.EQ("transpose", 0)
.EQ("reshape", 0));
......@@ -221,5 +221,5 @@ REGISTER_PASS_CAPABILITY(mkldnn_inplace_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("softmax", 0)
.EQ("elementwise_add", 0)
.LE("elementwise_add", 1)
.EQ("tanh", 0));
......@@ -96,4 +96,4 @@ REGISTER_PASS_CAPABILITY(scale_matmul_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("scale", 0)
.EQ("matmul", 0));
.LE("matmul", 1));
......@@ -716,9 +716,9 @@ REGISTER_PASS_CAPABILITY(multihead_matmul_fuse_pass_v2)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("mul", 0)
.EQ("elementwise_add", 0)
.LE("elementwise_add", 1)
.EQ("reshape2", 0)
.EQ("transpose2", 0)
.EQ("scale", 0)
.EQ("matmul", 0)
.LE("matmul", 1)
.EQ("softmax", 0));
......@@ -262,7 +262,7 @@ REGISTER_PASS_CAPABILITY(seq_concat_fc_fuse_pass)
.EQ("sequence_expand", 0)
.EQ("concat", 0)
.EQ("mul", 0)
.EQ("elementwise_add", 0)
.LE("elementwise_add", 1)
.EQ("sigmoid", 0)
.EQ("tanh", 0)
.EQ("relu", 0)
......
......@@ -106,5 +106,5 @@ REGISTER_PASS_CAPABILITY(seqconv_eltadd_relu_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("sequence_conv", 0)
.EQ("elementwise_add", 0)
.LE("elementwise_add", 1)
.EQ("relu", 0));
......@@ -193,5 +193,5 @@ REGISTER_PASS(skip_layernorm_fuse_pass,
REGISTER_PASS_CAPABILITY(skip_layernorm_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("elementwise_add", 0)
.LE("elementwise_add", 1)
.EQ("layer_norm", 0));
......@@ -389,10 +389,10 @@ REGISTER_PASS(squared_mat_sub_fuse_pass,
REGISTER_PASS_CAPABILITY(squared_mat_sub_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("matmul", 0)
.LE("matmul", 1)
.EQ("matmul_v2", 0)
.EQ("square", 0)
.EQ("elementwise_mul", 0)
.EQ("elementwise_sub", 0)
.LE("elementwise_mul", 1)
.LE("elementwise_sub", 1)
.EQ("fill_constant", 1)
.EQ("fusion_squared_mat_sub", 0));
......@@ -131,4 +131,4 @@ REGISTER_PASS_CAPABILITY(unsqueeze2_eltwise_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.EQ("unsqueeze2", 0)
.EQ("elementwise_mul", 0));
.LE("elementwise_mul", 1));
......@@ -18,29 +18,6 @@ namespace paddle {
namespace framework {
namespace compatible {
- namespace {
- template <OpUpdateType type__, typename InfoType>
- OpUpdate<InfoType, type__>* new_update(InfoType&& info) {
-   return new OpUpdate<InfoType, type__>(info);
- }
- }
- OpVersionDesc&& OpVersionDesc::ModifyAttr(const std::string& name,
-                                           const std::string& remark,
-                                           const OpAttrVariantT& default_value) {
-   infos_.emplace_back(new_update<OpUpdateType::kModifyAttr>(
-       OpAttrInfo(name, remark, default_value)));
-   return std::move(*this);
- }
- OpVersionDesc&& OpVersionDesc::NewAttr(const std::string& name,
-                                        const std::string& remark,
-                                        const OpAttrVariantT& default_value) {
-   infos_.emplace_back(new_update<OpUpdateType::kNewAttr>(
-       OpAttrInfo(name, remark, default_value)));
-   return std::move(*this);
- }
OpVersionDesc&& OpVersionDesc::NewInput(const std::string& name,
const std::string& remark) {
infos_.emplace_back(
......
......@@ -118,13 +118,44 @@ class OpUpdate : public OpUpdateBase {
OpUpdateType type_;
};
+ template <OpUpdateType type__, typename InfoType>
+ OpUpdate<InfoType, type__>* new_update(InfoType&& info) {
+   return new OpUpdate<InfoType, type__>(info);
+ }
+ template <typename T>
+ OpAttrVariantT op_attr_wrapper(const T& val) {
+   return OpAttrVariantT{val};
+ }
+ template <int N>
+ OpAttrVariantT op_attr_wrapper(const char (&val)[N]) {
+   PADDLE_ENFORCE_EQ(
+       val[N - 1], 0,
+       platform::errors::InvalidArgument(
+           "The argument of operator register %c is illegal.", val[N - 1]));
+   return OpAttrVariantT{std::string{val}};
+ }
class OpVersionDesc {
 public:
  /* Compatibility upgrade */
+ template <typename T>
  OpVersionDesc&& ModifyAttr(const std::string& name, const std::string& remark,
-                            const OpAttrVariantT& default_value);
+                            const T& default_value) {
+   infos_.emplace_back(new_update<OpUpdateType::kModifyAttr>(
+       OpAttrInfo(name, remark, op_attr_wrapper(default_value))));
+   return std::move(*this);
+ }
+ template <typename T>
  OpVersionDesc&& NewAttr(const std::string& name, const std::string& remark,
-                         const OpAttrVariantT& default_value);
+                         const T& default_value) {
+   infos_.emplace_back(new_update<OpUpdateType::kNewAttr>(
+       OpAttrInfo(name, remark, op_attr_wrapper(default_value))));
+   return std::move(*this);
+ }
OpVersionDesc&& NewInput(const std::string& name, const std::string& remark);
OpVersionDesc&& NewOutput(const std::string& name, const std::string& remark);
OpVersionDesc&& BugfixWithBehaviorChanged(const std::string& remark);
......
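This header change is the reason call sites elsewhere in the diff must now pass typed defaults: `ModifyAttr`/`NewAttr` become templates, and the array-reference overload of `op_attr_wrapper` catches string-literal defaults before they can decay to `const char*` (which could otherwise pick an unintended variant alternative such as `bool`). A standalone sketch of that overload-resolution trick, with `describe` standing in for `op_attr_wrapper`:

```cpp
#include <iostream>
#include <string>

// `describe` stands in for op_attr_wrapper; the real function returns an
// OpAttrVariantT (a variant over the supported attribute types) instead of
// printing.
template <typename T>
void describe(const T& /*val*/) {
  std::cout << "generic overload: value kept as its own type\n";
}

template <int N>
void describe(const char (&val)[N]) {
  // String literals bind here before decaying to const char*; the real
  // overload also enforces val[N - 1] == 0 and stores std::string{val}.
  std::cout << "array overload: \"" << val << "\" -> std::string\n";
}

int main() {
  describe(3);           // int default, e.g. NewAttr("dtype", ..., 3)
  describe(1.0f);        // float default, e.g. NewAttr("Scale_y", ..., 1.0f)
  describe("bilinear");  // literal default, e.g. NewAttr("mode", ..., "bilinear")
}
```

The array overload wins for literals because partial ordering prefers `const char (&)[N]` over the generic `const T&` when both match exactly.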
......@@ -390,8 +390,8 @@ REGISTER_PASS_CAPABILITY(tensorrt_subgraph_pass)
.EQ("concat", 0)
.EQ("tanh", 0)
.EQ("pad", 0)
.EQ("elementwise_add", 0)
.EQ("elementwise_mul", 0)
.LE("elementwise_add", 1)
.LE("elementwise_mul", 1)
.EQ("prelu", 0)
.LE("conv2d_transpose", 1)
.LE("leaky_relu", 1)
......@@ -399,8 +399,8 @@ REGISTER_PASS_CAPABILITY(tensorrt_subgraph_pass)
.EQ("shuffle_channel", 0)
.EQ("swish", 0)
.EQ("split", 0)
.EQ("instance_norm", 0)
.LE("instance_norm", 1)
.EQ("gelu", 0)
.EQ("layer_norm", 0)
.EQ("scale", 0)
.EQ("matmul", 0));
.LE("matmul", 1));
......@@ -160,12 +160,37 @@ REGISTER_OPERATOR(
REGISTER_OP_CPU_KERNEL(allclose, ops::AllcloseKernel<CPU, float>,
ops::AllcloseKernel<CPU, double>);
/* ========================== register checkpoint ===========================*/
REGISTER_OP_VERSION(allclose)
.AddCheckpoint(
R"ROC(
Upgrade allclose add 2 attributes [atol, rtol].
)ROC",
R"ROC(Upgrade allclose, add two new inputs [Rtol] and [Atol].)ROC",
paddle::framework::compatible::OpVersionDesc()
.NewInput("Rtol",
"The added input 'Rtol' is not"
"dispensable.")
.NewInput("Atol",
"The added input 'Atol' is not"
"dispensable."))
.AddCheckpoint(
R"ROC(Delete two float attributes [rtol] and [atol],
then add 2 string attributes [atol, rtol]. Don't be surprised.
This is because float cannot represent high-precision
floating-point values, and our framework doesn't support
the use of double attributes. As a result, string instead
of double is used here to represent high-precision
floating-point values.
)ROC",
paddle::framework::compatible::OpVersionDesc()
.DeleteAttr("rtol",
"The attribute 'rtol' is deleted."
"The reason why it is deleted is that"
"attributes do not support a float64 value"
"and it is changed to a tensor.")
.DeleteAttr("atol",
"The attribute 'atol' is deleted."
"The reason why it is deleted is that"
"attributes do not support a float64 value"
"and it is changed to a tensor.")
.NewAttr("rtol",
"(string) The relative tolerance. Default: :math:`1e-5` .",
std::string("1e-5"))
......
......@@ -44,8 +44,8 @@ REGISTER_OP_VERSION(arg_max)
false)
.ModifyAttr(
"dtype",
"change the default value of dtype, the older version "
"is -1, means return the int64 indices."
"The new version is 3, return the int64 indices directly."
"And supporting the dtype of -1 in new version.",
"Change the default value of dtype from -1 to 3"
", means return the int64 indices directly. The rearse why "
"changing the default value is that the int64 value in "
"VarType is 3 in the frameworke.proto.",
3));
......@@ -44,8 +44,8 @@ REGISTER_OP_VERSION(arg_min)
false)
.ModifyAttr(
"dtype",
"change the default value of dtype, the older version "
"is -1, means return the int64 indices."
"The new version is 3, return the int64 indices directly."
"And supporting the dtype of -1 in new version.",
"Change the default value of dtype from -1 to 3"
", means return the int64 indices directly. The rearse why "
"changing the default value is that the int64 value in "
"VarType is 3 in the frameworke.proto.",
3));
......@@ -15,6 +15,7 @@
#include <sstream>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/operators/math/math_function.h"
......@@ -297,3 +298,14 @@ REGISTER_OP_CUDA_KERNEL(
ops::CoalesceTensorOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::CoalesceTensorOpKernel<paddle::platform::CUDADeviceContext, double>);
#endif
REGISTER_OP_VERSION(coalesce_tensor)
.AddCheckpoint(
R"ROC(
Upgrade coalesce_tensor: add a new attribute [use_align].)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"use_align",
"In order to optionally take memory alignment into account when "
"coalescing tensors. The default value is true to be compatible "
"with before.",
true));
......@@ -133,9 +133,9 @@ class CompareOp : public framework::OperatorWithKernel {
REGISTER_OP_VERSION(op_type) \
.AddCheckpoint( \
R"ROC(Upgrade compare ops, add a new attribute [force_cpu])ROC", \
-        paddle::framework::compatible::OpVersionDesc().NewAttr( \
+        paddle::framework::compatible::OpVersionDesc().ModifyAttr( \
             "force_cpu", \
-            "In order to force fill output variable to cpu memory.", \
+            "In order to force fill output variable to gpu memory.", \
false));
#define REGISTER_COMPARE_OP(op_type, _equation) \
......
......@@ -578,4 +578,37 @@ REGISTER_OP_VERSION(conv_transpose)
"output_padding",
"In order to add additional size to one side of each dimension "
"in the output",
-            {}));
+            std::vector<int>{}));
REGISTER_OP_VERSION(conv2d_transpose)
.AddCheckpoint(
R"ROC(
Upgrade conv2d transpose to add a new attribute [output_padding].
)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"output_padding",
"In order to add additional size to one side of each dimension "
"in the output",
std::vector<int>{}));
REGISTER_OP_VERSION(conv3d_transpose)
.AddCheckpoint(
R"ROC(
Upgrade conv3d transpose to add a new attribute [output_padding].
)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"output_padding",
"In order to add additional size to one side of each dimension "
"in the output",
std::vector<int>{}));
REGISTER_OP_VERSION(depthwise_conv2d_transpose)
.AddCheckpoint(
R"ROC(
Upgrade depthwise conv2d transpose to add a new attribute [output_padding].
)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"output_padding",
"In order to add additional size to one side of each dimension "
"in the output",
std::vector<int>{}));
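The switch here from a bare `{}` default to `std::vector<int>{}` (and likewise in the gaussian_random and unique hunks below) follows directly from the templated signatures above: a braced-init-list has no type, so template argument deduction for the default value fails. A minimal sketch of the difference, where `NewAttrOld`/`NewAttrNew` are hypothetical stand-ins for the two signatures:

```cpp
#include <string>
#include <vector>

// Old, non-template signature: a bare {} converted to the parameter type
// (std::vector<int> stands in here for the OpAttrVariantT parameter).
void NewAttrOld(const std::string& /*name*/, const std::vector<int>& /*def*/) {}

// New, templated signature mirroring the op_version_registry.h change above.
template <typename T>
void NewAttrNew(const std::string& /*name*/, const T& /*def*/) {}

int main() {
  NewAttrOld("output_padding", {});  // OK: {} converts to the parameter type
  // NewAttrNew("output_padding", {});               // error: cannot deduce T
  NewAttrNew("output_padding", std::vector<int>{});  // OK: type is explicit
}
```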
......@@ -303,6 +303,13 @@ REGISTER_OPERATOR(
REGISTER_OP_CPU_KERNEL(generate_proposals, ops::GenerateProposalsKernel<float>,
ops::GenerateProposalsKernel<double>);
REGISTER_OP_VERSION(generate_proposals)
.AddCheckpoint(
R"ROC(
Incompatible upgrade of output [RpnRoisLod])ROC",
paddle::framework::compatible::OpVersionDesc().DeleteOutput(
"RpnRoisLod",
"Delete RpnRoisLod due to incorrect output name and "
"it is not used in object detection models yet."))
.AddCheckpoint(
R"ROC(
Upgrade generate_proposals add a new output [RpnRoisNum])ROC",
......
include(operators)
- register_operators()
if(WITH_UNITY_BUILD)
# Load Unity Build rules for operators in paddle/fluid/operators/elementwise.
include(unity_build_rule.cmake)
endif()
+ register_operators(DEPS op_version_registry)
cc_test(test_elementwise_add_op_inplace SRCS test_elementwise_add_op_inplace.cc DEPS op_registry elementwise_add_op scope device_context enforce executor)
cc_test(test_elementwise_div_grad_grad SRCS test_elementwise_div_grad_grad.cc DEPS op_registry elementwise_div_op scope device_context enforce executor)
......
......@@ -17,7 +17,6 @@ limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/elementwise/elementwise_op.h"
#include "paddle/fluid/platform/complex128.h"
#include "paddle/fluid/platform/complex64.h"
......@@ -178,3 +177,13 @@ REGISTER_OP_CPU_KERNEL(
paddle::platform::complex64>,
ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext,
paddle::platform::complex128>);
REGISTER_OP_VERSION(elementwise_add)
.AddCheckpoint(
R"ROC(Register elementwise_add for adding the attribute of
Scale_y)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"Scale_y",
"In order to support the function of scaling the input Y when "
"using the operator of elementwise_add.",
1.0f));
......@@ -15,6 +15,7 @@ limitations under the License. */
#include "paddle/fluid/operators/elementwise/elementwise_div_op.h"
#include <memory>
#include <string>
#include "paddle/fluid/operators/elementwise/elementwise_op.h"
#include "paddle/fluid/platform/complex128.h"
#include "paddle/fluid/platform/complex64.h"
......@@ -162,3 +163,12 @@ REGISTER_OP_CPU_KERNEL(
paddle::platform::complex64>,
ops::ElementwiseDivDoubleGradKernel<paddle::platform::CPUDeviceContext,
paddle::platform::complex128>);
REGISTER_OP_VERSION(elementwise_div)
.AddCheckpoint(
R"ROC(Register elementwise_div for adding the attribute of Scale_y)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"Scale_y",
"In order to support the function of scaling the input Y when "
"using the operator of elementwise_div.",
1.0f));
......@@ -69,3 +69,12 @@ REGISTER_OP_CPU_KERNEL(
ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext, int>,
ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext,
int64_t>);
REGISTER_OP_VERSION(elementwise_floordiv)
.AddCheckpoint(
R"ROC(Register elementwise_floordiv for adding the attribute of Scale_y)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"Scale_y",
"In order to support the function of scaling the input Y when "
"using the operator of elementwise_floordiv.",
1.0f));
......@@ -94,3 +94,12 @@ REGISTER_OP_CPU_KERNEL(
ops::ElementwiseMaxGradKernel<paddle::platform::CPUDeviceContext, double>,
ops::ElementwiseMaxGradKernel<paddle::platform::CPUDeviceContext, int>,
ops::ElementwiseMaxGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
REGISTER_OP_VERSION(elementwise_max)
.AddCheckpoint(
R"ROC(Register elementwise_max for adding the attribute of Scale_y)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"Scale_y",
"In order to support the function of scaling the input Y when "
"using the operator of elementwise_max.",
1.0f));
......@@ -94,3 +94,12 @@ REGISTER_OP_CPU_KERNEL(
ops::ElementwiseMinGradKernel<paddle::platform::CPUDeviceContext, double>,
ops::ElementwiseMinGradKernel<paddle::platform::CPUDeviceContext, int>,
ops::ElementwiseMinGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
REGISTER_OP_VERSION(elementwise_min)
.AddCheckpoint(
R"ROC(Register elementwise_min for adding the attribute of Scale_y)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"Scale_y",
"In order to support the function of scaling the input Y when "
"using the operator of elementwise_min.",
1.0f));
......@@ -69,3 +69,12 @@ REGISTER_OP_CPU_KERNEL(
ops::ElementwiseModKernel<paddle::platform::CPUDeviceContext, int64_t>,
ops::ElementwiseModFPKernel<paddle::platform::CPUDeviceContext, float>,
ops::ElementwiseModFPKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_VERSION(elementwise_mod)
.AddCheckpoint(
R"ROC(Register elementwise_mod for adding the attribute of Scale_y)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"Scale_y",
"In order to support the function of scaling the input Y when "
"using the operator of elementwise_mod.",
1.0f));
......@@ -161,3 +161,12 @@ REGISTER_OP_CPU_KERNEL(
paddle::platform::complex64>,
ops::ElementwiseMulDoubleGradKernel<paddle::platform::CPUDeviceContext,
paddle::platform::complex128>);
REGISTER_OP_VERSION(elementwise_mul)
.AddCheckpoint(
R"ROC(Register elementwise_mul for adding the attribute of Scale_y)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"Scale_y",
"In order to support the function of scaling the input Y when "
"using the operator of elementwise_mul.",
1.0f));
......@@ -22,6 +22,7 @@ limitations under the License. */
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/common_infer_shape_functions.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
......
......@@ -83,3 +83,12 @@ REGISTER_OP_CPU_KERNEL(
ops::ElementwisePowGradKernel<paddle::platform::CPUDeviceContext, double>,
ops::ElementwisePowGradKernel<paddle::platform::CPUDeviceContext, int>,
ops::ElementwisePowGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
REGISTER_OP_VERSION(elementwise_pow)
.AddCheckpoint(
R"ROC(Register elementwise_pow for adding the attribute of Scale_y)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"Scale_y",
"In order to support the function of scaling the input Y when "
"using the operator of elementwise_pow.",
1.0f));
......@@ -156,3 +156,12 @@ REGISTER_OP_CPU_KERNEL(
paddle::platform::complex64>,
ops::ElementwiseSubDoubleGradKernel<paddle::platform::CPUDeviceContext,
paddle::platform::complex128>);
REGISTER_OP_VERSION(elementwise_sub)
.AddCheckpoint(
R"ROC(Register elementwise_sub for adding the attribute of Scale_y)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"Scale_y",
"In order to support the function of scaling the input Y when "
"using the operator of elementwise_sub.",
1.0f));
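Each elementwise op above registers the same checkpoint: a new `Scale_y` attribute whose default of `1.0f` is the neutral value, so a version-0 program upgraded to version 1 computes exactly what it did before. A hedged sketch of how such a default could be applied when replaying the checkpoint on an old attribute map (illustrative only, not Paddle's actual program converter):

```cpp
#include <iostream>
#include <map>
#include <string>

int main() {
  // Hypothetical attribute map of one elementwise_add loaded from an old
  // (version 0) program.
  std::map<std::string, float> attrs;  // old op carries no Scale_y at all
  int loaded_version = 0;
  const int kCurrentVersion = 1;
  if (loaded_version < kCurrentVersion) {
    // Replay the checkpoint: NewAttr("Scale_y", ..., 1.0f).
    attrs.emplace("Scale_y", 1.0f);  // neutral scale preserves old behavior
    loaded_version = kCurrentVersion;
  }
  std::cout << "Scale_y = " << attrs.at("Scale_y") << "\n";  // prints 1
}
```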
......@@ -15,6 +15,7 @@ limitations under the License. */
#include "paddle/fluid/operators/fake_dequantize_op.h"
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_version_registry.h"
namespace paddle {
namespace operators {
......@@ -238,3 +239,10 @@ REGISTER_OPERATOR(
REGISTER_OP_CPU_KERNEL(fake_channel_wise_dequantize_max_abs,
ops::FakeChannelWiseDequantizeMaxAbsKernel<CPU, float>,
ops::FakeChannelWiseDequantizeMaxAbsKernel<CPU, double>);
REGISTER_OP_VERSION(fake_channel_wise_dequantize_max_abs)
.AddCheckpoint(
R"ROC(add new attributes [quant_axis] for applying per-channel "
"dequantization to conv2d_tranpose and mul ops.)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"quant_axis", "The axis for dequantization.", 0));
......@@ -16,6 +16,7 @@ limitations under the License. */
#include <algorithm>
#include <string>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/clip_op.h"
#include "paddle/fluid/platform/transform.h"
......@@ -805,3 +806,10 @@ REGISTER_OPERATOR(fake_channel_wise_quantize_dequantize_abs_max,
REGISTER_OP_CPU_KERNEL(
fake_channel_wise_quantize_dequantize_abs_max,
ops::FakeChannelWiseQuantizeDequantizeAbsMaxKernel<CPU, float>);
REGISTER_OP_VERSION(fake_channel_wise_quantize_abs_max)
.AddCheckpoint(
R"ROC(add new attributes [quant_axis] for applying per-channel "
"quantization to conv2d_tranpose and mul ops.)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"quant_axis", "The axis for quantization.", 0));
......@@ -13,10 +13,10 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/flip_op.h"
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/framework/op_version_registry.h"
namespace paddle {
namespace operators {
......@@ -154,3 +154,12 @@ REGISTER_OP_CPU_KERNEL(
ops::FlipKernel<paddle::platform::CPUDeviceContext, int32_t>,
ops::FlipKernel<paddle::platform::CPUDeviceContext, int64_t>,
ops::FlipKernel<paddle::platform::CPUDeviceContext, bool>);
/* ========================== register checkpoint ===========================*/
REGISTER_OP_VERSION(flip)
.AddCheckpoint(
R"ROC(Upgrade flip, add new attr [axis] and delete attr [dims].)ROC",
paddle::framework::compatible::OpVersionDesc()
.NewAttr("axis", "The added attr 'axis' doesn't set default value.",
boost::none)
.DeleteAttr("dims", "The attr 'dims' is deleted."));
......@@ -16,6 +16,7 @@ limitations under the License. */
#include <cstring> // for memcpy
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/jit/kernels.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/fc.h"
......@@ -479,3 +480,13 @@ REGISTER_OPERATOR(fusion_gru, ops::FusionGRUOp, ops::FusionGRUOpMaker);
REGISTER_OP_CPU_KERNEL(fusion_gru, ops::FusionGRUKernel<float>,
ops::FusionGRUKernel<double>);
/* ========================== register checkpoint ===========================*/
REGISTER_OP_VERSION(fusion_gru)
.AddCheckpoint(
R"ROC(Upgrade fusion_gru add a new attribute [Scale_weights])ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"Scale_weights",
"The added attribute 'Scale_weights' is not yet "
"registered.",
std::vector<float>{1.0f}));
......@@ -213,4 +213,4 @@ REGISTER_OP_VERSION(gaussian_random)
.ModifyAttr(
"shape",
"Add the default value of shape, the default value is {}.",
-        {}));
+        std::vector<int64_t>{}));
......@@ -16,6 +16,7 @@ limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif
......@@ -237,3 +238,11 @@ REGISTER_OP_CPU_KERNEL(
grid_sampler_grad,
ops::GridSampleGradOpKernel<paddle::platform::CPUDeviceContext, float>,
ops::GridSampleGradOpKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_VERSION(grid_sampler)
.AddCheckpoint(
R"ROC(
Upgrade grid_sampler add a new attribute [mode].
)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"mode", "In order to specify interpolation mode", "bilinear"));
......@@ -17,6 +17,7 @@ limitations under the License. */
#include <string>
#include <unordered_map>
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
namespace paddle {
......@@ -701,3 +702,20 @@ REGISTER_OP_CPU_KERNEL(
float>,
ops::InstanceNormDoubleGradKernel<paddle::platform::CPUDeviceContext,
double>);
REGISTER_OP_VERSION(instance_norm)
.AddCheckpoint(
R"ROC(
Change the 'dispensable' argument of inputs 'Bias' and 'Scale' from False to True in instance_norm.
)ROC",
paddle::framework::compatible::OpVersionDesc()
.ModifyAttr(
"Bias",
"The arg 'dispensable' of Input 'Bias' is changed: from "
"'False' to 'True'.",
true)
.ModifyAttr(
"Scale",
"The arg 'dispensable' of Input 'Scale' is changed: from "
"'False' to 'True'.",
true));
......@@ -14,6 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/linspace_op.h"
#include <string>
#include "paddle/fluid/framework/op_version_registry.h"
namespace paddle {
namespace operators {
......@@ -92,3 +93,11 @@ REGISTER_OP_CPU_KERNEL(linspace, ops::CPULinspaceKernel<float>,
ops::CPULinspaceKernel<int32_t>,
ops::CPULinspaceKernel<int64_t>,
ops::CPULinspaceKernel<double>);
REGISTER_OP_VERSION(linspace)
.AddCheckpoint(
R"ROC(
Upgrade linspace to add a new attribute [dtype].
)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"dtype", "In order to change output data type ", 5));
......@@ -16,6 +16,7 @@ limitations under the License. */
#include <utility>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/math/blas.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
......@@ -932,3 +933,14 @@ REGISTER_OP_CUDA_KERNEL(
ops::MatMulDoubleGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::MatMulDoubleGradKernel<paddle::platform::CUDADeviceContext, double>);
#endif
REGISTER_OP_VERSION(matmul)
.AddCheckpoint(
R"ROC(Register matmul for adding the attribute of
fused_reshape_Y)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"fused_reshape_Y",
"In order to support the function of fused the input Y "
" and input X into the input X when "
"using the operator of matmul, and get raw shape of input Y.",
std::vector<int>{}));
......@@ -11,6 +11,7 @@ limitations under the License. */
#include "paddle/fluid/operators/pixel_shuffle_op.h"
#include <memory>
#include "paddle/fluid/framework/op_version_registry.h"
namespace paddle {
namespace operators {
......@@ -185,3 +186,10 @@ REGISTER_OP_CPU_KERNEL(
pixel_shuffle_grad,
ops::PixelShuffleGradOpKernel<paddle::platform::CPUDeviceContext, float>,
ops::PixelShuffleGradOpKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_VERSION(pixel_shuffle)
.AddCheckpoint(
R"ROC(
Compatible upgrade of pixel_shuffle, add a new attribute [data_format])ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"data_format", "Specify the data format of the input data", true));
......@@ -13,6 +13,7 @@
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/tensor_formatter.h"
namespace paddle {
......@@ -173,3 +174,11 @@ REGISTER_OPERATOR(print, ops::PrintOp, ops::PrintOpProtoAndCheckMaker,
ops::PrintOpGradientMaker<paddle::framework::OpDesc>,
ops::PrintOpGradientMaker<paddle::imperative::OpBase>,
ops::PrintOpInferShape, ops::PrintOpVarTypeInference);
REGISTER_OP_VERSION(print)
.AddCheckpoint(
R"ROC(Upgrade print add a new attribute [print_tensor_layout] to "
"contorl whether to print tensor's layout.)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"print_tensor_layout", "Whether to print the tensor's layout.",
true));
......@@ -13,6 +13,7 @@ limitations under the License. */
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_version_registry.h"
namespace paddle {
namespace operators {
......@@ -178,3 +179,18 @@ REGISTER_OP_CPU_KERNEL(
rank_attention,
ops::RankAttentionKernel<paddle::platform::CPUDeviceContext, float>,
ops::RankAttentionKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_VERSION(rank_attention)
.AddCheckpoint(
R"ROC(
Upgrade rank_attention, add 1 output [InputHelp] and 1 attribute
[MaxSize].
)ROC",
paddle::framework::compatible::OpVersionDesc()
.NewOutput("InputHelp",
"Output tensor of rank_attention_Op operator "
"in order to assist calculation in the reverse process.")
.NewAttr(
"MaxSize",
"Forward calculation to set the pre-applied video memory size",
0));
......@@ -233,6 +233,13 @@ REGISTER_OP_CPU_KERNEL(
ops::CPUROIAlignGradOpKernel<paddle::platform::CPUDeviceContext, double>,
ops::CPUROIAlignGradOpKernel<paddle::platform::CPUDeviceContext, int>);
REGISTER_OP_VERSION(roi_align)
.AddCheckpoint(
R"ROC(
Incompatible upgrade of input [RpnRoisLod])ROC",
paddle::framework::compatible::OpVersionDesc().DeleteInput(
"RpnRoisLod",
"Delete RpnRoisLod due to incorrect input name and "
"it is not used in object detection models yet."))
.AddCheckpoint(
R"ROC(
Upgrade roi_align add a new input [RoisNum])ROC",
......
......@@ -227,6 +227,13 @@ REGISTER_OP_CPU_KERNEL(
ops::CPUROIPoolGradOpKernel<paddle::platform::CPUDeviceContext, double>,
ops::CPUROIPoolGradOpKernel<paddle::platform::CPUDeviceContext, int>);
REGISTER_OP_VERSION(roi_pool)
.AddCheckpoint(
R"ROC(
Incompatible upgrade of input [RpnRoisLod])ROC",
paddle::framework::compatible::OpVersionDesc().DeleteInput(
"RpnRoisLod",
"Delete RpnRoisLod due to incorrect input name and "
"it is not used in object detection models yet."))
.AddCheckpoint(
R"ROC(
Upgrade roi_pool add a new input [RoisNum])ROC",
......
......@@ -13,6 +13,7 @@
// limitations under the License.
#include "paddle/fluid/operators/trace_op.h"
#include "paddle/fluid/framework/op_version_registry.h"
namespace paddle {
namespace operators {
......@@ -89,13 +90,13 @@ class TraceOpMaker : public framework::OpProtoAndCheckerMaker {
R"DOC((int, default 0), the first axis of the 2-D planes from which the diagonals should be taken.
Can be either positive or negative. Default: 0.
)DOC")
-      .SetDefault(-2);
+      .SetDefault(0);
AddAttr<int>(
"axis2",
R"DOC((int, default 1), the second axis of the 2-D planes from which the diagonals should be taken.
Can be either positive or negative. Default: 1.
)DOC")
-      .SetDefault(-1);
+      .SetDefault(1);
AddComment(R"DOC(
Trace Operator.
Return the sum along diagonals of the input tensor.
......@@ -178,3 +179,21 @@ REGISTER_OP_CPU_KERNEL(
paddle::platform::complex64>,
ops::TraceGradKernel<paddle::platform::CPUDeviceContext,
paddle::platform::complex128>);
/* ========================== register checkpoint ===========================*/
REGISTER_OP_VERSION(trace)
.AddCheckpoint(
R"ROC(Upgrade trace add a new attribute [axis2])ROC",
paddle::framework::compatible::OpVersionDesc()
.NewAttr("axis1",
"The added attribute 'axis1' is not yet registered.",
std::vector<float>{0.0f})
.NewAttr("axis2",
"The added attribute 'axis2' is not yet registered.",
std::vector<float>{1.0f})
.DeleteAttr("dim1",
"The attribute 'dim1' is not recommend according to "
"the specification 2.0.")
.DeleteAttr("dim2",
"The attribute 'dim2' is not recommend according to "
"the specification 2.0."));
......@@ -184,7 +184,7 @@ REGISTER_OP_VERSION(unique)
.NewAttr("axis",
"The axis to apply unique. If None, the input will be "
"flattened.",
-             {})
+             std::vector<int>{})
.NewAttr("is_sorted",
"If True, the unique elements of X are in ascending order."
"Otherwise, the unique elements are not sorted.",
......