Commit 80eab822, authored by Leo Chen, committed by Zeng Jinle

Remove unused DefaultGradOpDescMaker in REGISTER_OPERATOR() (#19166)

* remove unused DefaultGradOpDescMaker in REGISTER_OPERATOR(), test=develop

* remove SplitIdsOpGradMaker since it is buggy and not tested, update spec file, test=develop
Parent commit: c70a97f4
attention_lstm
conv_shift conv_shift
cos_sim cos_sim
dequantize dequantize
fc fc
flatten flatten
fsp fsp
fused_embedding_fc_lstm
fused_embedding_seq_pool fused_embedding_seq_pool
fusion_gru
fusion_lstm
fusion_repeated_fc_relu
fusion_seqconv_eltadd_relu
fusion_seqexpand_concat_fc
fusion_seqpool_concat
fusion_squared_mat_sub
gru gru
lrn lrn
lstm_unit lstm_unit
......
...@@ -419,8 +419,7 @@ class AttentionLSTMKernel : public framework::OpKernel<T> { ...@@ -419,8 +419,7 @@ class AttentionLSTMKernel : public framework::OpKernel<T> {
namespace ops = paddle::operators;

// Register the fused attention_lstm operator. The old registration passed
// paddle::framework::DefaultGradOpDescMaker<true> as well, but that grad
// maker was unused for this fused op, so it is dropped here.
REGISTER_OPERATOR(attention_lstm, ops::AttentionLSTMOp,
                  ops::AttentionLSTMOpMaker);

// CPU kernels for float and double.
REGISTER_OP_CPU_KERNEL(attention_lstm, ops::AttentionLSTMKernel<float>,
                       ops::AttentionLSTMKernel<double>);
...@@ -81,27 +81,12 @@ class SplitIdsOpInferVarType : public framework::VarTypeInference { ...@@ -81,27 +81,12 @@ class SplitIdsOpInferVarType : public framework::VarTypeInference {
} }
}; };
// Gradient maker for the split_ids op: expresses the backward pass as a
// single "concat" op that stitches the gradients of the split outputs
// back together to form the gradient of the input "Ids".
// NOTE(review): the commit message above describes this maker as buggy and
// not covered by tests (the diff removes it) — do not rely on it as-is.
class SplitIdsOpGradMaker : public framework::SingleGradOpDescMaker {
public:
using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
protected:
// Builds and returns the descriptor of the generated backward op.
std::unique_ptr<framework::OpDesc> Apply() const override {
auto grad = new framework::OpDesc();
grad->SetType("concat");
grad->SetInput("X", OutputGrad("Out"));    // inputs: grads of every split output
grad->SetOutput("Out", InputGrad("Ids"));  // output: grad of the ids input
grad->SetAttr("axis", 0);  // assumes ids were split along axis 0 — TODO confirm
return std::unique_ptr<framework::OpDesc>(grad);
}
};
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators;

// Register split_ids without a grad op maker: the previous
// ops::SplitIdsOpGradMaker argument was removed because it was buggy and
// untested (see the commit message above).
REGISTER_OPERATOR(split_ids, ops::SplitIdsOp, ops::SplitIdsOpMaker,
                  ops::SplitIdsOpInferVarType);
REGISTER_OP_CPU_KERNEL( REGISTER_OP_CPU_KERNEL(
split_ids, ops::SplitIdsOpKernel<paddle::platform::CPUPlace, int64_t>, split_ids, ops::SplitIdsOpKernel<paddle::platform::CPUPlace, int64_t>,
......
...@@ -589,8 +589,7 @@ class FusedEmbeddingFCLSTMKernel : public framework::OpKernel<T> { ...@@ -589,8 +589,7 @@ class FusedEmbeddingFCLSTMKernel : public framework::OpKernel<T> {
namespace ops = paddle::operators;

// Register the fused embedding+FC+LSTM operator. The unused
// paddle::framework::DefaultGradOpDescMaker<true> argument is removed.
REGISTER_OPERATOR(fused_embedding_fc_lstm, ops::FusedEmbeddingFCLSTMOp,
                  ops::FusedEmbeddingFCLSTMOpMaker);
REGISTER_OP_CPU_KERNEL(fused_embedding_fc_lstm, REGISTER_OP_CPU_KERNEL(fused_embedding_fc_lstm,
ops::FusedEmbeddingFCLSTMKernel<float>, ops::FusedEmbeddingFCLSTMKernel<float>,
......
...@@ -396,7 +396,7 @@ class FusionGRUKernel : public framework::OpKernel<T> { ...@@ -396,7 +396,7 @@ class FusionGRUKernel : public framework::OpKernel<T> {
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators;

// Register the fused GRU operator. The unused
// paddle::framework::DefaultGradOpDescMaker<true> argument is removed.
REGISTER_OPERATOR(fusion_gru, ops::FusionGRUOp, ops::FusionGRUOpMaker);

// CPU kernels for float and double.
REGISTER_OP_CPU_KERNEL(fusion_gru, ops::FusionGRUKernel<float>,
                       ops::FusionGRUKernel<double>);
...@@ -474,8 +474,7 @@ class FuisonLSTMKernel : public framework::OpKernel<T> { ...@@ -474,8 +474,7 @@ class FuisonLSTMKernel : public framework::OpKernel<T> {
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators;

// Register the fused LSTM operator. The unused
// paddle::framework::DefaultGradOpDescMaker<true> argument is removed.
REGISTER_OPERATOR(fusion_lstm, ops::FusionLSTMOp, ops::FusionLSTMOpMaker);

// CPU kernels for float and double. ("FuisonLSTMKernel" is the class's
// actual (misspelled) name in this file; kept as-is so the symbol resolves.)
REGISTER_OP_CPU_KERNEL(fusion_lstm, ops::FuisonLSTMKernel<float>,
                       ops::FuisonLSTMKernel<double>);
...@@ -144,8 +144,7 @@ class FusionRepeatedFCReluKernel : public framework::OpKernel<T> { ...@@ -144,8 +144,7 @@ class FusionRepeatedFCReluKernel : public framework::OpKernel<T> {
namespace ops = paddle::operators;

// Register the fused repeated-FC+ReLU operator. The unused
// paddle::framework::DefaultGradOpDescMaker<true> argument is removed.
REGISTER_OPERATOR(fusion_repeated_fc_relu, ops::FusionRepeatedFCReluOp,
                  ops::FusionRepeatedFCReluOpMaker);
REGISTER_OP_CPU_KERNEL(fusion_repeated_fc_relu, REGISTER_OP_CPU_KERNEL(fusion_repeated_fc_relu,
ops::FusionRepeatedFCReluKernel<float>, ops::FusionRepeatedFCReluKernel<float>,
......
...@@ -220,8 +220,7 @@ class FusionSeqConvEltAddReluKernel : public framework::OpKernel<T> { ...@@ -220,8 +220,7 @@ class FusionSeqConvEltAddReluKernel : public framework::OpKernel<T> {
namespace ops = paddle::operators;

// Register the fused seqconv+eltadd+ReLU operator. The unused
// paddle::framework::DefaultGradOpDescMaker<true> argument is removed.
REGISTER_OPERATOR(fusion_seqconv_eltadd_relu, ops::FusionSeqConvEltAddReluOp,
                  ops::FusionSeqConvEltAddReluOpMaker);
REGISTER_OP_CPU_KERNEL(fusion_seqconv_eltadd_relu, REGISTER_OP_CPU_KERNEL(fusion_seqconv_eltadd_relu,
ops::FusionSeqConvEltAddReluKernel<float>, ops::FusionSeqConvEltAddReluKernel<float>,
......
...@@ -197,8 +197,7 @@ class FusionSeqExpandConcatFCOpKernel : public framework::OpKernel<T> { ...@@ -197,8 +197,7 @@ class FusionSeqExpandConcatFCOpKernel : public framework::OpKernel<T> {
namespace ops = paddle::operators;

// Register the fused seqexpand+concat+FC operator. The unused
// paddle::framework::DefaultGradOpDescMaker<true> argument is removed.
REGISTER_OPERATOR(fusion_seqexpand_concat_fc, ops::FusionSeqExpandConcatFCOp,
                  ops::FusionSeqExpandConcatFCOpMaker);
REGISTER_OP_CPU_KERNEL(fusion_seqexpand_concat_fc, REGISTER_OP_CPU_KERNEL(fusion_seqexpand_concat_fc,
ops::FusionSeqExpandConcatFCOpKernel<float>, ops::FusionSeqExpandConcatFCOpKernel<float>,
......
...@@ -126,8 +126,7 @@ class FusionSeqPoolConcatKernel : public framework::OpKernel<T> { ...@@ -126,8 +126,7 @@ class FusionSeqPoolConcatKernel : public framework::OpKernel<T> {
namespace ops = paddle::operators;

// Register the fused seqpool+concat operator. The unused
// paddle::framework::DefaultGradOpDescMaker<true> argument is removed.
REGISTER_OPERATOR(fusion_seqpool_concat, ops::FusionSeqPoolConcatOp,
                  ops::FusionSeqPoolConcatOpMaker);
REGISTER_OP_CPU_KERNEL(fusion_seqpool_concat, REGISTER_OP_CPU_KERNEL(fusion_seqpool_concat,
ops::FusionSeqPoolConcatKernel<float>, ops::FusionSeqPoolConcatKernel<float>,
......
...@@ -136,8 +136,7 @@ class FusionSquaredMatSubKernel : public framework::OpKernel<T> { ...@@ -136,8 +136,7 @@ class FusionSquaredMatSubKernel : public framework::OpKernel<T> {
namespace ops = paddle::operators;

// Register the fused squared-matrix-subtraction operator. The unused
// paddle::framework::DefaultGradOpDescMaker<true> argument is removed.
REGISTER_OPERATOR(fusion_squared_mat_sub, ops::FusionSquaredMatSubOp,
                  ops::FusionSquaredMatSubOpMaker);
REGISTER_OP_CPU_KERNEL(fusion_squared_mat_sub, REGISTER_OP_CPU_KERNEL(fusion_squared_mat_sub,
ops::FusionSquaredMatSubKernel<float>, ops::FusionSquaredMatSubKernel<float>,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.