diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index 7f8da2da5a0d42ff065265c5d173d0e6167dc08a..3e71a0a592861b32861c0e402911e274208fe714 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -227,6 +227,12 @@ make mul_op USE_CPU_ONLY_OP(gather); ``` + 如果OP不带Kernel,则使用`USE_NO_KERNEL_OP`: + + ``` + USE_NO_KERNEL_OP(recurrent); + ``` + 使用`USE_OP`告知编译器需要链接该Op的目标文件,具体解释参考[代码注释](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h#L81)。 diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 6b4c612cd8d9263258e3987914c44002e7bca92c..c5d46622156c56acb98fb77e7db5ee7bca8c937a 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -182,7 +182,7 @@ static std::unique_ptr BackwardRecursive( }); // process recurrent gradient op as a special operator. - if (forwardOp.Type() == "recurrent_op") { + if (forwardOp.Type() == "recurrent") { // NOTE clean up cycle call somewhere (RNN's stepnet constains itself), or // this will result in infinite loop. 
const auto& rnnop = diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 2d09cde41e3f5086279f9441e0fdc52549bed5ab..572dff860a306bb03ba9e6702fec85e4a2ea1b54 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -199,6 +199,8 @@ class OpKernelRegistrar : public Registrar { USE_OP_DEVICE_KERNEL(op_type, GPU) #endif +#define USE_NO_KERNEL_OP(op_type) USE_OP_ITSELF(op_type); + #define USE_CPU_ONLY_OP(op_type) \ USE_OP_ITSELF(op_type); \ USE_OP_DEVICE_KERNEL(op_type, CPU); diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index 1eee9644babbdfac68821ca774845ad8ebbd5aee..069fb5e1abc657aa02a50fde352ce88d078c36e1 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -79,7 +79,7 @@ class MinusGradOp : public NetOp { } // namespace paddle USE_OP(scale); -USE_OP_ITSELF(identity); +USE_NO_KERNEL_OP(identity); namespace ops = paddle::operators; REGISTER_OP(minus, ops::MinusOp, ops::MinusOpMaker, minus_grad, ops::MinusGradOp); diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index 16bd249cb3d989c695ec9378f09d48833d70be58..e826703c60ca82e1fe690eb78c3d4f92981ef3a2 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -235,5 +235,5 @@ RecurrentGradientOp::RecurrentGradientOp( } // namespace paddle REGISTER_OP_WITHOUT_GRADIENT( - recurrent_op, paddle::operators::RecurrentOp, + recurrent, paddle::operators::RecurrentOp, paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker); diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 3bc150ccb7af2885439cc2344aa0db9ba3b1ca03..6896422617be0a3c73dc7b0d7cc1113075fa2f4b 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -39,12 +39,12 @@ USE_OP(sigmoid); USE_OP(softmax); USE_OP(rowwise_add); USE_OP(fill_zeros_like); -USE_OP_ITSELF(recurrent_op); +USE_NO_KERNEL_OP(recurrent); USE_OP(gaussian_random); USE_OP(uniform_random); 
USE_OP(lookup_table); USE_OP(scale); -USE_OP_ITSELF(identity); +USE_NO_KERNEL_OP(identity); USE_OP(minus); USE_CPU_ONLY_OP(gather); USE_CPU_ONLY_OP(scatter); diff --git a/python/paddle/v2/framework/op.py b/python/paddle/v2/framework/op.py index 6ac656321e72f5b0c91008091753ee50ac8200a6..e7e932f6fe46f2db8de37cecfd9875b310bdcbe5 100644 --- a/python/paddle/v2/framework/op.py +++ b/python/paddle/v2/framework/op.py @@ -179,7 +179,7 @@ class OperatorFactory(object): class __RecurrentOp__(object): __proto__ = None - type = 'recurrent_op' + type = 'recurrent' def __init__(self): # cache recurrent_op's proto