Commit b3463bff authored by Tao Luo, committed by GitHub

Merge pull request #3851 from luotao1/no_kernel_op

package a new USE_NO_KERNEL_OP for USE_OP_ITSELF
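
In brief: this commit adds a `USE_NO_KERNEL_OP(op_type)` macro that simply forwards to `USE_OP_ITSELF(op_type)`, giving call sites for kernel-less operators a more descriptive name, and it renames the recurrent op's type string from `recurrent_op` to `recurrent`. A minimal sketch of the macro relationship, based on the `op_registry.h` hunk below:

```
// Added to paddle/framework/op_registry.h in this commit: an Op that registers
// no compute kernel only needs its "op itself" linked, so the new macro is a
// plain alias for USE_OP_ITSELF.
#define USE_NO_KERNEL_OP(op_type) USE_OP_ITSELF(op_type);

// Call sites become self-describing, e.g.:
//   USE_NO_KERNEL_OP(recurrent);   // previously: USE_OP_ITSELF(recurrent_op);
//   USE_NO_KERNEL_OP(identity);    // previously: USE_OP_ITSELF(identity);
```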
@@ -227,6 +227,12 @@ make mul_op
USE_CPU_ONLY_OP(gather);
```
If the Op has no kernel, use `USE_NO_KERNEL_OP`:
```
USE_NO_KERNEL_OP(recurrent);
```
Use `USE_OP` to tell the compiler that the Op's object file needs to be linked; see the [code comments](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h#L81) for a detailed explanation.
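
As a concrete illustration (mirroring the pybind hunk further down in this diff), a translation unit that depends on several operators declares them with the three macro flavors:

```
USE_OP(softmax);              // Op that has kernel(s)
USE_CPU_ONLY_OP(gather);      // Op whose kernel is CPU-only
USE_NO_KERNEL_OP(recurrent);  // Op with no kernel
```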
......
@@ -182,7 +182,7 @@ static std::unique_ptr<OperatorBase> BackwardRecursive(
});
// process recurrent gradient op as a special operator.
-  if (forwardOp.Type() == "recurrent_op") {
+  if (forwardOp.Type() == "recurrent") {
// NOTE clean up cycle call somewhere (RNN's stepnet contains itself), or
// this will result in infinite loop.
const auto& rnnop =
......
@@ -199,6 +199,8 @@ class OpKernelRegistrar : public Registrar {
USE_OP_DEVICE_KERNEL(op_type, GPU)
#endif
#define USE_NO_KERNEL_OP(op_type) USE_OP_ITSELF(op_type);
#define USE_CPU_ONLY_OP(op_type) \
  USE_OP_ITSELF(op_type);        \
  USE_OP_DEVICE_KERNEL(op_type, CPU);
......
@@ -79,7 +79,7 @@ class MinusGradOp : public NetOp {
}  // namespace paddle
USE_OP(scale);
-USE_OP_ITSELF(identity);
+USE_NO_KERNEL_OP(identity);
namespace ops = paddle::operators;
REGISTER_OP(minus, ops::MinusOp, ops::MinusOpMaker, minus_grad,
            ops::MinusGradOp<float>);
......
@@ -235,5 +235,5 @@ RecurrentGradientOp::RecurrentGradientOp(
}  // namespace paddle
REGISTER_OP_WITHOUT_GRADIENT(
-    recurrent_op, paddle::operators::RecurrentOp,
+    recurrent, paddle::operators::RecurrentOp,
    paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker);
@@ -39,12 +39,12 @@ USE_OP(sigmoid);
USE_OP(softmax);
USE_OP(rowwise_add);
USE_OP(fill_zeros_like);
-USE_OP_ITSELF(recurrent_op);
+USE_NO_KERNEL_OP(recurrent);
USE_OP(gaussian_random);
USE_OP(uniform_random);
USE_OP(lookup_table);
USE_OP(scale);
-USE_OP_ITSELF(identity);
+USE_NO_KERNEL_OP(identity);
USE_OP(minus);
USE_CPU_ONLY_OP(gather);
USE_CPU_ONLY_OP(scatter);
......
@@ -179,7 +179,7 @@ class OperatorFactory(object):
class __RecurrentOp__(object):
    __proto__ = None
-    type = 'recurrent_op'
+    type = 'recurrent'
    def __init__(self):
        # cache recurrent_op's proto
......