From f784741d4aad7d57417fc60d9f956320c4779a9f Mon Sep 17 00:00:00 2001
From: fengjiayi
Date: Fri, 11 Aug 2017 11:59:22 -0700
Subject: [PATCH] Refine macro

---
 paddle/framework/backward_test.cc        | 27 ++++++++++++------------
 paddle/framework/grad_op_builder_test.cc |  8 +++----
 paddle/framework/op_registry_test.cc     |  8 +++----
 paddle/framework/operator_test.cc        | 10 +++++----
 paddle/framework/pybind.cc               |  4 ++--
 paddle/operators/add_op.cc               |  4 ++--
 paddle/operators/cross_entropy_op.cc     |  4 ++--
 paddle/operators/fc_op.cc                |  3 ++-
 paddle/operators/fill_zeros_like_op.cc   |  3 ++-
 paddle/operators/gaussian_random_op.cc   |  3 ++-
 paddle/operators/mean_op.cc              |  4 ++--
 paddle/operators/mul_op.cc               |  4 ++--
 paddle/operators/recurrent_op.cc         |  5 +++--
 paddle/operators/rowwise_add_op.cc       |  3 ++-
 paddle/operators/sgd_op.cc               |  2 +-
 paddle/operators/sigmoid_op.cc           |  4 ++--
 paddle/operators/softmax_op.cc           |  4 ++--
 paddle/operators/uniform_random_op.cc    |  4 ++--
 18 files changed, 56 insertions(+), 48 deletions(-)

diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc
index 1677a3ed4c8..38194b716dc 100644
--- a/paddle/framework/backward_test.cc
+++ b/paddle/framework/backward_test.cc
@@ -150,19 +150,20 @@ class AddOpMaker : public OpProtoAndCheckerMaker {
 namespace f = paddle::framework;
 namespace ops = paddle::operators;
 using EnforceNotMet = paddle::platform::EnforceNotMet;
-REGISTER_OP(rowwise_add, f::EmptyOp, f::RowWiseAddOpMaker);
-REGISTER_GRADIENT_OP(rowwise_add, rowwise_add_grad, f::EmptyOp);
-REGISTER_OP(mul, f::EmptyOp, f::MulOpMaker);
-REGISTER_GRADIENT_OP(mul, mul_grad, f::EmptyOp);
-REGISTER_OP(sigmoid, f::EmptyOp, f::SigmoidOpMaker);
-REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, f::EmptyOp);
-REGISTER_OP(nograd, f::EmptyOp, f::NoGradOpMaker);
-REGISTER_OP(fill_zeros_like, f::EmptyOp, f::FillZeroOpMaker);
-REGISTER_OP(add, f::EmptyOp, f::AddOpMaker);
-REGISTER_GRADIENT_OP(add, add_grad, f::EmptyOp);
-REGISTER_OP(fc, f::FcOp, f::FcOpMaker);
-REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker);
-REGISTER_GRADIENT_OP(many_output_op, many_output_op_grad, f::EmptyOp);
+REGISTER_OP(rowwise_add, f::EmptyOp, f::RowWiseAddOpMaker, rowwise_add_grad);
+REGISTER_GRADIENT_OP(rowwise_add_grad, f::EmptyOp);
+REGISTER_OP(mul, f::EmptyOp, f::MulOpMaker, mul_grad);
+REGISTER_GRADIENT_OP(mul_grad, f::EmptyOp);
+REGISTER_OP(sigmoid, f::EmptyOp, f::SigmoidOpMaker, sigmoid_grad);
+REGISTER_GRADIENT_OP(sigmoid_grad, f::EmptyOp);
+REGISTER_OP_WITHOUT_GRADIENT(nograd, f::EmptyOp, f::NoGradOpMaker);
+REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, f::EmptyOp, f::FillZeroOpMaker);
+REGISTER_OP(add, f::EmptyOp, f::AddOpMaker, add_grad);
+REGISTER_GRADIENT_OP(add_grad, f::EmptyOp);
+REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker);
+REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker,
+            many_output_op_grad);
+REGISTER_GRADIENT_OP(many_output_op_grad, f::EmptyOp);
 
 TEST(Backward, simple_op_grad) {
   auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc
index f1ebbae52f1..ad61b482e05 100644
--- a/paddle/framework/grad_op_builder_test.cc
+++ b/paddle/framework/grad_op_builder_test.cc
@@ -61,10 +61,10 @@ TEST(GradOpBuilder, AddTwo) {
   EXPECT_EQ(grad_add_op->Output("Y@GRAD"), "y@GRAD");
 }
 
-REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker);
-REGISTER_GRADIENT_OP(mult_io, mult_io_grad, f::NOP);
-REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker);
-REGISTER_GRADIENT_OP(io_ignored, io_ignored_grad, f::NOP);
+REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad);
+REGISTER_GRADIENT_OP(mult_io_grad, f::NOP);
+REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, io_ignored_grad);
+REGISTER_GRADIENT_OP(io_ignored_grad, f::NOP);
 
 TEST(GradOpBuilder, MutiInOut) {
   f::AttributeMap attrs{{"input_format", std::vector<int>{0, 1, 4, 5}},
diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc
index 9894928a7aa..6f21ffc8a4b 100644
--- a/paddle/framework/op_registry_test.cc
+++ b/paddle/framework/op_registry_test.cc
@@ -49,10 +49,10 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
 }  // namespace framework
 }  // namespace paddle
 
-REGISTER_OP(cos_sim, paddle::framework::CosineOp,
-            paddle::framework::CosineOpProtoAndCheckerMaker);
-REGISTER_OP(my_test_op, paddle::framework::MyTestOp,
-            paddle::framework::MyTestOpProtoAndCheckerMaker);
+REGISTER_OP_WITHOUT_GRADIENT(cos_sim, paddle::framework::CosineOp,
+                             paddle::framework::CosineOpProtoAndCheckerMaker);
+REGISTER_OP_WITHOUT_GRADIENT(my_test_op, paddle::framework::MyTestOp,
+                             paddle::framework::MyTestOpProtoAndCheckerMaker);
 
 TEST(OpRegistry, CreateOp) {
   paddle::framework::OpDesc op_desc;
diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc
index 387aada749b..b1976a65149 100644
--- a/paddle/framework/operator_test.cc
+++ b/paddle/framework/operator_test.cc
@@ -54,8 +54,9 @@ class OpeWithoutKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
 }  // namespace framework
 }  // namespace paddle
 
-REGISTER_OP(test_operator, paddle::framework::OpWithoutKernelTest,
-            paddle::framework::OpeWithoutKernelTestProtoAndCheckerMaker);
+REGISTER_OP_WITHOUT_GRADIENT(
+    test_operator, paddle::framework::OpWithoutKernelTest,
+    paddle::framework::OpeWithoutKernelTestProtoAndCheckerMaker);
 
 TEST(OperatorBase, all) {
   paddle::framework::OpDesc op_desc;
@@ -212,8 +213,9 @@ TEST(OpKernel, all) {
   ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 1);
 }
 
-REGISTER_OP(op_multi_inputs_with_kernel, paddle::framework::OpWithKernelTest,
-            paddle::framework::OpKernelTestMultiInputsProtoAndCheckerMaker);
+REGISTER_OP_WITHOUT_GRADIENT(
+    op_multi_inputs_with_kernel, paddle::framework::OpWithKernelTest,
+    paddle::framework::OpKernelTestMultiInputsProtoAndCheckerMaker);
 
 REGISTER_OP_CPU_KERNEL(op_multi_inputs_with_kernel,
                        paddle::framework::CPUKernalMultiInputsTest);
diff --git a/paddle/framework/pybind.cc b/paddle/framework/pybind.cc
index 412b4162666..0416793d3aa 100644
--- a/paddle/framework/pybind.cc
+++ b/paddle/framework/pybind.cc
@@ -30,9 +30,9 @@ limitations under the License. */
 namespace py = pybind11;
 
 USE_OP(add_two);
-USE_CPU_OP(onehot_cross_entropy);
+USE_CPU_ONLY_OP(onehot_cross_entropy);
 USE_OP_ITSELF(fc);
-USE_NO_GRAD_OP(sgd);
+USE_OP(sgd);
 USE_OP(mul);
 USE_OP(mean);
 USE_OP(sigmoid);
diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc
index 086245ef62d..e8e26cbe9b4 100644
--- a/paddle/operators/add_op.cc
+++ b/paddle/operators/add_op.cc
@@ -55,8 +55,8 @@ class AddOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker);
-REGISTER_GRADIENT_OP(add_two, add_two_grad, ops::AddOpGrad);
+REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker, add_two_grad);
+REGISTER_GRADIENT_OP(add_two_grad, ops::AddOpGrad);
 
 REGISTER_OP_CPU_KERNEL(add_two,
                        ops::AddKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc
index c813d54e17f..7d0e74e5e4d 100644
--- a/paddle/operators/cross_entropy_op.cc
+++ b/paddle/operators/cross_entropy_op.cc
@@ -69,11 +69,11 @@ OnehotCrossEntropy Operator.
 
 namespace ops = paddle::operators;
 REGISTER_OP(onehot_cross_entropy, ops::OnehotCrossEntropyOp,
-            ops::OnehotCrossEntropyOpMaker);
+            ops::OnehotCrossEntropyOpMaker, onehot_cross_entropy_grad);
 REGISTER_OP_CPU_KERNEL(
     onehot_cross_entropy,
     ops::OnehotCrossEntropyOpKernel<paddle::platform::CPUPlace, float>);
-REGISTER_GRADIENT_OP(onehot_cross_entropy, onehot_cross_entropy_grad,
+REGISTER_GRADIENT_OP(onehot_cross_entropy_grad,
                      ops::OnehotCrossEntropyGradientOp);
 REGISTER_OP_CPU_KERNEL(
     onehot_cross_entropy_grad,
diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc
index 01a1a81206f..9d32f327bfe 100644
--- a/paddle/operators/fc_op.cc
+++ b/paddle/operators/fc_op.cc
@@ -73,4 +73,5 @@ USE_OP(sigmoid);
 USE_OP(softmax);
 
 namespace ops = paddle::operators;
-REGISTER_OP(fc, ops::FullyConnectedOp, ops::FullyConnectedOpMaker);
+REGISTER_OP_WITHOUT_GRADIENT(fc, ops::FullyConnectedOp,
+                             ops::FullyConnectedOpMaker);
diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc
index 3759a886780..d6fd368b078 100644
--- a/paddle/operators/fill_zeros_like_op.cc
+++ b/paddle/operators/fill_zeros_like_op.cc
@@ -51,7 +51,8 @@ The output will have the same size with input.
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(fill_zeros_like, ops::FillZerosLikeOp, ops::FillZerosLikeOpMaker);
+REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, ops::FillZerosLikeOp,
+                             ops::FillZerosLikeOpMaker);
 REGISTER_OP_CPU_KERNEL(
     fill_zeros_like,
     ops::FillZerosLikeKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc
index ef417ae2f06..0bbbeaa08a1 100644
--- a/paddle/operators/gaussian_random_op.cc
+++ b/paddle/operators/gaussian_random_op.cc
@@ -78,5 +78,6 @@ Use to initialize tensor with gaussian random generator.
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(gaussian_random, ops::GaussianRandomOp, ops::GaussianRandomOpMaker);
+REGISTER_OP_WITHOUT_GRADIENT(gaussian_random, ops::GaussianRandomOp,
+                             ops::GaussianRandomOpMaker);
 REGISTER_OP_CPU_KERNEL(gaussian_random, ops::GaussianRandomKernel<float>);
diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc
index 2ea049cb360..15e0708c468 100644
--- a/paddle/operators/mean_op.cc
+++ b/paddle/operators/mean_op.cc
@@ -50,9 +50,9 @@ class MeanGradOp : public framework::OperatorWithKernel {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker);
+REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker, mean_grad);
 REGISTER_OP_CPU_KERNEL(mean,
                        ops::MeanKernel<paddle::platform::CPUPlace, float>);
-REGISTER_GRADIENT_OP(mean, mean_grad, ops::MeanGradOp);
+REGISTER_GRADIENT_OP(mean_grad, ops::MeanGradOp);
 REGISTER_OP_CPU_KERNEL(mean_grad,
                        ops::MeanGradKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc
index db81fd555d1..60550a27425 100644
--- a/paddle/operators/mul_op.cc
+++ b/paddle/operators/mul_op.cc
@@ -65,7 +65,7 @@ class MulOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker);
-REGISTER_GRADIENT_OP(mul, mul_grad, ops::MulOpGrad);
+REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad);
+REGISTER_GRADIENT_OP(mul_grad, ops::MulOpGrad);
 REGISTER_OP_CPU_KERNEL(mul,
                        ops::MulKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc
index 24383742056..91be1ce5193 100644
--- a/paddle/operators/recurrent_op.cc
+++ b/paddle/operators/recurrent_op.cc
@@ -235,5 +235,6 @@ void RecurrentGradientOp::Init() {
 }  // namespace operators
 }  // namespace paddle
 
-REGISTER_OP(recurrent_op, paddle::operators::RecurrentOp,
-            paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker);
+REGISTER_OP_WITHOUT_GRADIENT(
+    recurrent_op, paddle::operators::RecurrentOp,
+    paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker);
diff --git a/paddle/operators/rowwise_add_op.cc b/paddle/operators/rowwise_add_op.cc
index 55ed1c2f4c3..262a4127ef9 100644
--- a/paddle/operators/rowwise_add_op.cc
+++ b/paddle/operators/rowwise_add_op.cc
@@ -53,6 +53,7 @@ for i in xrange(X.shape[0]):
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(rowwise_add, ops::RowWiseAddOp, ops::RowWiseAddOpMaker);
+REGISTER_OP_WITHOUT_GRADIENT(rowwise_add, ops::RowWiseAddOp,
+                             ops::RowWiseAddOpMaker);
 REGISTER_OP_CPU_KERNEL(
     rowwise_add, ops::RowWiseAddKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc
index f9a28ff8a6a..94d0fe0466a 100644
--- a/paddle/operators/sgd_op.cc
+++ b/paddle/operators/sgd_op.cc
@@ -52,6 +52,6 @@ param_out = param - learning_rate * grad;
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(sgd, ops::SGDOp, ops::SGDOpMaker);
+REGISTER_OP_WITHOUT_GRADIENT(sgd, ops::SGDOp, ops::SGDOpMaker);
 REGISTER_OP_CPU_KERNEL(sgd,
                        ops::SGDOpKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/sigmoid_op.cc b/paddle/operators/sigmoid_op.cc
index bc5e0bbb183..fb27ffbfa1d 100644
--- a/paddle/operators/sigmoid_op.cc
+++ b/paddle/operators/sigmoid_op.cc
@@ -48,8 +48,8 @@ class SigmoidOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker);
-REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, ops::SigmoidOpGrad);
+REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker, sigmoid_grad);
+REGISTER_GRADIENT_OP(sigmoid_grad, ops::SigmoidOpGrad);
 
 REGISTER_OP_CPU_KERNEL(sigmoid,
                        ops::SigmoidKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc
index 3dd4e86918a..abc21337c52 100644
--- a/paddle/operators/softmax_op.cc
+++ b/paddle/operators/softmax_op.cc
@@ -64,9 +64,9 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
 
 namespace ops = paddle::operators;
-REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker);
+REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker, softmax_grad);
 REGISTER_OP_CPU_KERNEL(softmax,
                        ops::SoftmaxKernel<paddle::platform::CPUPlace, float>);
-REGISTER_GRADIENT_OP(softmax, softmax_grad, ops::SoftmaxOpGrad);
+REGISTER_GRADIENT_OP(softmax_grad, ops::SoftmaxOpGrad);
 REGISTER_OP_CPU_KERNEL(
     softmax_grad,
     ops::SoftmaxGradKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc
index 405b84b76d2..37ec7fe4270 100644
--- a/paddle/operators/uniform_random_op.cc
+++ b/paddle/operators/uniform_random_op.cc
@@ -78,7 +78,7 @@ Used to initialize tensor with uniform random generator.
 }  // namespace operators
 }  // namespace paddle
 
-REGISTER_OP(uniform_random, paddle::operators::UniformRandomOp,
-            paddle::operators::UniformRandomOpMaker);
+REGISTER_OP_WITHOUT_GRADIENT(uniform_random, paddle::operators::UniformRandomOp,
+                             paddle::operators::UniformRandomOpMaker);
 REGISTER_OP_CPU_KERNEL(uniform_random,
                        paddle::operators::CPUUniformRandomKernel<float>);
--
GitLab
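
The same mechanical rewrite is applied in every file above: REGISTER_OP gains a fourth argument naming the gradient operator, REGISTER_GRADIENT_OP drops the forward op's name and keeps only the gradient op's own name and class, and operators with no gradient switch from plain REGISTER_OP to the new REGISTER_OP_WITHOUT_GRADIENT. A minimal before/after sketch of the registration API, inferred from the diff (my_op, my_nograd_op, MyOp, MyOpMaker, and MyOpGrad are hypothetical placeholders, not identifiers from this patch):

    // Before this patch: the forward/backward link lived only in
    // REGISTER_GRADIENT_OP, and an op with no gradient looked the same
    // as an op whose gradient registration was simply forgotten.
    REGISTER_OP(my_op, MyOp, MyOpMaker);
    REGISTER_GRADIENT_OP(my_op, my_op_grad, MyOpGrad);

    // After this patch: the forward registration names its gradient op
    // up front, presumably so the registry can associate the pair at
    // registration time, and REGISTER_GRADIENT_OP needs only the
    // gradient op itself.
    REGISTER_OP(my_op, MyOp, MyOpMaker, my_op_grad);
    REGISTER_GRADIENT_OP(my_op_grad, MyOpGrad);

    // Having no gradient is now an explicit declaration rather than an
    // omission.
    REGISTER_OP_WITHOUT_GRADIENT(my_nograd_op, MyOp, MyOpMaker);

The pybind.cc hunk follows the same logic on the consumer side: USE_NO_GRAD_OP(sgd) becomes plain USE_OP(sgd) now that sgd is registered as gradient-free, and USE_CPU_OP is renamed USE_CPU_ONLY_OP.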