From c18ebc3022961f404265a80400fcc29d216b4534 Mon Sep 17 00:00:00 2001
From: qijun
Date: Thu, 14 Sep 2017 07:10:43 +0800
Subject: [PATCH] remove macros

---
 paddle/operators/activation_op.cc | 134 ++++++++++++++----
 paddle/operators/activation_op.h  | 162 ++++++++++++++++++++++--------
 paddle/pybind/pybind.cc           |   2 +-
 3 files changed, 203 insertions(+), 95 deletions(-)

diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc
index d2c2378fef..e713b5a211 100644
--- a/paddle/operators/activation_op.cc
+++ b/paddle/operators/activation_op.cc
@@ -14,33 +14,55 @@
 
 #include "paddle/operators/activation_op.h"
 
-#define FILL_ACTIVATION_OP \
- public: \
-  using framework::OperatorWithKernel::OperatorWithKernel; \
- \
- protected: \
-  void InferShape(const framework::InferShapeContext &ctx) const override { \
-    ctx.Output<framework::Tensor>("Y")->Resize( \
-        ctx.Input<framework::Tensor>("X")->dims()); \
-  }
-
-#define FILL_ACTIVATION_GRAD_OP \
- public: \
-  using framework::OperatorWithKernel::OperatorWithKernel; \
- \
- protected: \
-  void InferShape(const framework::InferShapeContext &ctx) const override { \
-    ctx.Output<framework::Tensor>(framework::GradVarName("X")) \
-        ->Resize(ctx.Input<framework::Tensor>("Y")->dims()); \
-  }
+// #define FILL_ACTIVATION_OP \
+//  public: \
+//   using framework::OperatorWithKernel::OperatorWithKernel; \
+// \
+//  protected: \
+//   void InferShape(const framework::InferShapeContext &ctx) const override { \
+//     ctx.Output<framework::Tensor>("Y")->Resize( \
+//         ctx.Input<framework::Tensor>("X")->dims()); \
+//   }
+
+// #define FILL_ACTIVATION_GRAD_OP \
+//  public: \
+//   using framework::OperatorWithKernel::OperatorWithKernel; \
+// \
+//  protected: \
+//   void InferShape(const framework::InferShapeContext &ctx) const override { \
+//     ctx.Output<framework::Tensor>(framework::GradVarName("X")) \
+//         ->Resize(ctx.Input<framework::Tensor>("Y")->dims()); \
+//   }
 
 namespace paddle {
 namespace operators {
 
-class SigmoidOp : public framework::OperatorWithKernel {
-  FILL_ACTIVATION_OP
+class ActivationOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(const framework::InferShapeContext &ctx) const override {
+    ctx.Output<framework::Tensor>("Y")->Resize(
+        ctx.Input<framework::Tensor>("X")->dims());
+  }
 };
 
+class ActivationOpGrad : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(const framework::InferShapeContext &ctx) const override {
+    ctx.Output<framework::Tensor>(framework::GradVarName("X"))
+        ->Resize(ctx.Input<framework::Tensor>("Y")->dims());
+  }
+};
+
+// class SigmoidOp : public framework::OperatorWithKernel {
+//   FILL_ACTIVATION_OP
+// };
+
 class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   SigmoidOpMaker(framework::OpProto *proto,
@@ -52,13 +74,13 @@ class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
   }
 };
 
-class SigmoidOpGrad : public framework::OperatorWithKernel {
-  FILL_ACTIVATION_GRAD_OP
-};
+// class SigmoidOpGrad : public framework::OperatorWithKernel {
+//   FILL_ACTIVATION_GRAD_OP
+// };
 
-class ExpOp : public framework::OperatorWithKernel {
-  FILL_ACTIVATION_OP
-};
+// class ExpOp : public framework::OperatorWithKernel {
+//   FILL_ACTIVATION_OP
+// };
 
 class ExpOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
@@ -70,13 +92,13 @@ class ExpOpMaker : public framework::OpProtoAndCheckerMaker {
   }
 };
 
-class ExpOpGrad : public framework::OperatorWithKernel {
-  FILL_ACTIVATION_GRAD_OP
-};
+// class ExpOpGrad : public framework::OperatorWithKernel {
+//   FILL_ACTIVATION_GRAD_OP
+// };
 
-class ReluOp : public framework::OperatorWithKernel {
-  FILL_ACTIVATION_OP
-};
+// class ReluOp : public framework::OperatorWithKernel {
+//   FILL_ACTIVATION_OP
+// };
 
 class ReluOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
@@ -88,28 +110,36 @@ class ReluOpMaker : public framework::OpProtoAndCheckerMaker {
   }
 };
 
-class ReluOpGrad : public framework::OperatorWithKernel {
-  FILL_ACTIVATION_GRAD_OP
-};
+// class ReluOpGrad : public framework::OperatorWithKernel {
+//   FILL_ACTIVATION_GRAD_OP
+// };
 
 }  // namespace operators
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker, sigmoid_grad,
-            ops::SigmoidOpGrad);
-REGISTER_OP_CPU_KERNEL(sigmoid,
-                       ops::SigmoidKernel<paddle::platform::CPUPlace, float>);
+REGISTER_OP(sigmoid, ops::ActivationOp, ops::SigmoidOpMaker, sigmoid_grad,
+            ops::ActivationOpGrad);
+REGISTER_OP_CPU_KERNEL(
+    sigmoid,
+    ops::ActivationKernel<paddle::platform::CPUPlace, float, ops::Sigmoid>);
+REGISTER_OP_CPU_KERNEL(sigmoid_grad,
+                       ops::ActivationGradKernel<paddle::platform::CPUPlace,
+                                                 float, ops::SigmoidGrad>);
+
+REGISTER_OP(exp, ops::ActivationOp, ops::ExpOpMaker, exp_grad,
+            ops::ActivationOpGrad);
+REGISTER_OP_CPU_KERNEL(
+    exp, ops::ActivationKernel<paddle::platform::CPUPlace, float, ops::Exp>);
 REGISTER_OP_CPU_KERNEL(
-    sigmoid_grad, ops::SigmoidGradKernel<paddle::platform::CPUPlace, float>);
-
-REGISTER_OP(exp, ops::ExpOp, ops::ExpOpMaker, exp_grad, ops::ExpOpGrad);
-REGISTER_OP_CPU_KERNEL(exp, ops::ExpKernel<paddle::platform::CPUPlace, float>);
-REGISTER_OP_CPU_KERNEL(exp_grad,
-                       ops::ExpGradKernel<paddle::platform::CPUPlace, float>);
-
-REGISTER_OP(relu, ops::ReluOp, ops::ReluOpMaker, relu_grad, ops::ReluOpGrad);
-REGISTER_OP_CPU_KERNEL(relu,
-                       ops::ReluKernel<paddle::platform::CPUPlace, float>);
-REGISTER_OP_CPU_KERNEL(relu_grad,
-                       ops::ReluGradKernel<paddle::platform::CPUPlace, float>);
+    exp_grad,
+    ops::ActivationGradKernel<paddle::platform::CPUPlace, float, ops::ExpGrad>);
+
+// REGISTER_OP(relu, ops::ActivationOp, ops::ReluOpMaker, relu_grad,
+//             ops::ActivationOpGrad);
+// REGISTER_OP_CPU_KERNEL(relu,
+//                        ops::ReluKernel<paddle::platform::CPUPlace, float>);
+// REGISTER_OP_CPU_KERNEL(relu_grad,
+//                        ops::ReluGradKernel<paddle::platform::CPUPlace, float>);
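With the FILL_ACTIVATION_OP / FILL_ACTIVATION_GRAD_OP macros replaced by the shared ActivationOp and ActivationOpGrad classes, adding an activation now takes only its own OpMaker plus a forward/backward functor pair. A minimal sketch of what that would look like for a hypothetical tanh op, where TanhOpMaker, Tanh, and TanhGrad are assumed to be written in the same style as the sigmoid pieces above (none of them are part of this patch):

// Hypothetical example -- tanh is not added by this patch.
REGISTER_OP(tanh, ops::ActivationOp, ops::TanhOpMaker, tanh_grad,
            ops::ActivationOpGrad);
REGISTER_OP_CPU_KERNEL(
    tanh, ops::ActivationKernel<paddle::platform::CPUPlace, float, ops::Tanh>);
REGISTER_OP_CPU_KERNEL(tanh_grad,
                       ops::ActivationGradKernel<paddle::platform::CPUPlace,
                                                 float, ops::TanhGrad>);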
diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h
index 9e4101805e..7d5c5bb26f 100644
--- a/paddle/operators/activation_op.h
+++ b/paddle/operators/activation_op.h
@@ -15,57 +15,135 @@
 #pragma once
 #include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"
-#include "paddle/operators/math/activation_functor.h"
-
-#define ACTIVATION_KERNEL_NAME(ACTIVATION_NAME) ACTIVATION_NAME##Kernel
-
-#define DEFINE_ACTIVATION_KERNEL(ACTIVATION_NAME) \
-  template <typename Place, typename T> \
-  class ACTIVATION_KERNEL_NAME(ACTIVATION_NAME) : public framework::OpKernel { \
-   public: \
-    void Compute(const framework::ExecutionContext& context) const override { \
-      auto* X = context.Input<framework::Tensor>("X"); \
-      auto* Y = context.Output<framework::Tensor>("Y"); \
-      Y->mutable_data<T>(context.GetPlace()); \
-      math::ACTIVATION_NAME<Place, T> functor; \
-      auto* device_context = context.device_context(); \
-      functor(*device_context, *X, Y); \
-    } \
-  };
-
-#define DEFINE_ACTIVATION_GRAD_KERNEL(ACTIVATION_GRAD_NAME) \
-  template <typename Place, typename T> \
-  class ACTIVATION_KERNEL_NAME(ACTIVATION_GRAD_NAME) \
-      : public framework::OpKernel { \
-   public: \
-    void Compute(const framework::ExecutionContext& context) const override { \
-      auto* X = context.Input<framework::Tensor>("X"); \
-      auto* Y = context.Input<framework::Tensor>("Y"); \
-      auto* dY = \
-          context.Input<framework::Tensor>(framework::GradVarName("Y")); \
-      auto* dX = \
-          context.Output<framework::Tensor>(framework::GradVarName("X")); \
-      dX->mutable_data<T>(context.GetPlace()); \
-      math::ACTIVATION_GRAD_NAME<Place, T> functor; \
-      auto* device_context = context.device_context(); \
-      functor(*device_context, *X, *Y, *dY, dX); \
-    } \
-  };
+// #include "paddle/operators/math/activation_functor.h"
+
+// #define ACTIVATION_KERNEL_NAME(ACTIVATION_NAME) ACTIVATION_NAME##Kernel
+
+// #define DEFINE_ACTIVATION_KERNEL(ACTIVATION_NAME) \
+//   template <typename Place, typename T> \
+//   class ACTIVATION_KERNEL_NAME(ACTIVATION_NAME) : public framework::OpKernel { \
+//    public: \
+//     void Compute(const framework::ExecutionContext& context) const override { \
+//       auto* X = context.Input<framework::Tensor>("X"); \
+//       auto* Y = context.Output<framework::Tensor>("Y"); \
+//       Y->mutable_data<T>(context.GetPlace()); \
+//       math::ACTIVATION_NAME<Place, T> functor; \
+//       auto* device_context = context.device_context(); \
+//       functor(*device_context, *X, Y); \
+//     } \
+//   };
+
+// #define DEFINE_ACTIVATION_GRAD_KERNEL(ACTIVATION_GRAD_NAME) \
+//   template <typename Place, typename T> \
+//   class ACTIVATION_KERNEL_NAME(ACTIVATION_GRAD_NAME) \
+//       : public framework::OpKernel { \
+//    public: \
+//     void Compute(const framework::ExecutionContext& context) const override { \
+//       auto* X = context.Input<framework::Tensor>("X"); \
+//       auto* Y = context.Input<framework::Tensor>("Y"); \
+//       auto* dY = \
+//           context.Input<framework::Tensor>(framework::GradVarName("Y")); \
+//       auto* dX = \
+//           context.Output<framework::Tensor>(framework::GradVarName("X")); \
+//       dX->mutable_data<T>(context.GetPlace()); \
+//       math::ACTIVATION_GRAD_NAME<Place, T> functor; \
+//       auto* device_context = context.device_context(); \
+//       functor(*device_context, *X, *Y, *dY, dX); \
+//     } \
+//   };
 
 namespace paddle {
 namespace operators {
 
-DEFINE_ACTIVATION_KERNEL(Sigmoid);
+template <typename Place, typename T, typename Functor>
+class ActivationKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    auto* X = context.Input<framework::Tensor>("X");
+    auto* Y = context.Output<framework::Tensor>("Y");
+    Y->mutable_data<T>(context.GetPlace());
+
+    auto x = framework::EigenVector<T>::Flatten(*X);
+    auto y = framework::EigenVector<T>::Flatten(*Y);
+    auto place = context.GetEigenDevice<Place>();
+    Functor functor;
+    functor(place, x, y);
+  }
+};
+
+template <typename Place, typename T, typename Functor>
+class ActivationGradKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    auto* X = context.Input<framework::Tensor>("X");
+    auto* Y = context.Input<framework::Tensor>("Y");
+    auto* dY = context.Input<framework::Tensor>(framework::GradVarName("Y"));
+    auto* dX = context.Output<framework::Tensor>(framework::GradVarName("X"));
+    dX->mutable_data<T>(context.GetPlace());
+
+    auto dy = framework::EigenVector<T>::Flatten(*dY);
+    auto x = framework::EigenVector<T>::Flatten(*X);
+    auto y = framework::EigenVector<T>::Flatten(*Y);
+    auto dx = framework::EigenVector<T>::Flatten(*dX);
+    auto place = context.GetEigenDevice<Place>();
+    Functor functor;
+    functor(place, x, y, dy, dx);
+  }
+};
+
+struct Sigmoid {
+  template <typename Device, typename X, typename Y>
+  void operator()(Device d, X x, Y y) {
+    y.device(d) = 1. / (1. + (-x).exp());
+  }
+};
+
+struct SigmoidGrad {
+  template <typename Device, typename X, typename Y, typename dY, typename dX>
+  void operator()(Device d, X x, Y y, dY dy, dX dx) {
+    dx.device(d) = dy * y * (1. - y);
+  }
+};
+
+struct Exp {
+  template <typename Device, typename X, typename Y>
+  void operator()(Device d, X x, Y y) {
+    y.device(d) = x.exp();
+  }
+};
+
+struct ExpGrad {
+  template <typename Device, typename X, typename Y, typename dY, typename dX>
+  void operator()(Device d, X x, Y y, dY dy, dX dx) {
+    dx.device(d) = y;
+  }
+};
+
+// template <typename Device, typename X, typename Y, typename T>
+// struct Relu {
+//   void operator()(Device d, X x, Y y) {
+//     y.device(d) = x.cwiseMax(static_cast<T>(0));
+//   }
+// };
+
+// template <typename Device, typename X, typename Y, typename dY, typename dX,
+//           typename T>
+// struct ReluGrad {
+//   void operator()(Device d, X x, Y y, dY dy, dX dx) {
+//     dx.device(d) = dy * (x > static_cast<T>(0)).template cast<T>();
+//   }
+// };
+
+// DEFINE_ACTIVATION_KERNEL(Sigmoid);
 
-DEFINE_ACTIVATION_GRAD_KERNEL(SigmoidGrad);
+// DEFINE_ACTIVATION_GRAD_KERNEL(SigmoidGrad);
 
-DEFINE_ACTIVATION_KERNEL(Exp);
+// DEFINE_ACTIVATION_KERNEL(Exp);
 
-DEFINE_ACTIVATION_GRAD_KERNEL(ExpGrad);
+// DEFINE_ACTIVATION_GRAD_KERNEL(ExpGrad);
 
-DEFINE_ACTIVATION_KERNEL(Relu);
+// DEFINE_ACTIVATION_KERNEL(Relu);
 
-DEFINE_ACTIVATION_GRAD_KERNEL(ReluGrad);
+// DEFINE_ACTIVATION_GRAD_KERNEL(ReluGrad);
 
 }  // namespace operators
 }  // namespace paddle
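Relu stays commented out above because its functors need the scalar type T for static_cast<T>(0), and the new functor structs deliberately carry no template parameters of their own. One possible way to express relu without T is to take the zero constant from the expression itself via Eigen's constant(); a sketch under that assumption, not part of this patch:

struct Relu {
  template <typename Device, typename X, typename Y>
  void operator()(Device d, X x, Y y) {
    // max(x, 0), with the zero drawn from x's own scalar type
    y.device(d) = x.cwiseMax(x.constant(0));
  }
};

struct ReluGrad {
  template <typename Device, typename X, typename Y, typename dY, typename dX>
  void operator()(Device d, X x, Y y, dY dy, dX dx) {
    // pass the upstream gradient through where x > 0, zero elsewhere
    dx.device(d) = (x > x.constant(0)).select(dy, dy.constant(0));
  }
};

Note also that ExpGrad as written computes dx = y and drops dy; for y = exp(x) the chain rule gives dx = dy * y, so this looks like a latent bug rather than an intended simplification.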
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index bd964c5d07..bed35d7822 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -56,7 +56,7 @@ USE_OP(sum);
 USE_OP(reshape);
 USE_OP(sigmoid);
 USE_OP(exp);
-USE_OP(relu);
+// USE_OP(relu);
 
 namespace paddle {
 namespace framework {
-- 
GitLab
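The pybind change keeps USE_OP consistent with the commented-out registration: with relu no longer registered, USE_OP(relu) would reference an operator that never gets linked in. The patch also only rewires the CPU registrations; the GPU side is not shown, but an activation_op.cu mirroring them would presumably look like the sketch below (REGISTER_OP_GPU_KERNEL and paddle::platform::GPUPlace are assumed from the surrounding codebase, not confirmed by this patch):

// Presumed GPU mirror of the sigmoid CPU registrations (not in this patch):
REGISTER_OP_GPU_KERNEL(
    sigmoid,
    ops::ActivationKernel<paddle::platform::GPUPlace, float, ops::Sigmoid>);
REGISTER_OP_GPU_KERNEL(sigmoid_grad,
                       ops::ActivationGradKernel<paddle::platform::GPUPlace,
                                                 float, ops::SigmoidGrad>);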