From 6ee8a2e1dbdc35a347677ee15a68963af7731b77 Mon Sep 17 00:00:00 2001
From: fengjiayi
Date: Mon, 15 Jan 2018 17:03:33 +0800
Subject: [PATCH] remove unnecessary functor1

---
 paddle/operators/elementwise_add_op.h      | 18 ------------------
 paddle/operators/elementwise_div_op.h      |  1 -
 paddle/operators/elementwise_mul_op.h      |  1 -
 paddle/operators/elementwise_op_function.h |  3 +--
 paddle/operators/elementwise_sub_op.h      | 18 ------------------
 5 files changed, 1 insertion(+), 40 deletions(-)

diff --git a/paddle/operators/elementwise_add_op.h b/paddle/operators/elementwise_add_op.h
index 59abbb57d1d..6478e1e0c2e 100644
--- a/paddle/operators/elementwise_add_op.h
+++ b/paddle/operators/elementwise_add_op.h
@@ -81,23 +81,6 @@ struct ElementwiseAddGradFunctor {
   }
 };
 
-template <typename T>
-struct ElementwiseAddOneGradFunctor {
-  template <typename Device, typename X, typename Y, typename Z, typename dX,
-            typename dY, typename dZ>
-  void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) {
-    auto dz_e = framework::EigenVector<T>::Flatten(*dz);
-    if (dx) {
-      auto dx_e = framework::EigenVector<T>::Flatten(*dx);
-      dx_e.device(d) = dz_e;
-    }
-    if (dy) {
-      auto dy_e = framework::EigenVector<T>::Flatten(*dy);
-      dy_e.device(d) = dz_e.sum();
-    }
-  }
-};
-
 template <typename T>
 struct ElementwiseAddBroadCastGradFunctor {
   template <typename Device, typename X, typename Y, typename Z, typename dX,
@@ ... @@ class ElementwiseAddGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     ElementwiseGradCompute<DeviceContext, T, ElementwiseAddGradFunctor<T>,
-                           ElementwiseAddOneGradFunctor<T>,
                            ElementwiseAddBroadCastGradFunctor<T>,
                            ElementwiseAddBroadCast2GradFunctor<T>>(ctx);
   }
diff --git a/paddle/operators/elementwise_div_op.h b/paddle/operators/elementwise_div_op.h
index 875abd313ff..7783875e24e 100644
--- a/paddle/operators/elementwise_div_op.h
+++ b/paddle/operators/elementwise_div_op.h
@@ -107,7 +107,6 @@ class ElementwiseDivGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     ElementwiseGradCompute<DeviceContext, T, ElementwiseDivGradFunctor<T>,
-                           ElementwiseDivGradFunctor<T>,
                            ElementwiseDivBroadCastGradFunctor<T>,
                            ElementwiseDivBroadCast2GradFunctor<T>>(ctx);
   }
diff --git a/paddle/operators/elementwise_mul_op.h b/paddle/operators/elementwise_mul_op.h
index 3ee50207c07..0e6559eacc0 100644
--- a/paddle/operators/elementwise_mul_op.h
+++ b/paddle/operators/elementwise_mul_op.h
@@ -106,7 +106,6 @@ class ElementwiseMulGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     ElementwiseGradCompute<DeviceContext, T, ElementwiseMulGradFunctor<T>,
-                           ElementwiseMulGradFunctor<T>,
                            ElementwiseMulBroadCastGradFunctor<T>,
                            ElementwiseMulBroadCast2GradFunctor<T>>(ctx);
   }
diff --git a/paddle/operators/elementwise_op_function.h b/paddle/operators/elementwise_op_function.h
index 560247cb108..0c75276b031 100644
--- a/paddle/operators/elementwise_op_function.h
+++ b/paddle/operators/elementwise_op_function.h
@@ -311,8 +311,7 @@ EIGEN_FUNCTOR(Mul, EIGEN_MUL);
 EIGEN_FUNCTOR(Div, EIGEN_DIV);
 
 template <typename DeviceContext, typename T, typename functor,
-          typename functor1, typename broadcastfunctor,
-          typename broadcast2functor>
+          typename broadcastfunctor, typename broadcast2functor>
 void ElementwiseGradCompute(const framework::ExecutionContext& ctx) {
   using Tensor = framework::Tensor;
diff --git a/paddle/operators/elementwise_sub_op.h b/paddle/operators/elementwise_sub_op.h
index 66edf8672d1..347e92f87c7 100644
--- a/paddle/operators/elementwise_sub_op.h
+++ b/paddle/operators/elementwise_sub_op.h
@@ -43,23 +43,6 @@ struct ElementwiseSubGradFunctor {
   }
 };
 
-template <typename T>
-struct ElementwiseSubOneGradFunctor {
-  template <typename Device, typename X, typename Y, typename Z, typename dX,
-            typename dY, typename dZ>
-  void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) {
-    auto dz_e = framework::EigenVector<T>::Flatten(*dz);
-    if (dx) {
-      auto dx_e = framework::EigenVector<T>::Flatten(*dx);
-      dx_e.device(d) = dz_e;
-    }
-    if (dy) {
-      auto dy_e = framework::EigenVector<T>::Flatten(*dy);
-      dy_e.device(d) = (-1.0) * dz_e.sum();
-    }
-  }
-};
-
 template <typename T>
 struct ElementwiseSubBroadCastGradFunctor {
   template <typename Device, typename X, typename Y, typename Z, typename dX,
@@ ... @@ class ElementwiseSubGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     ElementwiseGradCompute<DeviceContext, T, ElementwiseSubGradFunctor<T>,
-                           ElementwiseSubOneGradFunctor<T>,
                            ElementwiseSubBroadCastGradFunctor<T>,
                            ElementwiseSubBroadCast2GradFunctor<T>>(ctx);
   }
--
GitLab