From efc119b43b1e2e296682c20d3a244234eb427405 Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Tue, 25 Jul 2017 15:21:59 +0800
Subject: [PATCH] Add type_alias to import framework into ops

Make implementing an operator less noisy.
---
 paddle/operators/add_op.cc           | 29 +++++++---------
 paddle/operators/add_op.cu           |  5 ++-
 paddle/operators/add_op.h            | 19 +++++------
 paddle/operators/cross_entropy_op.cc | 28 ++++++---------
 paddle/operators/cross_entropy_op.cu |  4 +--
 paddle/operators/cross_entropy_op.h  | 14 ++++----
 paddle/operators/fc_op.cc            | 39 ++++++++++-----------
 paddle/operators/mul_op.cc           | 29 +++++++---------
 paddle/operators/mul_op.cu           |  5 +--
 paddle/operators/mul_op.h            | 21 +++++-------
 paddle/operators/rowwise_add_op.cc   | 24 +++++--------
 paddle/operators/rowwise_add_op.cu   |  6 ++--
 paddle/operators/rowwise_add_op.h    | 20 +++++------
 paddle/operators/sgd_op.cc           | 21 +++++-------
 paddle/operators/sgd_op.cu           |  4 +--
 paddle/operators/sgd_op.h            | 20 +++++------
 paddle/operators/sigmoid_op.cc       | 32 +++++++-----------
 paddle/operators/sigmoid_op.cu       |  4 +--
 paddle/operators/sigmoid_op.h        | 16 ++++-----
 paddle/operators/softmax_op.cc       | 27 ++++++---------
 paddle/operators/softmax_op.cu       |  3 +-
 paddle/operators/softmax_op.h        | 16 ++++-----
 paddle/operators/type_alias.h        | 51 ++++++++++++++++++++++++++++
 23 files changed, 205 insertions(+), 232 deletions(-)
 create mode 100644 paddle/operators/type_alias.h

diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc
index 8d415fbd2e7..1424b028437 100644
--- a/paddle/operators/add_op.cc
+++ b/paddle/operators/add_op.cc
@@ -13,17 +13,14 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/operators/add_op.h"
-#include "paddle/framework/op_registry.h"
-#include "paddle/framework/tensor.h"

 namespace paddle {
 namespace operators {

-class AddOp : public framework::OperatorWithKernel {
+class AddOp : public OperatorWithKernel {
 protected:
-  void InferShape(
-      const std::vector<const framework::Tensor *> &inputs,
-      const std::vector<framework::Tensor *> &outputs) const override {
+  void InferShape(const std::vector<const Tensor *> &inputs,
+                  const std::vector<Tensor *> &outputs) const override {
     PADDLE_ENFORCE(inputs.size() == 2, "Input size of AddOp must be two");
     PADDLE_ENFORCE(outputs.size() == 1, "Output size of AddOp must be one");
     PADDLE_ENFORCE(
@@ -35,10 +32,10 @@ protected:
   }
 };

-class AddOpMaker : public framework::OpProtoAndCheckerMaker {
+class AddOpMaker : public OpProtoAndCheckerMaker {
 public:
-  AddOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
-      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
+  AddOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "The first input of add op");
     AddInput("Y", "The second input of add op");
     AddOutput("Out", "The output of add op");
@@ -50,11 +47,10 @@ The equation is: Out = X + Y
   }
 };

-class AddOpGrad : public framework::OperatorWithKernel {
+class AddOpGrad : public OperatorWithKernel {
 protected:
-  void InferShape(
-      const std::vector<const framework::Tensor *> &inputs,
-      const std::vector<framework::Tensor *> &outputs) const override {}
+  void InferShape(const std::vector<const Tensor *> &inputs,
+                  const std::vector<Tensor *> &outputs) const override {}
   std::string DebugString() const override {
     LOG(INFO) << "AddOpGrad";
     return "";
@@ -64,7 +60,6 @@ protected:
 }  // namespace operators
 }  // namespace paddle

-REGISTER_OP(add_two, paddle::operators::AddOp, paddle::operators::AddOpMaker);
-REGISTER_GRADIENT_OP(add_two, add_two_grad, paddle::operators::AddOpGrad);
-REGISTER_OP_CPU_KERNEL(
-    add_two, paddle::operators::AddKernel<paddle::platform::CPUPlace, float>);
+REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker);
+REGISTER_GRADIENT_OP(add_two, add_two_grad, ops::AddOpGrad);
+REGISTER_OP_CPU_KERNEL(add_two, ops::AddKernel<ops::CPUPlace, float>);
diff --git a/paddle/operators/add_op.cu b/paddle/operators/add_op.cu
index 2e5a755f92e..79d8de6cd46 100644
--- a/paddle/operators/add_op.cu
+++ b/paddle/operators/add_op.cu
@@ -1,5 +1,4 @@
-#include "paddle/operators/add_op.h"
 #include "paddle/framework/op_registry.h"
+#include "paddle/operators/add_op.h"

-REGISTER_OP_GPU_KERNEL(add_two,
-                       paddle::operators::AddKernel<paddle::platform::GPUPlace, float>);
\ No newline at end of file
+REGISTER_OP_GPU_KERNEL(add_two, ops::AddKernel<ops::GPUPlace, float>);
diff --git a/paddle/operators/add_op.h b/paddle/operators/add_op.h
index 39d54a63bd1..0c39433788e 100644
--- a/paddle/operators/add_op.h
+++ b/paddle/operators/add_op.h
@@ -13,27 +13,24 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once
-#include "glog/logging.h"
-#include "paddle/framework/eigen.h"
-#include "paddle/framework/operator.h"
+#include "paddle/operators/type_alias.h"

 namespace paddle {
 namespace operators {

 template <typename Place, typename T>
-class AddKernel : public framework::OpKernel {
+class AddKernel : public OpKernel {
 public:
-  void Compute(const framework::KernelContext& context) const override {
-    auto input0 = context.Input(0)->Get<framework::Tensor>();
-    auto input1 = context.Input(1)->Get<framework::Tensor>();
-    auto* output = context.Output(0)->GetMutable<framework::Tensor>();
+  void Compute(const KernelContext& context) const override {
+    auto input0 = context.Input(0)->Get<Tensor>();
+    auto input1 = context.Input(1)->Get<Tensor>();
+    auto output = context.Output(0)->GetMutable<Tensor>();

     output->mutable_data<T>(context.GetPlace());

-    framework::EigenVector<T>::Flatten(*output).device(
+    EigenVector<T>::Flatten(*output).device(
         *(context.GetEigenDevice<Place>())) =
-        framework::EigenVector<T>::Flatten(input0) +
-        framework::EigenVector<T>::Flatten(input1);
+        EigenVector<T>::Flatten(input0) + EigenVector<T>::Flatten(input1);
   }
 };
diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc
index 7d7bb09f3d6..46c88d4d1a2 100644
--- a/paddle/operators/cross_entropy_op.cc
+++ b/paddle/operators/cross_entropy_op.cc
@@ -13,17 +13,14 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/operators/cross_entropy_op.h"
-#include "paddle/framework/op_registry.h"
-#include "paddle/framework/tensor.h"

 namespace paddle {
 namespace operators {

-class OnehotCrossEntropyOp : public framework::OperatorWithKernel {
+class OnehotCrossEntropyOp : public OperatorWithKernel {
 protected:
-  void InferShape(
-      const std::vector<const framework::Tensor *> &inputs,
-      const std::vector<framework::Tensor *> &outputs) const override {
+  void InferShape(const std::vector<const Tensor *> &inputs,
+                  const std::vector<Tensor *> &outputs) const override {
     PADDLE_ENFORCE(inputs.size() == 2,
                    "Input size of OnehotCrossEntropyOp must be two");
     PADDLE_ENFORCE(outputs.size() == 1,
@@ -35,15 +32,14 @@ protected:
     PADDLE_ENFORCE(inputs[0]->dims().size() == 2, "X's dimension must be 2.");
     PADDLE_ENFORCE(outputs[0]->dims().size() == 1,
                    "label's dimension must be 1.");
-    outputs[0]->Resize(framework::make_ddim({inputs[0]->dims()[0]}));
+    outputs[0]->Resize({inputs[0]->dims()[0]});
   }
 };

-class OnehotCrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker {
+class OnehotCrossEntropyOpMaker : public OpProtoAndCheckerMaker {
 public:
-  OnehotCrossEntropyOpMaker(framework::OpProto *proto,
-                            framework::OpAttrChecker *op_checker)
-      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
+  OnehotCrossEntropyOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "The first input of OnehotCrossEntropyOp");
     AddInput("label", "The second input of OnehotCrossEntropyOp");
     AddOutput("Y", "The output of OnehotCrossEntropyOp");
@@ -59,9 +55,7 @@ OnehotCrossEntropy Operator.
 }  // namespace paddle

 REGISTER_OP(onehot_cross_entropy,
-            paddle::operators::OnehotCrossEntropyOp,
-            paddle::operators::OnehotCrossEntropyOpMaker);
-REGISTER_OP_CPU_KERNEL(
-    onehot_cross_entropy,
-    paddle::operators::OnehotCrossEntropyOpKernel<::paddle::platform::CPUPlace,
-                                                  float>);
+            ops::OnehotCrossEntropyOp,
+            ops::OnehotCrossEntropyOpMaker);
+REGISTER_OP_CPU_KERNEL(onehot_cross_entropy,
+                       ops::OnehotCrossEntropyOpKernel<ops::CPUPlace, float>);
diff --git a/paddle/operators/cross_entropy_op.cu b/paddle/operators/cross_entropy_op.cu
index 1bcdcb7ea65..19e4b74596a 100644
--- a/paddle/operators/cross_entropy_op.cu
+++ b/paddle/operators/cross_entropy_op.cu
@@ -1,6 +1,4 @@
 #include "paddle/operators/cross_entropy_op.h"
-#include "paddle/framework/op_registry.h"

 REGISTER_OP_GPU_KERNEL(onehot_cross_entropy,
-                       paddle::operators::OnehotCrossEntropyOpKernel<
-                           ::paddle::platform::GPUPlace, float>);
\ No newline at end of file
+                       ops::OnehotCrossEntropyOpKernel<ops::GPUPlace, float>);
\ No newline at end of file
diff --git a/paddle/operators/cross_entropy_op.h b/paddle/operators/cross_entropy_op.h
index ad2c7f34e1f..0383df46be3 100644
--- a/paddle/operators/cross_entropy_op.h
+++ b/paddle/operators/cross_entropy_op.h
@@ -13,23 +13,21 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once
-#include "glog/logging.h"
-#include "paddle/framework/operator.h"
+#include "paddle/operators/type_alias.h"

 namespace paddle {
 namespace operators {

 template <typename Place, typename T>
-class OnehotCrossEntropyOpKernel : public framework::OpKernel {
+class OnehotCrossEntropyOpKernel : public OpKernel {
 public:
   constexpr T LOG_THRESHOLD() const { return static_cast<T>(1e-20); }

-  void Compute(const framework::KernelContext& context) const override {
-    auto X = context.Input(0)->Get<framework::Tensor>();
+  void Compute(const KernelContext& context) const override {
+    auto X = context.Input(0)->Get<Tensor>();
     const T* X_data = X.data<T>();
-    const int* label_data =
-        context.Input(1)->Get<framework::Tensor>().data<int>();
-    auto* Y = context.Output(0)->GetMutable<framework::Tensor>();
+    const int* label_data = context.Input(1)->Get<Tensor>().data<int>();
+    auto* Y = context.Output(0)->GetMutable<Tensor>();

     Y->mutable_data<T>(context.GetPlace());
diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc
index 01e96f4c481..40ff2f41dda 100644
--- a/paddle/operators/fc_op.cc
+++ b/paddle/operators/fc_op.cc
@@ -12,41 +12,38 @@ See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/framework/net.h"
-#include "paddle/framework/op_registry.h"
-#include "paddle/framework/operator.h"
+#include "type_alias.h"

 namespace paddle {
 namespace operators {

-class FullyConnectedOp : public framework::PlainNet {
+class FullyConnectedOp : public PlainNet {
 public:
   void Init() override {
-    AddOp(framework::OpRegistry::CreateOp("mul",
-                                          {
-                                              Input("X"), Input("W"),
-                                          },
-                                          {Output("before_act")},
-                                          {}));
+    AddOp(OpRegistry::CreateOp("mul",
+                               {
+                                   Input("X"), Input("W"),
+                               },
+                               {Output("before_act")},
+                               {}));
     auto b = Input("b");
-    if (b != framework::OperatorBase::EMPTY_VAR_NAME()) {
-      AddOp(framework::OpRegistry::CreateOp("rowwise_add",
-                                            {Output("before_act"), Input("b")},
-                                            {Output("before_act")},
-                                            {}));
+    if (b != EMPTY_VAR_NAME()) {
+      AddOp(OpRegistry::CreateOp("rowwise_add",
+                                 {Output("before_act"), Input("b")},
+                                 {Output("before_act")},
+                                 {}));
     }

     auto activation = GetAttr<std::string>("activation");
-    AddOp(framework::OpRegistry::CreateOp(
+    AddOp(OpRegistry::CreateOp(
         activation, {Output("before_act")}, {Output("Y")}, {}));
     CompleteAddOp(false);
   }
 };

-class FullyConnectedOpMaker : public framework::OpProtoAndCheckerMaker {
+class FullyConnectedOpMaker : public OpProtoAndCheckerMaker {
 public:
-  FullyConnectedOpMaker(framework::OpProto *proto,
-                        framework::OpAttrChecker *op_checker)
+  FullyConnectedOpMaker(OpProto *proto, OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "the input of fc operator");
     AddInput("W", "the weight of fc operator");
@@ -71,6 +68,4 @@ USE_OP(rowwise_add);
 USE_OP(sigmoid);
 USE_OP(softmax);

-REGISTER_OP(fc,
-            paddle::operators::FullyConnectedOp,
-            paddle::operators::FullyConnectedOpMaker);
+REGISTER_OP(fc, ops::FullyConnectedOp, ops::FullyConnectedOpMaker);
diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc
index cd74c8b976d..22c1b780053 100644
--- a/paddle/operators/mul_op.cc
+++ b/paddle/operators/mul_op.cc
@@ -13,17 +13,14 @@ limitations under the License.
 */

 #include "paddle/operators/mul_op.h"
-#include "paddle/framework/op_registry.h"
-#include "paddle/framework/tensor.h"

 namespace paddle {
 namespace operators {

-class MulOp : public framework::OperatorWithKernel {
+class MulOp : public OperatorWithKernel {
 protected:
-  void InferShape(
-      const std::vector<const framework::Tensor *> &inputs,
-      const std::vector<framework::Tensor *> &outputs) const override {
+  void InferShape(const std::vector<const Tensor *> &inputs,
+                  const std::vector<Tensor *> &outputs) const override {
     PADDLE_ENFORCE(inputs.size() == 2, "The mul op must take two inputs");
     auto dim0 = inputs[0]->dims();
     auto dim1 = inputs[1]->dims();
@@ -37,10 +34,10 @@ protected:
   }
 };

-class MulOpMaker : public framework::OpProtoAndCheckerMaker {
+class MulOpMaker : public OpProtoAndCheckerMaker {
 public:
-  MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
-      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
+  MulOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "The first input of mul op");
     AddInput("Y", "The second input of mul op");
     AddOutput("Out", "The output of mul op");
@@ -52,11 +49,10 @@ The equation is: Out = X * Y
   }
 };

-class MulOpGrad : public framework::OperatorWithKernel {
+class MulOpGrad : public OperatorWithKernel {
 protected:
-  void InferShape(
-      const std::vector<const framework::Tensor *> &inputs,
-      const std::vector<framework::Tensor *> &outputs) const override {}
+  void InferShape(const std::vector<const Tensor *> &inputs,
+                  const std::vector<Tensor *> &outputs) const override {}
   std::string DebugString() const override {
     LOG(INFO) << "MulGrad";
     return "";
@@ -66,8 +62,7 @@ protected:
 }  // namespace operators
 }  // namespace paddle

-REGISTER_OP(mul, paddle::operators::MulOp, paddle::operators::MulOpMaker);
-REGISTER_GRADIENT_OP(mul, mul_grad, paddle::operators::MulOpGrad);
+REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker);
+REGISTER_GRADIENT_OP(mul, mul_grad, ops::MulOpGrad);

-REGISTER_OP_CPU_KERNEL(
-    mul, paddle::operators::MulKernel<paddle::platform::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel<ops::CPUPlace, float>);
diff --git a/paddle/operators/mul_op.cu b/paddle/operators/mul_op.cu
index 3ee581dc77d..c27fc886ce7 100644
--- a/paddle/operators/mul_op.cu
+++ b/paddle/operators/mul_op.cu
@@ -13,8 +13,5 @@ limitations under the License.
 */

 #include "paddle/operators/mul_op.h"
-#include "paddle/framework/op_registry.h"

-REGISTER_OP_GPU_KERNEL(mul,
-                       paddle::operators::MulKernel<paddle::platform::GPUPlace,
-                                                    float>);
\ No newline at end of file
+REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel<ops::GPUPlace, float>);
\ No newline at end of file
diff --git a/paddle/operators/mul_op.h b/paddle/operators/mul_op.h
index e6bad7fb9da..46797504463 100644
--- a/paddle/operators/mul_op.h
+++ b/paddle/operators/mul_op.h
@@ -14,30 +14,27 @@

 #pragma once

-#include "glog/logging.h"
-#include "paddle/framework/eigen.h"
-#include "paddle/framework/operator.h"
+#include "paddle/operators/type_alias.h"

 namespace paddle {
 namespace operators {

 template <typename Place, typename T>
-class MulKernel : public framework::OpKernel {
+class MulKernel : public OpKernel {
 public:
-  void Compute(const framework::KernelContext& context) const override {
+  void Compute(const KernelContext& context) const override {
     Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair = {
         {Eigen::IndexPair<Eigen::DenseIndex>(1, 0)}};

-    auto input0 = context.Input(0)->Get<framework::Tensor>();
-    auto input1 = context.Input(1)->Get<framework::Tensor>();
-    auto* output = context.Output(0)->GetMutable<framework::Tensor>();
+    auto input0 = context.Input(0)->Get<Tensor>();
+    auto input1 = context.Input(1)->Get<Tensor>();
+    auto* output = context.Output(0)->GetMutable<Tensor>();

     output->mutable_data<T>(context.GetPlace());

-    framework::EigenMatrix<T>::From(*output).device(
-        *(context.GetEigenDevice<Place>())) =
-        framework::EigenMatrix<T>::From(input0).contract(
-            framework::EigenMatrix<T>::From(input1), dim_pair);
+    EigenMatrix<T>::From(*output).device(*(context.GetEigenDevice<Place>())) =
+        EigenMatrix<T>::From(input0).contract(EigenMatrix<T>::From(input1),
+                                              dim_pair);
   }
 };
diff --git a/paddle/operators/rowwise_add_op.cc b/paddle/operators/rowwise_add_op.cc
index e04d69fa72a..4129422fa74 100644
--- a/paddle/operators/rowwise_add_op.cc
+++ b/paddle/operators/rowwise_add_op.cc
@@ -13,15 +13,13 @@ limitations under the License.
 */

 #include "paddle/operators/rowwise_add_op.h"
-#include "paddle/framework/op_registry.h"
 namespace paddle {
 namespace operators {

-class RowWiseAddOp : public framework::OperatorWithKernel {
+class RowWiseAddOp : public OperatorWithKernel {
 protected:
-  void InferShape(
-      const std::vector<const framework::Tensor *> &inputs,
-      const std::vector<framework::Tensor *> &outputs) const override {
+  void InferShape(const std::vector<const Tensor *> &inputs,
+                  const std::vector<Tensor *> &outputs) const override {
     PADDLE_ENFORCE(inputs.size() == 2UL, "Two inputs is needed by rowwise add");
     auto dim0 = inputs[0]->dims();
     auto dim1 = inputs[1]->dims();
@@ -34,11 +32,10 @@ protected:
   }
 };

-class RowWiseAddOpMaker : public framework::OpProtoAndCheckerMaker {
+class RowWiseAddOpMaker : public OpProtoAndCheckerMaker {
 public:
-  RowWiseAddOpMaker(framework::OpProto *proto,
-                    framework::OpAttrChecker *op_checker)
-      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
+  RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "The left input of row-wise add op, must be matrix");
     AddInput("b", "The right input of row-wise add op, must be vector");
     AddOutput("Out", "The output of row-wise add op");
@@ -53,9 +50,6 @@ for i in xrange(X.shape[0]):
 }  // namespace operators
 }  // namespace paddle

-REGISTER_OP(rowwise_add,
-            paddle::operators::RowWiseAddOp,
-            paddle::operators::RowWiseAddOpMaker);
-REGISTER_OP_CPU_KERNEL(
-    rowwise_add,
-    paddle::operators::RowWiseAddKernel<paddle::platform::CPUPlace, float>);
+REGISTER_OP(rowwise_add, ops::RowWiseAddOp, ops::RowWiseAddOpMaker);
+REGISTER_OP_CPU_KERNEL(rowwise_add,
+                       ops::RowWiseAddKernel<ops::CPUPlace, float>);
diff --git a/paddle/operators/rowwise_add_op.cu b/paddle/operators/rowwise_add_op.cu
index 5dfac4fd2cf..4b33e38ebab 100644
--- a/paddle/operators/rowwise_add_op.cu
+++ b/paddle/operators/rowwise_add_op.cu
@@ -1,6 +1,4 @@
-#include "paddle/framework/op_registry.h"
 #include "paddle/operators/rowwise_add_op.h"

-REGISTER_OP_GPU_KERNEL(
-    rowwise_add,
-    paddle::operators::RowWiseAddKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(rowwise_add,
+                       ops::RowWiseAddKernel<ops::GPUPlace, float>);
diff --git a/paddle/operators/rowwise_add_op.h b/paddle/operators/rowwise_add_op.h
index dc47fe7c847..4596925e932 100644
--- a/paddle/operators/rowwise_add_op.h
+++ b/paddle/operators/rowwise_add_op.h
@@ -13,25 +13,23 @@ limitations under the License.
 */

 #pragma once
-#include "glog/logging.h"
-#include "paddle/framework/eigen.h"
-#include "paddle/framework/operator.h"
+#include "paddle/operators/type_alias.h"

 namespace paddle {
 namespace operators {

 template <typename Place, typename T>
-class RowWiseAddKernel : public framework::OpKernel {
+class RowWiseAddKernel : public OpKernel {
 public:
-  void Compute(const framework::KernelContext& context) const override {
-    auto in0 = context.Input(0)->Get<framework::Tensor>();
-    auto in1 = context.Input(1)->Get<framework::Tensor>();
-    auto* out = context.Output(0)->GetMutable<framework::Tensor>();
+  void Compute(const KernelContext& context) const override {
+    auto in0 = context.Input(0)->Get<Tensor>();
+    auto in1 = context.Input(1)->Get<Tensor>();
+    auto* out = context.Output(0)->GetMutable<Tensor>();
     out->mutable_data<T>(context.GetPlace());

-    auto input = framework::EigenMatrix<T>::From(in0);
-    auto bias = framework::EigenVector<T>::From(in1);
-    auto output = framework::EigenMatrix<T>::From(*out);
+    auto input = EigenMatrix<T>::From(in0);
+    auto bias = EigenVector<T>::From(in1);
+    auto output = EigenMatrix<T>::From(*out);

     const int bias_size = bias.dimension(0);
     const int rest_size = input.size() / bias_size;
diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc
index 66ab1e00114..f6c654a9e70 100644
--- a/paddle/operators/sgd_op.cc
+++ b/paddle/operators/sgd_op.cc
@@ -13,17 +13,14 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/operators/sgd_op.h"
-#include "paddle/framework/op_registry.h"
-#include "paddle/framework/tensor.h"

 namespace paddle {
 namespace operators {

-class SGDOp : public framework::OperatorWithKernel {
+class SGDOp : public OperatorWithKernel {
 protected:
-  void InferShape(
-      const std::vector<const framework::Tensor *> &inputs,
-      const std::vector<framework::Tensor *> &outputs) const override {
+  void InferShape(const std::vector<const Tensor *> &inputs,
+                  const std::vector<Tensor *> &outputs) const override {
     PADDLE_ENFORCE(inputs.size() == 2, "Input size of SGDOp must be two");
     PADDLE_ENFORCE(outputs.size() == 1, "Output size of SGDOp must be one");
     PADDLE_ENFORCE(inputs[0] != nullptr, "inputs[0] mast be set");
@@ -35,10 +32,10 @@ protected:
   }
 };

-class SGDOpMaker : public framework::OpProtoAndCheckerMaker {
+class SGDOpMaker : public OpProtoAndCheckerMaker {
 public:
-  SGDOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
-      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
+  SGDOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("param", "input parameter");
     AddInput("grad", "input gradient");
     AddOutput("param_out", "output parameter");
@@ -55,7 +52,5 @@ param_out = param - learning_rate * grad;
 }  // namespace operators
 }  // namespace paddle

-REGISTER_OP(sgd, paddle::operators::SGDOp, paddle::operators::SGDOpMaker);
-typedef paddle::operators::SGDOpKernel<::paddle::platform::CPUPlace, float>
-    SGDOpKernel_CPU_float;
-REGISTER_OP_CPU_KERNEL(sgd, SGDOpKernel_CPU_float);
+REGISTER_OP(sgd, ops::SGDOp, ops::SGDOpMaker);
+REGISTER_OP_CPU_KERNEL(sgd, ops::SGDOpKernel<ops::CPUPlace, float>);
diff --git a/paddle/operators/sgd_op.cu b/paddle/operators/sgd_op.cu
index 400425db108..f8f5b90cab4 100644
--- a/paddle/operators/sgd_op.cu
+++ b/paddle/operators/sgd_op.cu
@@ -1,5 +1,3 @@
 #include "paddle/operators/sgd_op.h"
-#include "paddle/framework/op_registry.h"

-typedef paddle::operators::SGDOpKernel<::paddle::platform::GPUPlace, float> SGDOpKernel_GPU_float;
-REGISTER_OP_GPU_KERNEL(sgd, SGDOpKernel_GPU_float);
\ No newline at end of file
+REGISTER_OP_GPU_KERNEL(sgd, ops::SGDOpKernel<ops::GPUPlace, float>);
\ No newline at end of file
diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h
index 4b2d214618e..65179d323bd 100644
--- a/paddle/operators/sgd_op.h
+++ b/paddle/operators/sgd_op.h
@@ -13,28 +13,24 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once
-#include "glog/logging.h"
-#include "paddle/framework/eigen.h"
-#include "paddle/framework/operator.h"
+#include "paddle/operators/type_alias.h"

 namespace paddle {
 namespace operators {

 template <typename Place, typename T>
-class SGDOpKernel : public framework::OpKernel {
+class SGDOpKernel : public OpKernel {
 public:
-  void Compute(const framework::KernelContext& ctx) const override {
-    auto param = ctx.Input("param")->Get<framework::Tensor>();
-    auto grad = ctx.Input("grad")->Get<framework::Tensor>();
-    auto* param_out = ctx.Output(0)->GetMutable<framework::Tensor>();
+  void Compute(const KernelContext& ctx) const override {
+    auto param = ctx.Input("param")->Get<Tensor>();
+    auto grad = ctx.Input("grad")->Get<Tensor>();
+    auto* param_out = ctx.Output(0)->GetMutable<Tensor>();
     float lr = ctx.op_.GetAttr<float>("learning_rate");

     param_out->mutable_data<T>(ctx.GetPlace());

-    framework::EigenVector<T>::Flatten(*param_out)
-        .device(*(ctx.GetEigenDevice<Place>())) =
-        framework::EigenVector<T>::Flatten(param) -
-        lr * framework::EigenVector<T>::Flatten(grad);
+    EigenVector<T>::Flatten(*param_out).device(*(ctx.GetEigenDevice<Place>())) =
+        EigenVector<T>::Flatten(param) - lr * EigenVector<T>::Flatten(grad);
   }
 };
diff --git a/paddle/operators/sigmoid_op.cc b/paddle/operators/sigmoid_op.cc
index bf63af28b00..716f1d9c4db 100644
--- a/paddle/operators/sigmoid_op.cc
+++ b/paddle/operators/sigmoid_op.cc
@@ -13,37 +13,33 @@ limitations under the License. */

 #include "paddle/operators/sigmoid_op.h"
-#include "paddle/framework/op_registry.h"

 namespace paddle {
 namespace operators {

-class SigmoidOp : public framework::OperatorWithKernel {
+class SigmoidOp : public OperatorWithKernel {
 protected:
-  void InferShape(
-      const std::vector<const framework::Tensor *> &inputs,
-      const std::vector<framework::Tensor *> &outputs) const override {
+  void InferShape(const std::vector<const Tensor *> &inputs,
+                  const std::vector<Tensor *> &outputs) const override {
     PADDLE_ENFORCE(inputs.size() == 1, "Sigmoid Op only have one input");
     PADDLE_ENFORCE(outputs.size() == 1, "Sigmoid Op only have one output");
     outputs[0]->Resize(inputs[0]->dims());
   }
 };

-class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
+class SigmoidOpMaker : public OpProtoAndCheckerMaker {
 public:
-  SigmoidOpMaker(framework::OpProto *proto,
-                 framework::OpAttrChecker *op_checker)
-      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
+  SigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "sigmoid input");
     AddOutput("Y", "sigmoid output");
     AddComment("Sigmoid function");
   }
 };

-class SigmoidOpGrad : public framework::OperatorWithKernel {
+class SigmoidOpGrad : public OperatorWithKernel {
 protected:
-  void InferShape(
-      const std::vector<const framework::Tensor *> &inputs,
-      const std::vector<framework::Tensor *> &outputs) const override {}
+  void InferShape(const std::vector<const Tensor *> &inputs,
+                  const std::vector<Tensor *> &outputs) const override {}
   std::string DebugString() const override {
     LOG(INFO) << "SigmoidGrad";
     return "";
@@ -53,11 +49,7 @@ protected:
 }  // namespace operators
 }  // namespace paddle

-REGISTER_OP(sigmoid,
-            paddle::operators::SigmoidOp,
-            paddle::operators::SigmoidOpMaker);
-REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, paddle::operators::SigmoidOpGrad);
+REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker);
+REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, ops::SigmoidOpGrad);

-REGISTER_OP_CPU_KERNEL(
-    sigmoid,
-    paddle::operators::SigmoidKernel<paddle::platform::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(sigmoid, ops::SigmoidKernel<ops::CPUPlace, float>);
diff --git a/paddle/operators/sigmoid_op.cu b/paddle/operators/sigmoid_op.cu
index ed344b2bfd4..f679b20418f 100644
--- a/paddle/operators/sigmoid_op.cu
+++ b/paddle/operators/sigmoid_op.cu
@@ -1,5 +1,3 @@
 #include "paddle/operators/sigmoid_op.h"
-#include "paddle/framework/op_registry.h"

-REGISTER_OP_GPU_KERNEL(
-    sigmoid, paddle::operators::SigmoidKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(sigmoid, ops::SigmoidKernel<ops::GPUPlace, float>);
diff --git a/paddle/operators/sigmoid_op.h b/paddle/operators/sigmoid_op.h
index 2b9356246c4..896a6f5d83e 100644
--- a/paddle/operators/sigmoid_op.h
+++ b/paddle/operators/sigmoid_op.h
@@ -14,25 +14,23 @@

 #pragma once

-#include "glog/logging.h"
-#include "paddle/framework/eigen.h"
-#include "paddle/framework/operator.h"
+#include "paddle/operators/type_alias.h"

 namespace paddle {
 namespace operators {

 template <typename Place, typename T>
-class SigmoidKernel : public framework::OpKernel {
+class SigmoidKernel : public OpKernel {
 public:
-  void Compute(const framework::KernelContext& context) const override {
-    auto input = context.Input(0)->Get<framework::Tensor>();
-    auto* output = context.Output(0)->GetMutable<framework::Tensor>();
+  void Compute(const KernelContext& context) const override {
+    auto input = context.Input(0)->Get<Tensor>();
+    auto* output = context.Output(0)->GetMutable<Tensor>();

     output->mutable_data<T>(context.GetPlace());

-    framework::EigenVector<T>::Flatten(*output).device(
+    EigenVector<T>::Flatten(*output).device(
         *(context.GetEigenDevice<Place>())) =
-        1.0 / (1.0 + (-1.0 * framework::EigenVector<T>::Flatten(input)).exp());
+        1.0 / (1.0 + (-1.0 * EigenVector<T>::Flatten(input)).exp());
   }
 };
diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc
index 82f72fa19f6..df60b62fa6a 100644
--- a/paddle/operators/softmax_op.cc
+++ b/paddle/operators/softmax_op.cc
@@ -12,16 +12,14 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/operators/softmax_op.h"
-#include "paddle/framework/op_registry.h"

 namespace paddle {
 namespace operators {

-class SoftmaxOp : public framework::OperatorWithKernel {
+class SoftmaxOp : public OperatorWithKernel {
 protected:
-  void InferShape(
-      const std::vector<const framework::Tensor *> &inputs,
-      const std::vector<framework::Tensor *> &outputs) const override {
+  void InferShape(const std::vector<const Tensor *> &inputs,
+                  const std::vector<Tensor *> &outputs) const override {
     PADDLE_ENFORCE(inputs.size() == 1, "Only one input is need for softmax");
     PADDLE_ENFORCE(inputs[0]->dims().size() == 2,
                    "The input of softmax op must be matrix");
@@ -31,10 +29,9 @@ protected:
   }
 };

-class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
+class SoftmaxOpMaker : public OpProtoAndCheckerMaker {
 public:
-  SoftmaxOpMaker(framework::OpProto *proto,
-                 framework::OpAttrChecker *op_checker)
+  SoftmaxOpMaker(OpProto *proto, OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "input of softmax");
     AddOutput("Y", "output of softmax");
@@ -42,11 +39,10 @@ public:
   }
 };

-class SoftmaxOpGrad : public framework::OperatorWithKernel {
+class SoftmaxOpGrad : public OperatorWithKernel {
 protected:
-  void InferShape(
-      const std::vector<const framework::Tensor *> &inputs,
-      const std::vector<framework::Tensor *> &outputs) const override {}
+  void InferShape(const std::vector<const Tensor *> &inputs,
+                  const std::vector<Tensor *> &outputs) const override {}
   std::string DebugString() const override {
     LOG(INFO) << "SoftmaxOpGrad";
     return "";
@@ -56,9 +52,6 @@ protected:
 }  // namespace operators
 }  // namespace paddle

-namespace ops = paddle::operators;
-
 REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker);
-REGISTER_GRADIENT_OP(softmax, softmax_grad, paddle::operators::SoftmaxOpGrad);
-REGISTER_OP_CPU_KERNEL(softmax,
-                       ops::SoftmaxKernel<paddle::platform::CPUPlace, float>);
+REGISTER_GRADIENT_OP(softmax, softmax_grad, ops::SoftmaxOpGrad);
+REGISTER_OP_CPU_KERNEL(softmax, ops::SoftmaxKernel<ops::CPUPlace, float>);
diff --git a/paddle/operators/softmax_op.cu b/paddle/operators/softmax_op.cu
index 60676191eb9..a1f6944a369 100644
--- a/paddle/operators/softmax_op.cu
+++ b/paddle/operators/softmax_op.cu
@@ -1,5 +1,4 @@
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/softmax_op.h"

-REGISTER_OP_GPU_KERNEL(
-    softmax, paddle::operators::SoftmaxKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(softmax, ops::SoftmaxKernel<ops::GPUPlace, float>);
diff --git a/paddle/operators/softmax_op.h b/paddle/operators/softmax_op.h
index 500c188dbfc..625a87b5856 100644
--- a/paddle/operators/softmax_op.h
+++ b/paddle/operators/softmax_op.h
@@ -14,23 +14,21 @@

 #pragma once

-#include "glog/logging.h"
-#include "paddle/framework/eigen.h"
-#include "paddle/framework/operator.h"
+#include "paddle/operators/type_alias.h"

 namespace paddle {
 namespace operators {

 template <typename Place, typename T>
-class SoftmaxKernel : public framework::OpKernel {
+class SoftmaxKernel : public OpKernel {
 public:
-  void Compute(const framework::KernelContext& context) const override {
-    auto input = context.Input(0)->Get<framework::Tensor>();
-    auto* output = context.Output(0)->GetMutable<framework::Tensor>();
+  void Compute(const KernelContext& context) const override {
+    auto input = context.Input(0)->Get<Tensor>();
+    auto* output = context.Output(0)->GetMutable<Tensor>();
     output->mutable_data<T>(context.GetPlace());

-    auto logits = framework::EigenMatrix<T>::From(input);
-    auto softmax = framework::EigenMatrix<T>::From(*output);
+    auto logits = EigenMatrix<T>::From(input);
+    auto softmax = EigenMatrix<T>::From(*output);

     const int kBatchDim = 0;
     const int kClassDim = 1;
diff --git a/paddle/operators/type_alias.h b/paddle/operators/type_alias.h
new file mode 100644
index 00000000000..44ffefb2993
--- /dev/null
+++ b/paddle/operators/type_alias.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#pragma once
+
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/net.h"
+#include "paddle/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+using OpKernel = framework::OpKernel;
+using KernelContext = framework::KernelContext;
+template <typename T,
+          int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
+template <typename T,
+          int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
+template <typename T,
+          size_t D,
+          int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
+using Tensor = framework::Tensor;
+using OperatorWithKernel = framework::OperatorWithKernel;
+using OpProtoAndCheckerMaker = framework::OpProtoAndCheckerMaker;
+using OpProto = framework::OpProto;
+using OpAttrChecker = framework::OpAttrChecker;
+using CPUPlace = platform::CPUPlace;
+using GPUPlace = platform::GPUPlace;
+using PlainNet = framework::PlainNet;
+using OpRegistry = framework::OpRegistry;
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
--
GitLab
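
P.S. for readers of this patch: with type_alias.h in place, a new operator kernel can be written using only the short alias names. The sketch below is illustrative only; ScaleKernel and its multiply-by-two semantics are hypothetical and not part of this commit. It simply reuses the calls already visible in the diffs above (OpKernel, KernelContext, Tensor, EigenVector).

// scale_op.h, a hypothetical example, not part of this patch.
#pragma once
#include "paddle/operators/type_alias.h"

namespace paddle {
namespace operators {

// A kernel written against the aliases: no framework:: prefixes needed.
template <typename Place, typename T>
class ScaleKernel : public OpKernel {
public:
  void Compute(const KernelContext& context) const override {
    // Fetch the input tensor and allocate the output on the kernel's place.
    auto input = context.Input(0)->Get<Tensor>();
    auto* output = context.Output(0)->GetMutable<Tensor>();
    output->mutable_data<T>(context.GetPlace());

    // out = 2 * in, element-wise, evaluated on the kernel's Eigen device.
    EigenVector<T>::Flatten(*output).device(
        *(context.GetEigenDevice<Place>())) =
        static_cast<T>(2) * EigenVector<T>::Flatten(input);
  }
};

}  // namespace operators
}  // namespace paddle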