diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc
index b5cf236bac6bb5abe061f7b4ad469d20e0af76a9..162528bc1fbb06793edd31602cd46c8045fd4396 100644
--- a/paddle/operators/fc_op.cc
+++ b/paddle/operators/fc_op.cc
@@ -12,12 +12,14 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "type_alias.h"
+#pragma once
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
 
 namespace paddle {
 namespace operators {
 
-class FullyConnectedOp : public NetOp {
+class FullyConnectedOp : public framework::NetOp {
  public:
   void Init() override {
     AddOp(OpRegistry::CreateOp("mul",
@@ -39,9 +41,10 @@ class FullyConnectedOp : public NetOp {
   }
 };
 
-class FullyConnectedOpMaker : public OpProtoAndCheckerMaker {
+class FullyConnectedOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  FullyConnectedOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+  FullyConnectedOpMaker(framework::OpProto *proto,
+                        framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "the input of fc operator");
     AddInput("W", "the weight of fc operator");
@@ -66,4 +69,5 @@ USE_OP(rowwise_add);
 USE_OP(sigmoid);
 USE_OP(softmax);
 
+namespace ops = paddle::operators;
 REGISTER_OP(fc, ops::FullyConnectedOp, ops::FullyConnectedOpMaker);
diff --git a/paddle/operators/rowwise_add_op.cc b/paddle/operators/rowwise_add_op.cc
index 8d1a36f2b332faad516ced012a409ca428bbf689..55ed1c2f4c316656de94b24dd95b053a89d7e74e 100644
--- a/paddle/operators/rowwise_add_op.cc
+++ b/paddle/operators/rowwise_add_op.cc
@@ -13,12 +13,13 @@ limitations under the License. */
 
 #include "paddle/operators/rowwise_add_op.h"
+
 namespace paddle {
 namespace operators {
 
-class RowWiseAddOp : public OperatorWithKernel {
+class RowWiseAddOp : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE(ctx.InputSize() == 2UL,
                    "Two inputs is needed by rowwise add");
     auto dim0 = ctx.Input<Tensor>(0)->dims();
@@ -32,9 +33,10 @@ class RowWiseAddOp : public OperatorWithKernel {
   }
 };
 
-class RowWiseAddOpMaker : public OpProtoAndCheckerMaker {
+class RowWiseAddOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+  RowWiseAddOpMaker(framework::OpProto *proto,
+                    framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "The left input of row-wise add op, must be matrix");
     AddInput("b", "The right input of row-wise add op, must be vector");
@@ -50,6 +52,7 @@ for i in xrange(X.shape[0]):
 }  // namespace operators
 }  // namespace paddle
 
+namespace ops = paddle::operators;
 REGISTER_OP(rowwise_add, ops::RowWiseAddOp, ops::RowWiseAddOpMaker);
-REGISTER_OP_CPU_KERNEL(rowwise_add,
-                       ops::RowWiseAddKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(
+    rowwise_add, ops::RowWiseAddKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/rowwise_add_op.cu b/paddle/operators/rowwise_add_op.cu
index f76faa0a3a93a1ac277a1d1aa83c3fa6c3944648..86f80b81228a69ac4c05a4693901570f2b9966e0 100644
--- a/paddle/operators/rowwise_add_op.cu
+++ b/paddle/operators/rowwise_add_op.cu
@@ -15,5 +15,6 @@
 #define EIGEN_USE_GPU
 #include "paddle/operators/rowwise_add_op.h"
 
-REGISTER_OP_GPU_KERNEL(rowwise_add,
-                       ops::RowWiseAddKernel<ops::GPUPlace, float>);
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(
+    rowwise_add, ops::RowWiseAddKernel<paddle::platform::GPUPlace, float>);
diff --git a/paddle/operators/rowwise_add_op.h b/paddle/operators/rowwise_add_op.h
index b52524c47c7b80d8ddc6a94a4a6d03db8034088d..82e9d70e959441869b958c1241fa5f5beef4c50c 100644
--- a/paddle/operators/rowwise_add_op.h
+++ b/paddle/operators/rowwise_add_op.h
@@ -13,15 +13,24 @@ limitations under the License. */
 
 #pragma once
-#include "paddle/operators/type_alias.h"
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
 
 namespace paddle {
 namespace operators {
 
+using Tensor = framework::Tensor;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
+
 template <typename Place, typename T>
-class RowWiseAddKernel : public OpKernel {
+class RowWiseAddKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext& context) const override {
+  void Compute(const framework::ExecutionContext& context) const override {
     auto out = context.Output<Tensor>(0);
     out->mutable_data<T>(context.GetPlace());
diff --git a/paddle/operators/sigmoid_op.cc b/paddle/operators/sigmoid_op.cc
index 9d201eb93a2c0e34dd8e6869e97b43c4e278596e..8564cc9480423b80da41e4ea3c20c036fbe56d24 100644
--- a/paddle/operators/sigmoid_op.cc
+++ b/paddle/operators/sigmoid_op.cc
@@ -13,21 +13,23 @@ limitations under the License. */
 
 #include "paddle/operators/sigmoid_op.h"
+
 namespace paddle {
 namespace operators {
 
-class SigmoidOp : public OperatorWithKernel {
+class SigmoidOp : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE(ctx.InputSize() == 1, "Sigmoid Op only have one input");
     PADDLE_ENFORCE(ctx.OutputSize() == 1, "Sigmoid Op only have one output");
     ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims());
   }
 };
 
-class SigmoidOpMaker : public OpProtoAndCheckerMaker {
+class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  SigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+  SigmoidOpMaker(framework::OpProto *proto,
+                 framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "sigmoid input");
     AddOutput("Y", "sigmoid output");
@@ -35,9 +37,9 @@ class SigmoidOpMaker : public OpProtoAndCheckerMaker {
   }
 };
 
-class SigmoidOpGrad : public OperatorWithKernel {
+class SigmoidOpGrad : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {}
+  void InferShape(const framework::InferShapeContext &ctx) const override {}
   std::string DebugString() const override {
     LOG(INFO) << "SigmoidGrad";
     return "";
@@ -47,7 +49,9 @@ class SigmoidOpGrad : public OperatorWithKernel {
 
 }  // namespace operators
 }  // namespace paddle
 
+namespace ops = paddle::operators;
 REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker);
 REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, ops::SigmoidOpGrad);
-REGISTER_OP_CPU_KERNEL(sigmoid, ops::SigmoidKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(sigmoid,
+                       ops::SigmoidKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/sigmoid_op.cu b/paddle/operators/sigmoid_op.cu
index 2123b17e4b5e90c22c2d6e9177f2a8956f8a4ac9..55cad18cebba4fe7ffec229cef99e07252783b1e 100644
--- a/paddle/operators/sigmoid_op.cu
+++ b/paddle/operators/sigmoid_op.cu
@@ -15,4 +15,6 @@
 #define EIGEN_USE_GPU
 #include "paddle/operators/sigmoid_op.h"
 
-REGISTER_OP_GPU_KERNEL(sigmoid, ops::SigmoidKernel<ops::GPUPlace, float>);
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(sigmoid,
+                       ops::SigmoidKernel<paddle::platform::GPUPlace, float>);
diff --git a/paddle/operators/sigmoid_op.h b/paddle/operators/sigmoid_op.h
index eb473920a5f866825b52ecb946653ccead7000ea..a5c15740fa98c49fef78699cd77809e6ac33882e 100644
--- a/paddle/operators/sigmoid_op.h
+++ b/paddle/operators/sigmoid_op.h
@@ -13,16 +13,21 @@ limitations under the License. */
 
 #pragma once
-
-#include "paddle/operators/type_alias.h"
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
 
 namespace paddle {
 namespace operators {
 
+using Tensor = framework::Tensor;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
+
 template <typename Place, typename T>
-class SigmoidKernel : public OpKernel {
+class SigmoidKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext& context) const override {
+  void Compute(const framework::ExecutionContext& context) const override {
     auto input = context.Input<Tensor>(0);
     auto output = context.Output<Tensor>(0);
     output->mutable_data<T>(context.GetPlace());
diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc
index a070458f5e55cf47253ab0df5af7a1163b4f8092..e24e595732e7c2375feab4bfb75a53e193e69290 100644
--- a/paddle/operators/softmax_op.cc
+++ b/paddle/operators/softmax_op.cc
@@ -17,9 +17,9 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-class SoftmaxOp : public OperatorWithKernel {
+class SoftmaxOp : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE(ctx.InputSize() == 1UL,
                    "Only one input is need for softmax");
     PADDLE_ENFORCE(ctx.Input<Tensor>("X")->dims().size() == 2UL,
@@ -30,9 +30,10 @@ class SoftmaxOp : public OperatorWithKernel {
   }
 };
 
-class SoftmaxOpMaker : public OpProtoAndCheckerMaker {
+class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  SoftmaxOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+  SoftmaxOpMaker(framework::OpProto *proto,
+                 framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "input of softmax");
     AddOutput("Y", "output of softmax");
@@ -61,8 +62,10 @@ class SoftmaxOpGrad : public OperatorWithKernel {
 }  // namespace operators
 }  // namespace paddle
 
+namespace ops = paddle::operators;
 REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker);
-REGISTER_OP_CPU_KERNEL(softmax, ops::SoftmaxKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(softmax,
+                       ops::SoftmaxKernel<paddle::platform::CPUPlace, float>);
 REGISTER_GRADIENT_OP(softmax, softmax_grad, ops::SoftmaxOpGrad);
-REGISTER_OP_CPU_KERNEL(softmax_grad,
-                       ops::SoftmaxGradKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(
+    softmax_grad, ops::SoftmaxGradKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/softmax_op.cu b/paddle/operators/softmax_op.cu
index b79228580a7ea0f70b62eb2dc7a61cf85bc0b5fb..92d22142734dbdf31d4b63895bce5dc1fad95152 100644
--- a/paddle/operators/softmax_op.cu
+++ b/paddle/operators/softmax_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,9 +13,10 @@ limitations under the License. */
 
 #define EIGEN_USE_GPU
-#include "paddle/framework/op_registry.h"
 #include "paddle/operators/softmax_op.h"
 
-REGISTER_OP_GPU_KERNEL(softmax, ops::SoftmaxKernel<ops::GPUPlace, float>);
-REGISTER_OP_GPU_KERNEL(softmax_grad,
-                       ops::SoftmaxGradKernel<ops::GPUPlace, float>);
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(softmax,
+                       ops::SoftmaxKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(
+    softmax_grad, ops::SoftmaxGradKernel<paddle::platform::GPUPlace, float>);
diff --git a/paddle/operators/softmax_op.h b/paddle/operators/softmax_op.h
index b2dbcf57edf1a64da8da0d9a4c14d708eec17f3f..0484555485aaf87d18e46b4716ad279124af67c5 100644
--- a/paddle/operators/softmax_op.h
+++ b/paddle/operators/softmax_op.h
@@ -13,19 +13,21 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #pragma once
-
-#include "paddle/framework/ddim.h"
-#include "paddle/framework/operator.h"
-#include "paddle/framework/tensor.h"
-#include "paddle/operators/type_alias.h"
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
 
 namespace paddle {
 namespace operators {
 
+using Tensor = framework::Tensor;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
+
 template <typename Place, typename T>
-class SoftmaxKernel : public OpKernel {
+class SoftmaxKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext& context) const override {
+  void Compute(const framework::ExecutionContext& context) const override {
     auto input = context.Input<Tensor>("X");
     auto output = context.Output<Tensor>("Y");
     output->mutable_data<T>(context.GetPlace());
@@ -43,21 +45,19 @@ class SoftmaxKernel : public OpKernel {
     Eigen::DSizes<int, 2> batch_by_one(batch_size, 1);
     Eigen::DSizes<int, 2> one_by_class(1, num_classes);
 
-    auto shifted_logits = (logits -
-                           logits.maximum(along_class)
-                               .eval()
-                               .reshape(batch_by_one)
-                               .broadcast(one_by_class));
+    auto shifted_logits = (logits - logits.maximum(along_class)
+                                        .eval()
+                                        .reshape(batch_by_one)
+                                        .broadcast(one_by_class));
 
     softmax.device(context.GetEigenDevice<Place>()) = shifted_logits.exp();
 
     softmax.device(context.GetEigenDevice<Place>()) =
-        (softmax *
-         softmax.sum(along_class)
-             .inverse()
-             .eval()
-             .reshape(batch_by_one)
-             .broadcast(one_by_class));
+        (softmax * softmax.sum(along_class)
+                       .inverse()
+                       .eval()
+                       .reshape(batch_by_one)
+                       .broadcast(one_by_class));
   }
 };
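
Note on the resulting convention: with type_alias.h gone, every operator file includes paddle/framework/eigen.h and paddle/framework/op_registry.h directly, spells out framework:: on each base class, re-declares only the Tensor/Eigen aliases it actually uses, and each registration translation unit defines its own `namespace ops = paddle::operators;`. The sketch below illustrates that style for a new kernel header. It is hypothetical, not part of this patch: the "scale" op name and the constant factor are invented, and it assumes the EigenVector<T>::Flatten helper declared in paddle/framework/eigen.h.

// scale_op.h (hypothetical) -- a minimal kernel in the post-type_alias.h style.
#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

// Declare only the aliases this header actually needs.
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;

template <typename Place, typename T>
class ScaleKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto input = context.Input<Tensor>(0);
    auto output = context.Output<Tensor>(0);
    output->mutable_data<T>(context.GetPlace());

    // Flatten both tensors to 1-D Eigen maps and evaluate on the device
    // selected by the Place template parameter (CPU or GPU).
    EigenVector<T>::Flatten(*output).device(context.GetEigenDevice<Place>()) =
        EigenVector<T>::Flatten(*input) * static_cast<T>(2);
  }
};

}  // namespace operators
}  // namespace paddle

The matching scale_op.cc / scale_op.cu would then mirror rowwise_add above: define the op and its maker against framework::OperatorWithKernel / framework::OpProtoAndCheckerMaker, add `namespace ops = paddle::operators;`, and register with REGISTER_OP_CPU_KERNEL(scale, ops::ScaleKernel<paddle::platform::CPUPlace, float>) and the GPU counterpart.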