/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/softmax_op.h"
namespace paddle {
namespace operators {

D
dongzhihong 已提交
20
class SoftmaxOp : public framework::OperatorWithKernel {
Y
Update  
Yi Wang 已提交
21
  DEFINE_OPERATOR_CTOR(SoftmaxOp, framework::OperatorWithKernel)
22
 protected:
D
dongzhihong 已提交
23
  void InferShape(const framework::InferShapeContext &ctx) const override {
Y
Yan Chunwei 已提交
24 25 26 27 28 29
    PADDLE_ENFORCE_EQ(ctx.InputSize(), 1UL,
                      "Only one input is need for softmax");
    PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims().size(), 2UL,
                      "The input of softmax op must be matrix");
    PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL,
                      "Only one output is need for softmax");
Q
Qiao Longfei 已提交
30
    ctx.Output<Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims());
31 32 33
  }
};

D
dongzhihong 已提交
34
class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
35
 public:
D
dongzhihong 已提交
36 37
  SoftmaxOpMaker(framework::OpProto *proto,
                 framework::OpAttrChecker *op_checker)
38 39 40 41 42 43 44
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "input of softmax");
    AddOutput("Y", "output of softmax");
    AddComment("Softmax Op");
  }
};

D
dongzhihong 已提交
45
class SoftmaxOpGrad : public framework::OperatorWithKernel {
Y
Update  
Yi Wang 已提交
46
  DEFINE_OPERATOR_CTOR(SoftmaxOpGrad, framework::OperatorWithKernel)
47
 protected:
D
dongzhihong 已提交
48
  void InferShape(const framework::InferShapeContext &ctx) const override {
Y
Yan Chunwei 已提交
49 50 51 52 53 54 55
    PADDLE_ENFORCE_EQ(ctx.InputSize(), 3UL,
                      "Input of SoftmaxOpGrad should be 3, X, Y, YG");
    PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL,
                      "Output of SoftmaxOpGrad should be 1");
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null");
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Y")),
                            "Input(Y@GRAD) should not be null");
Q
Qiao Longfei 已提交
56
    PADDLE_ENFORCE(ctx.Input<Tensor>("Y")->dims() ==
Y
Yi Wang 已提交
57
                       ctx.Input<Tensor>(framework::GradVarName("Y"))->dims(),
Q
Qiao Longfei 已提交
58
                   "the shape of Input(0) and Input(1) should be the same");
59
    ctx.Output<Tensor>(framework::GradVarName("X"))
Q
Qiao Longfei 已提交
60
        ->Resize(ctx.Input<Tensor>("Y")->dims());
D
dongzhihong 已提交
61 62 63
  }
};
}  // namespace operators
}  // namespace paddle

D
dongzhihong 已提交
67
namespace ops = paddle::operators;
D
dongzhihong 已提交
68

69
REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker);
D
dongzhihong 已提交
70 71
REGISTER_OP_CPU_KERNEL(softmax,
                       ops::SoftmaxKernel<paddle::platform::CPUPlace, float>);
Q
Qiao Longfei 已提交
72
REGISTER_GRADIENT_OP(softmax, softmax_grad, ops::SoftmaxOpGrad);
D
dongzhihong 已提交
73 74
REGISTER_OP_CPU_KERNEL(
    softmax_grad, ops::SoftmaxGradKernel<paddle::platform::CPUPlace, float>);