/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/softmax_op.h"

namespace paddle {
namespace operators {

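// SoftmaxOp is the forward operator. Its InferShape checks that there is
// exactly one 2-D input X and one output Y, and gives Y the same shape as X.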
class SoftmaxOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
    PADDLE_ENFORCE(ctx.InputSize() == 1UL,
                   "Only one input is needed for softmax");
    PADDLE_ENFORCE(ctx.Input<Tensor>("X")->dims().size() == 2UL,
                   "The input of softmax op must be a matrix");
    PADDLE_ENFORCE(ctx.OutputSize() == 1UL,
                   "Only one output is needed for softmax");
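    // The softmax output Y has the same shape as the input X.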
    ctx.Output<Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims());
  }
};

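// SoftmaxOpMaker declares the operator's inputs, outputs, and documentation
// for the op registry.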
class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  SoftmaxOpMaker(framework::OpProto *proto,
                 framework::OpAttrChecker *op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "input of softmax");
    AddOutput("Y", "output of softmax");
    AddComment("Softmax Op");
  }
};

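// SoftmaxOpGrad is the backward operator. It takes X, Y, and Y@GRAD as
// inputs and produces X@GRAD, which has the same shape as Y.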
class SoftmaxOpGrad : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
    PADDLE_ENFORCE(ctx.InputSize() == 3UL,
                   "SoftmaxOpGrad should have 3 inputs: X, Y, Y@GRAD");
    PADDLE_ENFORCE(ctx.OutputSize() == 1UL,
                   "SoftmaxOpGrad should have 1 output: X@GRAD");
    PADDLE_ENFORCE(ctx.InputVar("Y") != nullptr, "Input(Y) should not be null");
    PADDLE_ENFORCE(ctx.InputVar(framework::GradVarName("Y")) != nullptr,
                   "Input(Y@GRAD) should not be null");
    PADDLE_ENFORCE(ctx.Input<Tensor>("Y")->dims() ==
                       ctx.Input<Tensor>(framework::GradVarName("Y"))->dims(),
                   "the shape of Input(0) and Input(1) should be the same");
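    // X@GRAD has the same shape as Y (and hence as X).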
    ctx.Output<Tensor>(framework::GradVarName("X"))
        ->Resize(ctx.Input<Tensor>("Y")->dims());
  }
};

}  // namespace operators
}  // namespace paddle

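// Register the forward op, its proto maker, the gradient op, and the CPU
// kernels for both the forward and backward passes.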
namespace ops = paddle::operators;
REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker);
REGISTER_OP_CPU_KERNEL(softmax,
                       ops::SoftmaxKernel<paddle::platform::CPUPlace, float>);
REGISTER_GRADIENT_OP(softmax, softmax_grad, ops::SoftmaxOpGrad);
REGISTER_OP_CPU_KERNEL(
    softmax_grad, ops::SoftmaxGradKernel<paddle::platform::CPUPlace, float>);