/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/softmax_op.h"

#include <memory>
#include <string>
#include <unordered_map>

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

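// Forward softmax operator: infers the output shape and dispatches to the
// plain, cuDNN, or MKLDNN kernel.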
class SoftmaxOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of SoftmaxOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of SoftmaxOp should not be null.");

    auto dim_x = ctx->GetInputDim("X");
    auto rank_x = dim_x.size();
    auto axis = ctx->Attrs().Get<int>("axis");
    PADDLE_ENFORCE(axis >= -rank_x && axis < rank_x,
                   "Attr(axis) value should be in range [-R, R-1], "
                   "where R is the rank of Input(X).");

    auto use_cudnn = ctx->Attrs().Get<bool>("use_cudnn");
    auto use_mkldnn = ctx->Attrs().Get<bool>("use_mkldnn");
    if (axis != rank_x - 1 && axis != -1) {
      PADDLE_ENFORCE(!use_cudnn,
                     "CUDNN kernel only supports softmax over the last "
                     "dimension (axis = -1).");
      PADDLE_ENFORCE(!use_mkldnn,
                     "MKLDNN kernel only supports softmax over the last "
                     "dimension (axis = -1).");
    }

    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    // Choose the cuDNN kernel if the runtime supports it.
    framework::LibraryType library_{framework::LibraryType::kPlain};
    std::string data_format = ctx.Attr<std::string>("data_format");
    framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
    if (platform::CanCUDNNBeUsed(ctx)) {
      library_ = framework::LibraryType::kCUDNN;
    }
#endif
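    // Fall back to MKLDNN only if no cuDNN kernel was selected above.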
#ifdef PADDLE_WITH_MKLDNN
    if (library_ == framework::LibraryType::kPlain &&
        platform::CanMKLDNNBeUsed(ctx)) {
      library_ = framework::LibraryType::kMKLDNN;
      layout_ = framework::DataLayout::kMKLDNN;
    }
#endif

    auto input_data_type = ctx.Input<Tensor>("X")->type();
    if (input_data_type == framework::proto::VarType::FP16) {
      PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
                     "float16 can only be used on GPU place");
    }

    return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
                                   library_);
  }
};

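// Declares the inputs, outputs, attributes, and documentation of the softmax
// operator.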
class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "The input tensor of softmax, "
             "whose dimension :attr:`axis` is the input_feature_dimensions.");
    AddOutput("Out", "The normalized values with the same shape as X.");
    AddAttr<int>("axis",
D
dengkaipeng 已提交
102
                 "The dimension index of Input(x) to perform softmax,"
103 104
                 "default -1 for last dimension")
        .SetDefault(-1);
    AddAttr<bool>(
        "use_cudnn",
        "(bool, default false) Only used in the cudnn kernel; requires cuDNN "
        "to be installed")
        .SetDefault(false);
    AddAttr<std::string>(
        "data_format",
        "(string, default \"AnyLayout\") An optional string from: \"NHWC\", "
        "\"NCHW\". Specify the data format of the input and output data; "
        "the input will be transformed automatically.")
        .SetDefault("AnyLayout");
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddAttr<bool>("is_test",
                  "(bool, default false) Set to true for inference only, false "
                  "for training. Some layers may run faster when this is true.")
        .SetDefault(false);
    AddComment(R"DOC(
Softmax Operator.

The input of the softmax operator is a tensor of any rank. The output tensor
has the same shape as the input.

The dimension :attr:`axis` of the input tensor will be permuted to the last.
Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
second dimension (row length) is the same as the dimension :attr:`axis` of the input
tensor, and the first dimension (column length) is the product of all other
dimensions of the input tensor. For each row of the matrix, the softmax operator
squashes the K-dimensional (K is the width of the matrix, which is also the size
of the input tensor's dimension :attr:`axis`) vector of arbitrary real values to a
K-dimensional vector of real values in the range [0, 1] that add up to 1.
It computes the exponential of each element in the K-dimensional vector and
the sum of the exponentials of all elements. The output of the softmax
operator for each element is the ratio of its exponential to that sum.

For each row $i$ and each column $j$ in the matrix, we have:
    $$Out[i, j] = \frac{\exp(X[i, j])}{\sum_j \exp(X[i, j])}$$
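
For example, for the input vector $[1, 2, 3]$ the output is approximately
$[0.090, 0.245, 0.665]$, since $e^1 : e^2 : e^3 \approx 2.718 : 7.389 : 20.086$.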

)DOC");
  }
};

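// Propagates the data type and variable type of Input(X) to Output(Out).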
class SoftmaxOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string> GetInputOutputWithSameType()
      const override {
    return std::unordered_map<std::string, std::string>{{"X", /*->*/ "Out"}};
  }
};

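// Gradient operator: checks input shapes and mirrors the forward op's kernel
// dispatch logic.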
class SoftmaxOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("Out"), "Input(Out) should not be null.");
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "Input(Out@GRAD) should not be null.");
    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Out"),
                      ctx->GetInputDim(framework::GradVarName("Out")),
                      "Input(Out) and its gradient should have the same shape.");

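    // The gradient w.r.t. X has the same shape as the gradient w.r.t. Out.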
    ctx->SetOutputDim(framework::GradVarName("X"),
                      ctx->GetInputDim(framework::GradVarName("Out")));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    // Choose the cuDNN kernel if the runtime supports it.
    framework::LibraryType library_{framework::LibraryType::kPlain};
    std::string data_format = ctx.Attr<std::string>("data_format");
    framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
    if (platform::CanCUDNNBeUsed(ctx)) {
      library_ = framework::LibraryType::kCUDNN;
    }
#endif
#ifdef PADDLE_WITH_MKLDNN
    if (library_ == framework::LibraryType::kPlain &&
        platform::CanMKLDNNBeUsed(ctx)) {
      library_ = framework::LibraryType::kMKLDNN;
      layout_ = framework::DataLayout::kMKLDNN;
    }
#endif
    auto input_data_type =
        ctx.Input<Tensor>(framework::GradVarName("Out"))->type();
    if (input_data_type == framework::proto::VarType::FP16) {
      PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
                     "float16 can only be used on GPU place");
    }

    return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
                                   library_);
  }
};

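// Describes how to build softmax_grad. The backward pass needs only Out and
// Out@GRAD (not X), since dX = (dOut - sum(dOut * Out)) * Out is computable
// from the forward output alone.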
class SoftmaxOpGradMaker : public framework::SingleGradOpDescMaker {
 public:
  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;

 protected:
  std::unique_ptr<framework::OpDesc> Apply() const override {
    auto* op = new framework::OpDesc();
    op->SetType("softmax_grad");

    op->SetInput("Out", Output("Out"));
    op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));

    op->SetAttrMap(Attrs());

    op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
    return std::unique_ptr<framework::OpDesc>(op);
  }
};

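// Marks Input(X) and Output(Out) as an inplace pair, letting the forward op
// reuse X's memory for Out.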
class SoftmaxInplaceInToOut : public framework::InplaceInToOut {
 public:
  using framework::InplaceInToOut::InplaceInToOut;

 protected:
  std::unordered_map<std::string, std::string> Apply(
      const framework::OpDesc& op_desc,
      framework::BlockDesc* block) const override {
    return std::unordered_map<std::string, std::string>{
        {"X", "Out"},
    };
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

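// Register the forward op together with its proto maker, var-type inference,
// and grad-op maker; the gradient op and CPU kernels are registered below.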
REGISTER_OPERATOR(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker,
                  ops::SoftmaxOpInferVarType, ops::SoftmaxOpGradMaker);
REGISTER_OPERATOR(softmax_grad, ops::SoftmaxOpGrad);
REGISTER_OP_CPU_KERNEL(
    softmax, ops::SoftmaxKernel<paddle::platform::CPUDeviceContext, float>,
    ops::SoftmaxKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    softmax_grad,
    ops::SoftmaxGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::SoftmaxGradKernel<paddle::platform::CPUDeviceContext, double>);