/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/softmax_op.h"

#include <memory>
#include <string>
#include <unordered_map>

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

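// Forward softmax operator: validates the axis attribute, infers the shape of
// Out (identical to that of X), and selects the kernel backend.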
class SoftmaxOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE_EQ(
        ctx->HasInput("X"), true,
        platform::errors::NotFound("Input(X) of SoftmaxOp is not found."));
    PADDLE_ENFORCE_EQ(
        ctx->HasOutput("Out"), true,
        platform::errors::NotFound("Output(Out) of SoftmaxOp is not found."));

    auto dim_x = ctx->GetInputDim("X");
    auto rank_x = dim_x.size();
    auto axis = ctx->Attrs().Get<int>("axis");
    PADDLE_ENFORCE_GE(axis, -rank_x,
                      platform::errors::InvalidArgument(
                          "Attr(axis) value should be in range [-R, R-1], "
                          "R is the rank of Input(X)."));
    PADDLE_ENFORCE_LT(axis, rank_x,
                      platform::errors::InvalidArgument(
                          "Attr(axis) value should be in range [-R, R-1], "
                          "R is the rank of Input(X)."));

    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    // Choose the cuDNN kernel if the runtime supports it; otherwise fall back
    // to the MKLDNN kernel when available, and finally to the plain kernel.
    framework::LibraryType library_{framework::LibraryType::kPlain};
    std::string data_format = ctx.Attr<std::string>("data_format");
    framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
    if (platform::CanCUDNNBeUsed(ctx)) {
      library_ = framework::LibraryType::kCUDNN;
    }
#endif
#ifdef PADDLE_WITH_MKLDNN
    if (library_ == framework::LibraryType::kPlain &&
        this->CanMKLDNNBeUsed(ctx)) {
      library_ = framework::LibraryType::kMKLDNN;
      layout_ = framework::DataLayout::kMKLDNN;
    }
#endif

    auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
#ifndef PADDLE_WITH_ASCEND_CL
    if (input_data_type == framework::proto::VarType::FP16) {
      PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
                        platform::errors::InvalidArgument(
                            "float16 can only be used on GPU place"));
    }
#endif

    return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
                                   library_);
  }
};

class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "The input tensor of softmax, "
             "whose dimension :attr:`axis` is the input feature dimension.");
    AddOutput("Out", "The normalized values with the same shape as X.");
    AddAttr<int>("axis",
                 "The dimension index of Input(X) along which to perform "
                 "softmax; default -1 for the last dimension")
        .SetDefault(-1);
    AddAttr<bool>(
        "use_cudnn",
        "(bool, default false) Only used in the cuDNN kernel; cuDNN must be "
        "installed")
        .SetDefault(false);
    AddAttr<std::string>(
        "data_format",
        "(string, default \"AnyLayout\") An optional string from: \"NHWC\", "
        "\"NCHW\". Specify the data format of the output data; "
        "the input will be transformed automatically.")
        .SetDefault("AnyLayout");
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddAttr<std::string>(
        "mkldnn_data_type",
        "(string, default \"float32\"). Data type of mkldnn kernel")
        .SetDefault("float32")
        .InEnum({"float32", "bfloat16"});
    AddAttr<bool>("is_test",
                  "(bool, default false) Set to true for inference only, false "
                  "for training. Some layers may run faster when this is true.")
        .SetDefault(false);
    AddComment(R"DOC(
Softmax Operator.

The input of the softmax operator is a tensor of any rank. The output tensor
has the same shape as the input.

The dimension :attr:`axis` of the input tensor will be permuted to the last.
Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
second dimension (row length) is the same as the dimension :attr:`axis` of the input
tensor, and the first dimension (column length) is the product of all other
dimensions of the input tensor. For each row of the matrix, the softmax operator
squashes the K-dimensional (K is the width of the matrix, which is also the size
of the input tensor's dimension :attr:`axis`) vector of arbitrary real values to a
K-dimensional vector of real values in the range [0, 1] that add up to 1.
It computes the exponential of each element in the K-dimensional vector and
the sum of the exponentials of all K elements. The ratio of each element's
exponential to that sum is the corresponding output of the softmax operator.

For each row $i$ and each column $j$ in the matrix, we have:
    $$Out[i, j] = \frac{\exp(X[i, j])}{\sum_j \exp(X[i, j])}$$
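
For example, for a row $X = [1, 2, 3]$ the exponentials are approximately
$[2.718, 7.389, 20.086]$; their sum is about $30.193$, so the output row is
approximately $[0.090, 0.245, 0.665]$, which adds up to 1.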

)DOC");
  }
};

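// Out inherits the dtype and variable type of X.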
class SoftmaxOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
    return m;
  }
};

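// Backward softmax operator: checks that Out and Out@GRAD have the same shape
// and gives X@GRAD that shape as well.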
class SoftmaxOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE_EQ(
        ctx->HasInput("Out"), true,
        platform::errors::InvalidArgument("Input(Out) is not found."));
    PADDLE_ENFORCE_EQ(
        ctx->HasInput(framework::GradVarName("Out")), true,
        platform::errors::InvalidArgument("Input(Out@GRAD) is not found."));
    PADDLE_ENFORCE_EQ(
        ctx->GetInputDim("Out"),
        ctx->GetInputDim(framework::GradVarName("Out")),
        platform::errors::InvalidArgument("Input(Out) and its gradients "
                                          "should have the same shape."));

    ctx->SetOutputDim(framework::GradVarName("X"),
                      ctx->GetInputDim(framework::GradVarName("Out")));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    // Choose the cuDNN kernel if the runtime supports it; otherwise fall back
    // to the MKLDNN kernel when available, and finally to the plain kernel.
    framework::LibraryType library_{framework::LibraryType::kPlain};
    std::string data_format = ctx.Attr<std::string>("data_format");
    framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
    if (platform::CanCUDNNBeUsed(ctx)) {
      library_ = framework::LibraryType::kCUDNN;
    }
#endif
#ifdef PADDLE_WITH_MKLDNN
    if (library_ == framework::LibraryType::kPlain &&
        this->CanMKLDNNBeUsed(ctx)) {
      library_ = framework::LibraryType::kMKLDNN;
      layout_ = framework::DataLayout::kMKLDNN;
    }
#endif
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(
        ctx, framework::GradVarName("Out"));
    if (input_data_type == framework::proto::VarType::FP16) {
      if (!(platform::is_gpu_place(ctx.GetPlace()) ||
            platform::is_npu_place(ctx.GetPlace())))
        PADDLE_THROW(platform::errors::InvalidArgument(
            "float16 can only be used on GPU/NPU place"));
    }

    return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
                                   library_);
  }
};

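// Builds the softmax_grad op description: the backward pass reads the forward
// output Out and Out@GRAD (the input X is not needed) and produces X@GRAD.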
template <typename T>
class SoftmaxOpGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("softmax_grad");

    op->SetInput("Out", this->Output("Out"));
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));

    op->SetAttrMap(this->Attrs());

    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
  }
};

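// Softmax may be computed in place: Input(X) and Output(Out) can share memory.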
DECLARE_INPLACE_OP_INFERER(SoftmaxInplaceInferer, {"X", "Out"});

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OPERATOR(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker,
                  ops::SoftmaxOpInferVarType,
                  ops::SoftmaxOpGradMaker<paddle::framework::OpDesc>,
                  ops::SoftmaxOpGradMaker<paddle::imperative::OpBase>,
                  ops::SoftmaxInplaceInferer);
REGISTER_OPERATOR(softmax_grad, ops::SoftmaxOpGrad);
REGISTER_OP_CPU_KERNEL(
    softmax, ops::SoftmaxKernel<paddle::platform::CPUDeviceContext, float>,
    ops::SoftmaxKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    softmax_grad,
    ops::SoftmaxGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::SoftmaxGradKernel<paddle::platform::CPUDeviceContext, double>);