/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/sequence_ops/sequence_softmax_op.h"

#include <string>

namespace paddle {
namespace operators {

class SequenceSoftmaxOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceSoftmax");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceSoftmax");

    ctx->ShareDim("X", /*->*/ "Out");
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    // Choose the cuDNN kernel if the runtime supports it.
    bool use_cudnn =
        ctx.HasAttr("use_cudnn") ? ctx.Attr<bool>("use_cudnn") : false;
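    // The attribute only expresses intent; whether cuDNN can actually be
    // used is probed from the runtime device context below.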
    bool runtime_cudnn_support = false;
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (platform::is_gpu_place(ctx.GetPlace())) {
      auto& dev_ctx =
          ctx.template device_context<platform::CUDADeviceContext>();
      runtime_cudnn_support = dev_ctx.cudnn_handle() != nullptr;
    }
#endif
    framework::LibraryType library_ = framework::LibraryType::kPlain;
    if (use_cudnn && runtime_cudnn_support) {
      library_ = framework::LibraryType::kCUDNN;
    }
    std::string data_format = ctx.HasAttr("data_format")
                                  ? ctx.Attr<std::string>("data_format")
                                  : "AnyLayout";
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace(),
        framework::StringToDataLayout(data_format), library_);
  }
};

class SequenceSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "(LoDTensor) 1-D or 2-D input LoDTensor with the 2-nd dimension "
             "of length 1.");
    AddOutput("Out",
              "(LoDTensor) 1-D or 2-D output LoDTensor with the 2-nd dimension "
              "of length 1.");
    AddAttr<bool>(
        "use_cudnn",
        "(bool, default false) Only used in the cuDNN kernel; cuDNN must be "
        "installed.")
        .SetDefault(false)
        .AsExtra();
    AddAttr<std::string>(
        "data_format",
        "(string, default \"AnyLayout\") An optional string from: \"NHWC\", "
        "\"NCHW\". Specify the data format of the output data; the input "
        "will be transformed automatically.")
        .SetDefault("AnyLayout")
        .AsExtra();
    AddComment(R"DOC(
Sequence Softmax Operator.

SequenceSoftmaxOp computes the softmax activation among all time-steps for each
sequence. The dimension of each time-step should be 1. Thus, the shape of the
input Tensor can be either [N, 1] or [N], where N is the sum of the lengths
of all sequences.

The algorithm works as follows:

    for the i-th sequence in a mini-batch:

$$
Out(X[lod[i]:lod[i+1], :]) =
\frac{\exp(X[lod[i]:lod[i+1], :])}{\sum(\exp(X[lod[i]:lod[i+1], :]))}
$$

For example, for a mini-batch of 3 variable-length sequences containing
2, 3, and 2 time-steps respectively, the LoD is [0, 2, 5, 7]; softmax is
then computed within X[0:2, :], X[2:5, :] and X[5:7, :], and N turns out to be 7.
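
As a concrete, purely illustrative instance: if the first sequence above held
the values X[0:2, :] = [1, 2]^T, its slice of the output would be

$$
Out[0:2, :] = \left[\frac{e^1}{e^1 + e^2}, \frac{e^2}{e^1 + e^2}\right]^T
\approx [0.269, 0.731]^T
$$

so each per-sequence slice of Out sums to 1.
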
)DOC");
  }
};

class SequenceSoftmaxGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "SequenceSoftmaxGrad");
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
                   "Out@GRAD", "SequenceSoftmaxGrad");
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceSoftmaxGrad");
    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
                   "X@GRAD", "SequenceSoftmaxGrad");

    auto out_dim = ctx->GetInputDim("Out");
    auto out_grad_dim = ctx->GetInputDim(framework::GradVarName("Out"));
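    // Out and Out@GRAD are consumed elementwise in the backward pass, so
    // their shapes must match exactly.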
    PADDLE_ENFORCE_EQ(
        out_dim, out_grad_dim,
        platform::errors::InvalidArgument(
            "The shapes of Input(Out) and Input(Out@GRAD) of "
            "SequenceSoftmaxGrad operator do not match. The Input(Out)'s "
            "shape is [%s], the Input(Out@GRAD)'s shape is [%s].",
            out_dim, out_grad_dim));

    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    // Choose the cuDNN kernel if the runtime supports it.
    bool use_cudnn =
        ctx.HasAttr("use_cudnn") ? ctx.Attr<bool>("use_cudnn") : false;
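    // As in the forward op, a cuDNN handle must actually be available at
    // runtime before the cuDNN kernel is selected.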
    bool runtime_cudnn_support = false;
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (platform::is_gpu_place(ctx.GetPlace())) {
      auto& dev_ctx =
          ctx.template device_context<platform::CUDADeviceContext>();
      runtime_cudnn_support = dev_ctx.cudnn_handle() != nullptr;
    }
#endif
    framework::LibraryType library_ = framework::LibraryType::kPlain;
    if (use_cudnn && runtime_cudnn_support) {
      library_ = framework::LibraryType::kCUDNN;
    }
    std::string data_format = ctx.HasAttr("data_format")
                                  ? ctx.Attr<std::string>("data_format")
                                  : "AnyLayout";
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "Out"), ctx.GetPlace(),
        framework::StringToDataLayout(data_format), library_);
  }
};

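// The backward kernel computes X@GRAD from Out and Out@GRAD alone; X is only
// needed for its shape, so its data buffer can be released early.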
DECLARE_NO_NEED_BUFFER_VARS_INFERER(
    SequenceSoftmaxGradOpNoNeedBufferVarsInferer, "X");

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
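
// The forward op is registered with default grad op makers for both the
// static-graph (OpDesc) and imperative (OpBase) modes; they generate the
// sequence_softmax_grad op registered below.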
REGISTER_OPERATOR(
    sequence_softmax, ops::SequenceSoftmaxOp, ops::SequenceSoftmaxOpMaker,
    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>);
REGISTER_OPERATOR(sequence_softmax_grad, ops::SequenceSoftmaxGradOp,
                  ops::SequenceSoftmaxGradOpNoNeedBufferVarsInferer);
REGISTER_OP_CPU_KERNEL(
    sequence_softmax,
    ops::SequenceSoftmaxKernel<paddle::platform::CPUDeviceContext, float>,
    ops::SequenceSoftmaxKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    sequence_softmax_grad,
    ops::SequenceSoftmaxGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::SequenceSoftmaxGradKernel<paddle::platform::CPUDeviceContext, double>);