/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/lstmp_op.h"
#include <string>

namespace paddle {
namespace operators {

class LSTMPOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
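    // Check that every required input/output is wired up before deriving the
    // output shapes from Input, Weight and ProjWeight.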
    PADDLE_ENFORCE(ctx->HasInput("Input"),
27
                   "Input(Input) of LSTMP operator should not be null.");
28
    PADDLE_ENFORCE(ctx->HasInput("Weight"),
29
                   "Input(Weight) of LSTMP operator should not be null.");
30
    PADDLE_ENFORCE(ctx->HasInput("ProjWeight"),
31
                   "Input(ProjWeight) of LSTMP operator should not be null.");
32
    PADDLE_ENFORCE(ctx->HasInput("Bias"),
33
                   "Input(Bias) of LSTMP operator should not be null.");
34 35

    PADDLE_ENFORCE(ctx->HasOutput("Projection"),
36
                   "Output(Projection) of LSTMP operator should not be null.");
37
    PADDLE_ENFORCE(ctx->HasOutput("Cell"),
38
                   "Output(Cell) of LSTMP operator should not be null.");
39
    PADDLE_ENFORCE(ctx->HasOutput("BatchGate"),
40
                   "Output(BatchGate) of LSTMP operator should not be null.");
41
    PADDLE_ENFORCE(ctx->HasOutput("BatchCellPreAct"),
42 43
                   "Output(BatchCellPreAct) of LSTMP operator should not be "
                   "null.");
44
    PADDLE_ENFORCE(ctx->HasOutput("BatchHidden"),
45
                   "Output(BatchHidden) of LSTMP operator should not be null.");
46 47

    auto in_dims = ctx->GetInputDim("Input");
    PADDLE_ENFORCE_EQ(in_dims.size(), 2,
                      "The rank of Input(Input) of LSTMP operator must be 2.");

    // Input packs the pre-activations of the four gates along its second
    // dimension (shape (T x 4D)), so the hidden size D is in_dims[1] / 4.
    int frame_size = in_dims[1] / 4;
    auto w_dims = ctx->GetInputDim("Weight");
    auto proj_dims = ctx->GetInputDim("ProjWeight");
    PADDLE_ENFORCE_EQ(w_dims.size(), 2,
                      "The rank of Input(Weight) should be 2.");
    PADDLE_ENFORCE_EQ(w_dims[0], proj_dims[1],
                      "The first dimension of Input(Weight) "
                      "should be %d.",
                      proj_dims[1]);
    PADDLE_ENFORCE_EQ(w_dims[1], 4 * frame_size,
                      "The second dimension of Input(Weight) "
                      "should be 4 * %d.",
                      frame_size);

    PADDLE_ENFORCE_EQ(proj_dims.size(), 2,
                      "The rank of Input(ProjWeight) should be 2.");
    PADDLE_ENFORCE_EQ(proj_dims[0], frame_size,
                      "The first dimension of Input(ProjWeight) "
                      "should be %d.",
                      frame_size);
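
    // Illustrative shapes (assumed values, consistent with the checks above):
    // with hidden size D = 32 and projection size P = 16, Input is
    // (T x 128), Weight is (16 x 128) and ProjWeight is (32 x 16).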

    if (ctx->HasInput("H0")) {
      PADDLE_ENFORCE(ctx->HasInput("C0"),
                     "Input(C0) of LSTMP operator should not be null after "
                     "Input(H0) provided.");
      auto h_dims = ctx->GetInputDim("H0");
      auto c_dims = ctx->GetInputDim("C0");
      PADDLE_ENFORCE(h_dims == c_dims,
                     "The dimension of Input(H0) and Input(C0) "
                     "should be the same.");
      ctx->SetOutputDim("OrderedP0", {h_dims[0], proj_dims[1]});
    }

    auto b_dims = ctx->GetInputDim("Bias");
    PADDLE_ENFORCE_EQ(b_dims.size(), 2, "The rank of Input(Bias) should be 2.");
    PADDLE_ENFORCE_EQ(b_dims[0], 1,
                      "The first dimension of Input(Bias) should be 1.");

    if (ctx->Attrs().Get<bool>("use_peepholes")) {
      PADDLE_ENFORCE_EQ(b_dims[1], 7 * frame_size,
                        "The second dimension of Input(Bias) should be "
                        "7 * %d if enable peepholes connection",
                        frame_size);
    } else {
      PADDLE_ENFORCE_EQ(b_dims[1], 4 * frame_size,
                        "The second dimension of Input(Bias) should be "
                        "4 * %d if disable peepholes connection",
                        frame_size);
    }

    framework::DDim out_dims({in_dims[0], frame_size});
    framework::DDim proj_out_dims({in_dims[0], proj_dims[1]});
    ctx->SetOutputDim("Projection", proj_out_dims);
    ctx->SetOutputDim("Cell", out_dims);
    ctx->SetOutputDim("BatchGate", in_dims);
    ctx->SetOutputDim("BatchCellPreAct", out_dims);
    ctx->SetOutputDim("BatchHidden", out_dims);
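    // Projection and Cell preserve the sequence structure of Input, so they
    // share its LoD.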
    ctx->ShareLoD("Input", "Projection");
    ctx->ShareLoD("Input", "Cell");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::LoDTensor>("Input")->type()),
        ctx.device_context());
  }
};

class LSTMPOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  LSTMPOpMaker(OpProto* proto, OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("Input",
             "(LoDTensor) the input of sequence data, which supports "
             "variable-length input sequences. The underlying tensor in "
             "this LoDTensor is a matrix with shape (T x 4D), where T is the "
             "total time steps in this mini-batch and D is the hidden size.");
    AddInput("H0",
             "(Tensor, optional) the initial hidden state is an optional "
             "input. This is a tensor with shape (N x D), where N is the "
             "batch size and D is the hidden size.")
        .AsDispensable();
    AddInput("C0",
             "(Tensor, optional) the initial cell state is an optional "
             "input. This is a tensor with shape (N x D), where N is the "
138
             "batch size. `C0` should not be null if `H0` provided.")
139 140 141
        .AsDispensable();
    AddInput("Weight",
             "(Tensor) the learnable hidden-hidden weights."
Y
Yibing Liu 已提交
142 143
             " - The shape is (P x 4D), where P is the projection layer size "
             "and  D is the hidden size."
144 145
             " - Weight = {W_cr, W_ir, W_fr, W_or}");
    AddInput("ProjWeight",
Y
Yibing Liu 已提交
146
             "(Tensor) the learnable weight of the projection layer."
147
             " - The shape is (D x P), where P is the recurrent projection "
Y
Yibing Liu 已提交
148 149
             "layer size and  D is the hidden size."
             " - ProjWeight = {W_rh}");
    AddInput("Bias",
             "(Tensor) the learnable biases, which contains two parts: "
             "input-hidden biases and peephole connection weights when "
             "`use_peepholes` is set to `True`. "
             "1. `use_peepholes = False` "
             " - The shape is (1 x 4D). "
             " - Bias = {b_c, b_i, b_f, b_o}."
             "2. `use_peepholes = True` "
             " - The shape is (1 x 7D). "
             " - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}.");
    AddOutput("Projection",
              "(LoDTensor) the projection of the hidden state of LSTMP "
Y
Yibing Liu 已提交
162
              "operator. The shape is (T x P), and LoD is the same with the "
163 164 165 166 167 168
              "`Input`.");
    AddOutput("Cell",
              "(LoDTensor) the cell state of LSTMP operator. "
              "The shape is (T x D), and lod is the same with the `Input`.");
    AddOutput("BatchGate",
              "(LoDTensor) This LoDTensor contains input gate, forget gate "
Y
Yibing Liu 已提交
169 170 171 172 173
              "and output gate after the activations. This LoDTensor has the "
              "same shape as the reorganized input, which is also be called "
              "batch input. The LoD size is 2. The first-level LoD is the "
              "batch offsets and the second contains the indices, which "
              "denotes the position of reorganized sequence in the raw input.")
174 175
        .AsIntermediate();
    AddOutput("BatchCellPreAct",
Y
Yibing Liu 已提交
176 177 178
              "(LoDTensor) the pre-activation cell state reorganized in batch. "
              "This LoDTensor is obtained in the forward and used in the "
              "backward.")
179
        .AsIntermediate();
    AddOutput("BatchHidden",
              "(LoDTensor) the hidden state reorganized in batch. "
              "This LoDTensor is obtained in the forward pass and used in "
              "the backward pass.")
        .AsIntermediate();
    AddOutput("OrderedP0",
              "(Tensor) the projection of the initial hidden state "
              "H0. This is a tensor with shape (N x P), where N is the "
              "batch size and P is the hidden size.")
        .AsIntermediate();
    AddAttr<bool>("use_peepholes",
                  "(bool, defalut: True) "
                  "whether to enable diagonal/peephole connections.")
        .SetDefault(true);
    AddAttr<bool>("is_reverse",
                  "(bool, defalut: False) "
                  "whether to compute reversed LSTMP.")
        .SetDefault(false);
    AddAttr<std::string>(
        "gate_activation",
        "(string, default: sigmoid)"
        "The activation for input gate, forget gate and output "
        "gate, `sigmoid` by default.")
        .SetDefault("sigmoid")
        .InEnum({"sigmoid", "tanh", "relu", "identity"});
    AddAttr<std::string>("cell_activation",
                         "(string, default: tanh)"
                         "The activation for cell output, `tanh` by defalut.")
        .SetDefault("tanh")
        .InEnum({"sigmoid", "tanh", "relu", "identity"});
    AddAttr<std::string>("candidate_activation",
                         "(string, default: tanh)"
                         "The activation for candidate hidden state, "
                         "`tanh` by default.")
        .SetDefault("tanh")
        .InEnum({"sigmoid", "tanh", "relu", "identity"});
    AddAttr<std::string>("proj_activation",
                         "(string, default: tanh)"
                         "The activation for projection output, "
                         "`tanh` by defalut.")
        .SetDefault("tanh")
        .InEnum({"sigmoid", "tanh", "relu", "identity"});
    AddComment(R"DOC(
Long Short-Term Memory with recurrent Projection layer (LSTMP) Operator.

LSTMP has a separate projection layer after the LSTM layer, projecting the
original hidden state to a lower-dimensional one, which is proposed to reduce
the total number of parameters and the computational complexity of the LSTM,
especially for the case that the size of the output units is relatively large
(https://research.google.com/pubs/archive/43905.pdf).

The formula is as follows:

$$
i_t = \sigma(W_{ix}x_{t} + W_{ir}r_{t-1} + W_{ic}c_{t-1} + b_i) \\

f_t = \sigma(W_{fx}x_{t} + W_{fr}r_{t-1} + W_{fc}c_{t-1} + b_f) \\

\tilde{c_t} = act_g(W_{cx}x_t + W_{cr}r_{t-1} + b_c) \\

o_t = \sigma(W_{ox}x_{t} + W_{or}r_{t-1} + W_{oc}c_t + b_o) \\

c_t = f_t \odot c_{t-1} + i_t \odot \tilde{c_t} \\

h_t = o_t \odot act_h(c_t) \\

r_t = \overline{act_h}(W_{rh}h_t)
$$

where the W terms denote weight matrices (e.g. $W_{ix}$ is the matrix
of weights from the input to the input gate), and $W_{ic}, W_{fc}, W_{oc}$
are diagonal weight matrices for peephole connections. In our implementation,
we use vectors to represent these diagonal weight matrices. The b terms
denote bias vectors ($b_i$ is the input gate bias vector), $\sigma$
is the activation function, such as the logistic sigmoid function, and
$i, f, o$ and $c$ are the input gate, forget gate, output gate,
and cell activation vectors, respectively, all of which have the same size as
the cell output activation vector $h$. Here $h$ is usually called the hidden
state and $r$ denotes its recurrent projection. $\tilde{c_t}$ is the
candidate hidden state, computed from the current input and the previous
hidden state.

$\odot$ denotes the element-wise product of vectors. $act_g$ and $act_h$
are the cell input and cell output activation functions, for which `tanh` is
usually used. $\overline{act_h}$ is the activation function for the
projection output, usually `identity` or the same as $act_h$.

Note that these $W_{ix}x_{t}, W_{fx}x_{t}, W_{cx}x_{t}, W_{ox}x_{t}$
operations on the input $x_{t}$ are NOT included in this operator.
Users can choose to use a fully-connected operator before the LSTMP operator.
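
A minimal usage sketch in Python (assuming the fluid `dynamic_lstmp` layer,
which wraps this operator; the names and sizes below are illustrative):

    import paddle.fluid as fluid

    hidden_dim, proj_dim = 512, 256
    seq = fluid.layers.data(name='seq', shape=[128], dtype='float32',
                            lod_level=1)
    # Compute the gate pre-activations (T x 4D) with a fully-connected layer.
    fc_out = fluid.layers.fc(input=seq, size=hidden_dim * 4, bias_attr=False)
    proj, cell = fluid.layers.dynamic_lstmp(input=fc_out,
                                            size=hidden_dim * 4,
                                            proj_size=proj_dim,
                                            use_peepholes=True)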

)DOC");
  }
};

class LSTMPGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("Input"),
281
                   "Input(Input) of LSTMP operator should not be null.");
282
    PADDLE_ENFORCE(ctx->HasInput("Projection"),
283
                   "Input(Projection) of LSTMP operator should not be null.");
284
    PADDLE_ENFORCE(ctx->HasInput("Cell"),
285
                   "Input(Cell) of LSTMP operator should not be null.");
286
    PADDLE_ENFORCE(ctx->HasInput("Weight"),
287
                   "Input(Weight) of LSTMP operator should not be null.");
288
    PADDLE_ENFORCE(ctx->HasInput("ProjWeight"),
289
                   "Input(ProjWeight) of LSTMP operator should not be null.");
290
    PADDLE_ENFORCE(ctx->HasInput("Bias"),
291
                   "Input(Bias) of LSTMP operator should not be null.");
292 293

    PADDLE_ENFORCE(ctx->HasInput("BatchGate"),
294
                   "Input(BatchGate) of LSTMP operator should not be null.");
295
    PADDLE_ENFORCE(ctx->HasInput("BatchCellPreAct"),
296
                   "Input(BatchGate) of LSTMP operator should not be null.");
297 298 299 300 301 302 303 304 305

    // Propagate the forward shape of `name` to its gradient output, if that
    // gradient is actually requested.
    auto SetOutGradDim = [&ctx](const std::string& name) {
      auto g_name = framework::GradVarName(name);
      if (ctx->HasOutput(g_name))
        ctx->SetOutputDim(g_name, ctx->GetInputDim(name));
    };

    SetOutGradDim("Input");
    SetOutGradDim("Weight");
    SetOutGradDim("ProjWeight");
    SetOutGradDim("Bias");
    SetOutGradDim("H0");
    SetOutGradDim("C0");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::LoDTensor>("Input")->type()),
        ctx.device_context());
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
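// Register the forward and backward operators (the backward op desc is
// generated by DefaultGradOpDescMaker), followed by the float and double
// CPU kernels.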
REGISTER_OPERATOR(lstmp, ops::LSTMPOp, ops::LSTMPOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(lstmp_grad, ops::LSTMPGradOp);
REGISTER_OP_CPU_KERNEL(
    lstmp, ops::LSTMPKernel<paddle::platform::CPUDeviceContext, float>,
    ops::LSTMPKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    lstmp_grad, ops::LSTMPGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::LSTMPGradKernel<paddle::platform::CPUDeviceContext, double>);