/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/fused/fusion_lstm_op.h"
#include <string>
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/fc_compute.h"
#include "paddle/fluid/operators/math/jit_kernel.h"
#include "paddle/fluid/operators/math/sequence2batch.h"

namespace paddle {
namespace operators {

void FusionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
  PADDLE_ENFORCE(ctx->HasInput("X"), "Assert only one Input(X) of LSTM.");
  PADDLE_ENFORCE(ctx->HasInput("WeightX"),
                 "Assert only one Input(WeightX) of LSTM.");
  PADDLE_ENFORCE(ctx->HasInput("WeightH"),
                 "Assert only one Input(WeightH) of LSTM.");
  PADDLE_ENFORCE(ctx->HasInput("Bias"), "Assert only one Input(Bias) of LSTM.");
  PADDLE_ENFORCE(ctx->HasOutput("XX"), "Assert only one Output(XX) of LSTM.");
  PADDLE_ENFORCE(ctx->HasOutput("Hidden"),
                 "Assert only one Output(Hidden) of LSTM.");
  PADDLE_ENFORCE(ctx->HasOutput("Cell"),
                 "Assert only one Output(Cell) of LSTM.");

  auto x_dims = ctx->GetInputDim("X");
  PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank must be 2.");

  if (ctx->HasInput("H0")) {
    PADDLE_ENFORCE(ctx->HasInput("C0"),
                   "Input(H0) and Input(C0) of LSTM can be null only "
                   "at the same time.");
    auto h_dims = ctx->GetInputDim("H0");
    auto c_dims = ctx->GetInputDim("C0");
    PADDLE_ENFORCE(h_dims == c_dims,
                   "The dimension of Input(H0) and Input(C0) "
                   "should be the same.");
  }

  auto wx_dims = ctx->GetInputDim("WeightX");
  PADDLE_ENFORCE_EQ(wx_dims.size(), 2,
                    "The rank of Input(WeightX) should be 2.");
  PADDLE_ENFORCE_EQ(wx_dims[0], x_dims[1],
                    "The first dimension of Input(WeightX) "
                    "should be %d.",
                    x_dims[1]);

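  // the four gate blocks {c, i, f, o} are concatenated along the second
  // dimension of WeightX (M x 4D), so the hidden size is wx_dims[1] / 4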
  int frame_size = wx_dims[1] / 4;
  auto wh_dims = ctx->GetInputDim("WeightH");
  PADDLE_ENFORCE_EQ(wh_dims.size(), 2,
                    "The rank of Input(WeightH) should be 2.");
  PADDLE_ENFORCE_EQ(wh_dims[0], frame_size,
                    "The first dimension of Input(WeightH) "
                    "should be %d.",
                    frame_size);
  PADDLE_ENFORCE_EQ(wh_dims[1], 4 * frame_size,
                    "The second dimension of Input(WeightH) "
                    "should be 4 * %d.",
                    frame_size);

  auto b_dims = ctx->GetInputDim("Bias");
  PADDLE_ENFORCE_EQ(b_dims.size(), 2, "The rank of Input(Bias) should be 2.");
  PADDLE_ENFORCE_EQ(b_dims[0], 1,
                    "The first dimension of Input(Bias) should be 1.");
  if (ctx->Attrs().Get<bool>("use_peepholes")) {
    PADDLE_ENFORCE_EQ(b_dims[1], 7 * frame_size,
                      "The second dimension of Input(Bias) should be "
                      "7 * %d if peephole connections are enabled",
                      frame_size);
    ctx->SetOutputDim("CheckedCell", {2, frame_size});
  } else {
    PADDLE_ENFORCE_EQ(b_dims[1], 4 * frame_size,
                      "The second dimension of Input(Bias) should be "
                      "4 * %d if peephole connections are disabled",
                      frame_size);
  }

  framework::DDim out_dims({x_dims[0], frame_size});
  ctx->SetOutputDim("Hidden", out_dims);
  ctx->SetOutputDim("Cell", out_dims);
  ctx->ShareLoD("X", "Hidden");
  ctx->ShareLoD("X", "Cell");
  int xx_width;
  if (ctx->Attrs().Get<bool>("use_seq")) {
    xx_width = wx_dims[1];
  } else {
    xx_width = x_dims[1] > wx_dims[1] ? wx_dims[1] : x_dims[1];
    PADDLE_ENFORCE(ctx->HasOutput("BatchedInput"),
                   "Assert only one Output(BatchedInput) of LSTM.");
    PADDLE_ENFORCE(ctx->HasOutput("BatchedHidden"),
                   "Assert only one Output(BatchedHidden) of LSTM.");
    PADDLE_ENFORCE(ctx->HasOutput("BatchedCell"),
                   "Assert only one Output(BatchedCell) of LSTM.");
    PADDLE_ENFORCE(ctx->HasOutput("ReorderedH0"),
                   "Assert only one Output(ReorderedH0) of LSTM.");
    PADDLE_ENFORCE(ctx->HasOutput("ReorderedC0"),
                   "Assert only one Output(ReorderedC0) of LSTM.");
    ctx->SetOutputDim("BatchedInput", {x_dims[0], wx_dims[1]});
    ctx->SetOutputDim("BatchedHidden", out_dims);
    ctx->SetOutputDim("BatchedCell", out_dims);
  }
  ctx->SetOutputDim("XX", {x_dims[0], xx_width});
  ctx->ShareLoD("X", "XX");
}

framework::OpKernelType FusionLSTMOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  return framework::OpKernelType(ctx.Input<framework::LoDTensor>("X")->type(),
                                 ctx.device_context());
}

void FusionLSTMOpMaker::Make() {
  AddInput("X",
           "(LoDTensor) the input is a LoDTensor, which supports "
           "variable-length input sequences. The underlying tensor in "
           "this LoDTensor is a matrix with shape (T x M), where T is the "
           "total time steps in this mini-batch and M is the dim size of x.");
  AddInput("WeightX",
           "(Tensor) the learnable weights of X."
           " - The shape is (M x 4D), where M is the dim size of x, D is the "
           "hidden size. "
           " - Weight = {W_cx, W_ix, W_fx, W_ox}");
  AddInput("WeightH",
           "(Tensor) same as LSTMOp, the learnable hidden-hidden weights."
           " - The shape is (D x 4D), where D is the hidden size. "
           " - Weight = {W_ch, W_ih, W_fh, W_oh}");
  AddInput("Bias",
           "(Tensor) the learnable bias weights, almost the same as LSTMOp. "
           "Note: the FC bias is folded into the first (1 x 4D) part of this "
           "bias; it holds the input-hidden bias weights plus the peephole "
           "connection weights when `use_peepholes` is set to True. "
           "1. `use_peepholes = False` "
           " - The shape is (1 x 4D). "
           " - Bias = {b_c, b_i, b_f, b_o}."
           "2. `use_peepholes = True` "
           " - The shape is (1 x 7D). "
           " - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}.");
  AddInput("H0",
           "(Tensor, optional) (same as LSTMOp) the initial hidden state is "
           "an optional input. This is a tensor with shape (N x D), where N "
           "is the batch size and D is the hidden size.")
      .AsDispensable();
  AddInput("C0",
           "(Tensor, optional) (same as LSTMOp) the initial cell state is an "
           "optional input. This is a tensor with shape (N x D), where N is "
           "the batch size. `H0` and `C0` can be NULL but only at the same "
           "time.")
      .AsDispensable();
  AddOutput("Hidden",
            "(LoDTensor) (same as LSTMOp) the hidden state of LSTM operator. "
            "The shape is (T x D), and the LoD is the same as that of the "
            "input `X`.");
  AddOutput("Cell",
            "(LoDTensor) (same as LSTMOp) the cell state of LSTM operator. "
            "The shape is (T x D), and the LoD is the same as that of the "
            "input `X`.");
  AddOutput("XX",
            "(LoDTensor) the result of X * WeightX (with size T x 4D) or "
            "batched_X (with size T x M); which one is chosen automatically. "
            "Here T is the total time steps in this mini-batch, D is the "
            "hidden size, and M is the dim size of x.")
      .AsIntermediate();
  AddOutput("BatchedInput", "(LoDTensor) (T x 4D).").AsIntermediate();
  AddOutput("BatchedHidden", "(LoDTensor) (T x D).").AsIntermediate();
  AddOutput("BatchedCell", "(LoDTensor) (T x D).").AsIntermediate();
  AddOutput("ReorderedH0", "(LoDTensor) (N x D).").AsIntermediate();
  AddOutput("ReorderedC0", "(LoDTensor) (N x D).").AsIntermediate();
  AddOutput("CheckedCell", "(Tensor) (2 x D) only for peephole.")
      .AsIntermediate();
  AddAttr<bool>("use_peepholes",
                "(bool, default: True) "
                "whether to enable diagonal/peephole connections.")
      .SetDefault(true);
  AddAttr<bool>("is_reverse",
                "(bool, default: False) "
                "whether to compute reversed LSTM.")
      .SetDefault(false);
  AddAttr<bool>("use_seq",
                "(bool, default: True) "
                "whether to use seq mode to compute.")
      .SetDefault(true);
  AddAttr<std::string>("gate_activation",
                       "(string, default: sigmoid)"
                       "The activation for input gate, forget gate and output "
                       "gate, `sigmoid` by default.")
      .SetDefault("sigmoid")
      .InEnum({"sigmoid", "tanh", "relu", "identity"});
  AddAttr<std::string>("cell_activation",
                       "(string, default: tanh)"
                       "The activation for cell output, `tanh` by default.")
      .SetDefault("tanh")
      .InEnum({"sigmoid", "tanh", "relu", "identity"});
  AddAttr<std::string>("candidate_activation",
                       "(string, default: tanh)"
                       "The activation for candidate hidden state, "
                       "`tanh` by default.")
      .SetDefault("tanh")
      .InEnum({"sigmoid", "tanh", "relu", "identity"});
  AddComment(R"DOC(
Fusion Long Short-Term Memory (LSTM) Operator.
This operator fuses the input projection (X * WeightX) into the LSTM;
for more details, please refer to the LSTM op.
)DOC");
}
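
// For reference, a sketch of the per-step recurrence this fused op computes
// (the standard LSTM with optional peephole terms, matching the Bias layout
// {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc} documented above; (.) is the
// element-wise product, and the activations come from the attributes):
//   i_t  = gate_act(W_ix * x_t + W_ih * h_{t-1} + W_ic (.) c_{t-1} + b_i)
//   f_t  = gate_act(W_fx * x_t + W_fh * h_{t-1} + W_fc (.) c_{t-1} + b_f)
//   c~_t = cand_act(W_cx * x_t + W_ch * h_{t-1} + b_c)
//   c_t  = f_t (.) c_{t-1} + i_t (.) c~_t
//   o_t  = gate_act(W_ox * x_t + W_oh * h_{t-1} + W_oc (.) c_t + b_o)
//   h_t  = o_t (.) cell_act(c_t)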

template <typename T>
class FusionLSTMKernel : public framework::OpKernel<T> {
 public:
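// These macros keep SeqCompute and BatchCompute in sync: INIT_BASE_DEFINES
// pulls the shared inputs/outputs and sizes (M = input dim, D = hidden
// size, D4 = 4 * D); INIT_OTHER_DEFINES sets up the raw data pointers and
// the JIT LSTM kernel used by both code paths.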
#define INIT_BASE_DEFINES                                   \
  using DeviceContext = paddle::platform::CPUDeviceContext; \
  auto* x = ctx.Input<LoDTensor>("X");                      \
  auto* h0 = ctx.Input<Tensor>("H0");                       \
  auto* c0 = ctx.Input<Tensor>("C0");                       \
  auto* wx = ctx.Input<Tensor>("WeightX");                  \
  auto* wh = ctx.Input<Tensor>("WeightH");                  \
  auto* bias = ctx.Input<Tensor>("Bias");                   \
  auto* xx = ctx.Output<LoDTensor>("XX");                   \
  auto* hidden_out = ctx.Output<LoDTensor>("Hidden");       \
  auto* cell_out = ctx.Output<LoDTensor>("Cell");           \
  bool is_reverse = ctx.Attr<bool>("is_reverse");           \
  bool use_peepholes = ctx.Attr<bool>("use_peepholes");     \
  auto x_dims = x->dims();   /* T x M*/                     \
  auto wh_dims = wh->dims(); /* D x 4D*/                    \
  const int M = x_dims[1];                                  \
  const int D = wh_dims[0];                                 \
  const int D4 = wh_dims[1]

#define INIT_OTHER_DEFINES                                      \
  const T* x_data = x->data<T>();                               \
  const T* wx_data = wx->data<T>();                             \
  const T* wh_data = wh->data<T>();                             \
  /* diagonal weight*/                                          \
  const T* wp_data = bias->data<T>() + D4;                      \
  /* for peephole only*/                                        \
  T* checked_cell_data = nullptr;                               \
  auto place = ctx.GetPlace();                                  \
  if (use_peepholes) {                                          \
    /* w_ic * Ct-1, w_fc * Ct-1  ; w_oc * Ct => ih*/            \
    auto* checked_cell = ctx.Output<Tensor>("CheckedCell");     \
    checked_cell_data = checked_cell->mutable_data<T>(place);   \
  }                                                             \
  const math::jitkernel::lstm_attr_t attr(                      \
      D, ctx.Attr<std::string>("gate_activation"),              \
      ctx.Attr<std::string>("candidate_activation"),            \
      ctx.Attr<std::string>("cell_activation"), use_peepholes); \
  math::jitkernel::lstm_t one_step;                             \
  one_step.wp = wp_data;                                        \
  one_step.checked = checked_cell_data;                         \
  const auto& ker =                                             \
      math::jitkernel::KernelPool::Instance()                   \
          .template Get<math::jitkernel::LSTMKernel<T>,         \
                        const math::jitkernel::lstm_attr_t&>(attr)

// Wh GEMM
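// accumulates the hidden-to-hidden projection into the gate buffer:
//   out (bs x 4D) += prev (bs x D) * WeightH (D x 4D)
// The input projection X * WeightX (+ bias) is precomputed, so each step
// only needs this GEMM plus the element-wise JIT kernel.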
#define GEMM_WH_ADDON(bs, prev, out)                                           \
  blas.GEMM(CblasNoTrans, CblasNoTrans, bs, D4, D, static_cast<T>(1), prev, D, \
            wh_data, D4, static_cast<T>(1), out, D4)

  void SeqCompute(const framework::ExecutionContext& ctx) const {
    INIT_BASE_DEFINES;
    INIT_OTHER_DEFINES;
    auto x_lod = x->lod();
    const int total_T = x_dims[0];
    const int N = x_lod[0].size() - 1;
    const T* h0_data = h0 ? h0->data<T>() : nullptr;
    const T* c0_data = c0 ? c0->data<T>() : nullptr;
    T* xx_data = xx->mutable_data<T>(place);
    T* h_out_data = hidden_out->mutable_data<T>(place);
    T* c_out_data = cell_out->mutable_data<T>(place);
    auto blas = math::GetBlas<DeviceContext, T>(ctx);
    math::FCCompute<DeviceContext, T>(blas, total_T, D4, M, x_data, wx_data,
                                      xx_data, bias->data<T>());
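    // xx now holds X * WeightX + bias for all total_T time steps at once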

    int xx_offset = D4;
    int gate_offset = D;
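    // for a reversed LSTM, start at the last time step and walk backwards
    // through the buffers with negative strides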
    if (is_reverse) {
      const int offset = (total_T - 1) * D;
      xx_data = xx_data + offset * 4;
      h_out_data = h_out_data + offset;
      c_out_data = c_out_data + offset;
      xx_offset = -D4;
      gate_offset = -D;
    }

    for (int i = 0; i < N; ++i) {
      int bid = is_reverse ? N - 1 - i : i;
      int seq_len = x_lod[0][bid + 1] - x_lod[0][bid];
      const T* prev_c_data = nullptr;
      const T* prev_h_data = nullptr;
      int tstart = 0;
      if (h0_data) {
        prev_h_data = h0_data + bid * D;
        prev_c_data = c0_data + bid * D;
      } else {
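        // no initial state given: compute the first step with the dedicated
        // C1H1 kernel (it takes no c_{t-1}/h_{t-1}), then loop from t = 1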
        one_step.gates = xx_data;
        one_step.ct = c_out_data;
        one_step.ht = h_out_data;
        ker->ComputeC1H1(&one_step, &attr);
        tstart = 1;
        // move one step
        prev_h_data = h_out_data;
        prev_c_data = c_out_data;
        xx_data = xx_data + xx_offset;
        h_out_data = h_out_data + gate_offset;
        c_out_data = c_out_data + gate_offset;
      }
      for (int step = tstart; step < seq_len; ++step) {
        GEMM_WH_ADDON(1, prev_h_data, xx_data);

        one_step.gates = xx_data;
        one_step.ct_1 = prev_c_data;
        one_step.ct = c_out_data;
        one_step.ht = h_out_data;
        ker->ComputeCtHt(&one_step, &attr);
        // move one step
        prev_h_data = h_out_data;
        prev_c_data = c_out_data;
        xx_data = xx_data + xx_offset;
        h_out_data = h_out_data + gate_offset;
        c_out_data = c_out_data + gate_offset;
      }
    }
  }

  void BatchCompute(const framework::ExecutionContext& ctx) const {
    INIT_BASE_DEFINES;
    if (x->lod()[0].size() == 2) {
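      // lod {0, len} means a single sequence, so the batch path has no
      // benefit; fall back to the sequential path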
      xx->Resize({x_dims[0], D4});
      SeqCompute(ctx);
      return;
    }
    INIT_OTHER_DEFINES;

    auto* reordered_h0 = ctx.Output<Tensor>("ReorderedH0");
    auto* reordered_c0 = ctx.Output<Tensor>("ReorderedC0");
    auto* batched_input = ctx.Output<LoDTensor>("BatchedInput");
    auto* batched_c_out = ctx.Output<LoDTensor>("BatchedCell");
    auto* batched_h_out = ctx.Output<LoDTensor>("BatchedHidden");
    T* xx_data = xx->mutable_data<T>(place);
    T* batched_input_data = batched_input->mutable_data<T>(place);
    T* batched_c_out_data = batched_c_out->mutable_data<T>(place);
    T* batched_h_out_data = batched_h_out->mutable_data<T>(place);
    hidden_out->mutable_data<T>(place);
    cell_out->mutable_data<T>(place);

    math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto blas = math::GetBlas<DeviceContext, T>(dev_ctx);
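    // choose the cheaper order: when M > 4D the FC narrows the data, so run
    // it before the seq-to-batch reorder; otherwise reorder the narrower
    // input first and run the FC on the batched layout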
    if (M > D4) {
      math::FCCompute<DeviceContext, T>(blas, x_dims[0], D4, M, x_data, wx_data,
                                        xx_data, bias->data<T>());
      to_batch(dev_ctx, *xx, batched_input, true, is_reverse);
    } else {
      to_batch(dev_ctx, *x, xx, true, is_reverse);
      batched_input->set_lod(xx->lod());
      math::FCCompute<DeviceContext, T>(blas, x_dims[0], D4, M, xx_data,
                                        wx_data, batched_input_data,
                                        bias->data<T>());
    }

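    // batched_lod[2] records the order of the sequences after sorting by
    // length (longest first); h0/c0 are gathered into that order below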
    auto batched_lod = batched_input->lod();
    const auto& seq_order = batched_lod[2];
    const int max_bs = seq_order.size();
    reordered_h0->Resize({max_bs, D});
    reordered_c0->Resize({max_bs, D});

    int tstart = 0;
    T* prev_h_data = nullptr;
    T* prev_c_data = nullptr;
    if (h0) {
      // reorder h0, c0
      T* reordered_h0_data = reordered_h0->mutable_data<T>(place);
      T* reordered_c0_data = reordered_c0->mutable_data<T>(place);
      const T* h0_data = h0->data<T>();
      const T* c0_data = c0->data<T>();
      prev_h_data = reordered_h0_data;
      prev_c_data = reordered_c0_data;
      for (int i = 0; i < max_bs; ++i) {
        // VCOPY takes an element count, so copy D elements per sequence,
        // gathered in seq_order
        blas.VCOPY(D, h0_data + seq_order[i] * D, reordered_h0_data);
        blas.VCOPY(D, c0_data + seq_order[i] * D, reordered_c0_data);
        reordered_h0_data += D;
        reordered_c0_data += D;
      }
    } else {
      // compute without h0, c0
      T* cur_in_data = batched_input_data;
      T* cur_h_out_data = batched_h_out_data;
      T* cur_c_out_data = batched_c_out_data;
      for (int i = 0; i < max_bs; ++i) {
        one_step.gates = cur_in_data;
        one_step.ct = cur_c_out_data;
        one_step.ht = cur_h_out_data;
        ker->ComputeC1H1(&one_step, &attr);

        cur_in_data += D4;
        cur_c_out_data += D;
        cur_h_out_data += D;
      }
      tstart = 1;
      prev_h_data = batched_h_out_data;
      prev_c_data = batched_c_out_data;
    }

    // compute kernel part
    const auto& batch_starts = batched_lod[0];
    const int max_seq_len = batch_starts.size() - 1;
    const int offset = tstart * max_bs * D;
    batched_input_data = batched_input_data + offset * 4;
    batched_h_out_data = batched_h_out_data + offset;
    batched_c_out_data = batched_c_out_data + offset;
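    // in batch layout, step t holds the t-th element of every still-active
    // sequence, so cur_bs shrinks as shorter sequences finish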
    for (int step = tstart; step < max_seq_len; ++step) {
      const int cur_bs = batch_starts[step + 1] - batch_starts[step];
      GEMM_WH_ADDON(cur_bs, prev_h_data, batched_input_data);
      T* cur_in_data = batched_input_data;
      T* cur_prev_c_data = prev_c_data;
      T* cur_c_out_data = batched_c_out_data;
      T* cur_h_out_data = batched_h_out_data;
      for (int i = 0; i < cur_bs; ++i) {
        one_step.gates = cur_in_data;
        one_step.ct_1 = cur_prev_c_data;
        one_step.ct = cur_c_out_data;
        one_step.ht = cur_h_out_data;
        ker->ComputeCtHt(&one_step, &attr);

        // move one batch
        cur_in_data += D4;
        cur_prev_c_data += D;
        cur_c_out_data += D;
        cur_h_out_data += D;
      }
      // move one step
      prev_c_data = batched_c_out_data;
      prev_h_data = batched_h_out_data;
      batched_c_out_data = cur_c_out_data;
      batched_h_out_data = cur_h_out_data;
      batched_input_data = cur_in_data;
    }

    math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
    batched_h_out->set_lod(batched_lod);
    to_seq(dev_ctx, *batched_h_out, hidden_out);
    batched_c_out->set_lod(batched_lod);
    to_seq(dev_ctx, *batched_c_out, cell_out);
  }

  void Compute(const framework::ExecutionContext& ctx) const override {
    if (ctx.Attr<bool>("use_seq")) {
      SeqCompute(ctx);
    } else {
      BatchCompute(ctx);
    }
  }

#undef GEMM_WH_ADDON
#undef INIT_OTHER_DEFINES
#undef INIT_BASE_DEFINES
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(fusion_lstm, ops::FusionLSTMOp, ops::FusionLSTMOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);

REGISTER_OP_CPU_KERNEL(fusion_lstm, ops::FusionLSTMKernel<float>,
                       ops::FusionLSTMKernel<double>);