/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <string>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/detail/activation_functions.h"
#include "paddle/fluid/operators/math/lstm_compute.h"
#include "paddle/fluid/operators/math/sequence2batch.h"

namespace paddle {
namespace operators {

using LoDTensor = framework::LoDTensor;
using Tensor = framework::Tensor;

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;

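// Reorder the rows of `src` into `dst` according to `index`: a gather
// (dst[i] = src[index[i]]) when `indexed_src` is true, otherwise a scatter
// (dst[index[i]] = src[i]) that restores the original row order.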
template <typename DeviceContext, typename T>
inline void ReorderInitState(const DeviceContext& ctx,
                             const framework::Tensor& src,
                             framework::Vector<size_t> index,
                             framework::Tensor* dst, bool indexed_src) {
  math::CopyMatrixRowsFunctor<DeviceContext, T> row_shuffle;
  dst->mutable_data<T>(src.dims(), ctx.GetPlace());
  row_shuffle(ctx, src, index, dst, indexed_src);
}

template <typename DeviceContext, typename T>
class LSTMPKernel : public framework::OpKernel<T> {
 public:
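  // Dispatch the element-wise forward activation on an Eigen device;
  // kIdentity is a plain copy of x into y.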
  template <typename Device, typename X, typename Y>
  void ActCompute(const math::detail::ActivationType act_type, const Device& d,
                  X x, Y y) const {
    if (act_type == math::detail::ActivationType::kIdentity)
      y.device(d) = x;
    else if (act_type == math::detail::ActivationType::kSigmoid)
      SigmoidFunctor<T>()(d, x, y);
    else if (act_type == math::detail::ActivationType::kTanh)
      TanhFunctor<T>()(d, x, y);
    else if (act_type == math::detail::ActivationType::kReLU)
      ReluFunctor<T>()(d, x, y);
    else
      PADDLE_THROW("unsupported activation type");
  }

  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<LoDTensor>("Input");
    auto* weight = ctx.Input<Tensor>("Weight");
    auto* proj_weight = ctx.Input<Tensor>("ProjWeight");
    auto* bias = ctx.Input<Tensor>("Bias");

    auto* hidden_t0 = ctx.Input<Tensor>("H0");
    auto* ordered_proj0 = ctx.Output<Tensor>("OrderedP0");
    auto* cell_t0 = ctx.Input<Tensor>("C0");

    auto* batch_gate = ctx.Output<LoDTensor>("BatchGate");
    batch_gate->mutable_data<T>(ctx.GetPlace());
    auto* proj_out = ctx.Output<LoDTensor>("Projection");
    proj_out->mutable_data<T>(ctx.GetPlace());
    auto* cell_out = ctx.Output<LoDTensor>("Cell");
    cell_out->mutable_data<T>(ctx.GetPlace());

    bool is_reverse = ctx.Attr<bool>("is_reverse");
    math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;
    auto& device_ctx = ctx.template device_context<DeviceContext>();
    to_batch(device_ctx, *input, batch_gate, true, is_reverse);

    auto in_dims = input->dims();
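    // The input holds the four gates concatenated along the second
    // dimension, so in_dims[1] == 4 * frame_size.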
    int frame_size = static_cast<int>(in_dims[1] / 4);
    framework::DDim dims({in_dims[0], frame_size});
    framework::DDim proj_dims({in_dims[0], proj_weight->dims()[1]});

    if (bias) {
      Tensor b = *bias;
      b.Resize({bias->numel(), 1});
      Tensor gate_bias = b.Slice(0, 4 * frame_size);
      math::RowwiseAdd<DeviceContext, T> add_bias;
      add_bias(device_ctx, *batch_gate, gate_bias, batch_gate);
    }

    math::LstmMetaValue<T> lstmp_value;
    if (bias && ctx.Attr<bool>("use_peepholes")) {
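      // With peepholes enabled, the bias tensor layout is
      // [4 * frame_size gate biases | W_ic | W_fc | W_oc],
      // each peephole weight vector being of size frame_size.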
      T* bias_data = const_cast<T*>(bias->data<T>());
      // the code style in LstmpMetaValue will be updated later.

      lstmp_value.check_ig = bias_data + 4 * frame_size;
      lstmp_value.check_fg = lstmp_value.check_ig + frame_size;
      lstmp_value.check_og = lstmp_value.check_fg + frame_size;
    } else {
      lstmp_value.check_ig = nullptr;
      lstmp_value.check_fg = nullptr;
      lstmp_value.check_og = nullptr;
    }
    lstmp_value.prev_state_value = nullptr;
    Tensor ordered_c0;

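    // lod()[2] of the batch-major gate tensor records the order of the
    // sequences after sorting by length; it is used to reorder the
    // initial states to match.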
    framework::Vector<size_t> order(batch_gate->lod()[2]);

    if (cell_t0) {
      // Since batch computing for LSTMP reorders the input sequences
      // according to their length, the initial cell state also needs
      // to be reordered.
      ReorderInitState<DeviceContext, T>(device_ctx, *cell_t0, order,
                                         &ordered_c0, true);
      lstmp_value.prev_state_value = ordered_c0.data<T>();
    }

    // Use local variables to hold the batch-major projection and cell
    // tensors.
    LoDTensor batch_proj, batch_cell;
    auto* batch_cell_pre_act = ctx.Output<LoDTensor>("BatchCellPreAct");
    batch_cell_pre_act->mutable_data<T>(dims, ctx.GetPlace());
    auto* batch_hidden = ctx.Output<LoDTensor>("BatchHidden");
    batch_hidden->mutable_data<T>(dims, ctx.GetPlace());    // T x D
    batch_proj.mutable_data<T>(proj_dims, ctx.GetPlace());  // T x P
    batch_cell.mutable_data<T>(dims, ctx.GetPlace());       // T x D

    auto batch_starts = batch_gate->lod()[0];
    size_t num_batch = batch_starts.size() - 1;
    auto gate_act = math::detail::GetActivationType(
        ctx.Attr<std::string>("gate_activation"));
    auto cell_act = math::detail::GetActivationType(
        ctx.Attr<std::string>("cell_activation"));
    auto cand_act = math::detail::GetActivationType(
        ctx.Attr<std::string>("candidate_activation"));
    auto proj_act = math::detail::GetActivationType(
        ctx.Attr<std::string>("proj_activation"));
    auto& place = *ctx.template device_context<DeviceContext>().eigen_device();
    auto blas = math::GetBlas<DeviceContext, T>(device_ctx);
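    // Process the batch-major tensors step by step: batch n contains the
    // n-th time step of every sequence that is still active (sequences are
    // sorted by length in descending order).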
    for (size_t n = 0; n < num_batch; n++) {
      int bstart = static_cast<int>(batch_starts[n]);
      int bend = static_cast<int>(batch_starts[n + 1]);

      Tensor gate_t = batch_gate->Slice(bstart, bend);
      Tensor hidden_t = batch_hidden->Slice(bstart, bend);
      Tensor proj_t = batch_proj.Slice(bstart, bend);
      Tensor cell_t = batch_cell.Slice(bstart, bend);
      Tensor cell_pre_act_t = batch_cell_pre_act->Slice(bstart, bend);

      int cur_batch_size = bend - bstart;

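      // Add the recurrent term W_h * r_{t-1} to the gate pre-activations,
      // where r_{t-1} is the projection output of the previous time step.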
      if (n > 0) {
        int pre_h_start = static_cast<int>(batch_starts[n - 1]);
        int pre_h_end = pre_h_start + cur_batch_size;
        auto pre_proj_t = batch_proj.Slice(pre_h_start, pre_h_end);
        blas.MatMul(pre_proj_t, false, *weight, false, static_cast<T>(1.0),
                    &gate_t, static_cast<T>(1.0));
      } else if (hidden_t0) {
        // If n == 0 and there is no initial hidden state, H0 is all zeros
        // and the calculation W_h * H0 can be skipped.
        // If n == 0 and an initial hidden state is given, calculate W_h * H0.

        // Since batch computing for LSTMP reorders the input sequences
        // according to their length, the initial hidden state also needs
        // to be reordered.

        Tensor ordered_h0;
        ordered_proj0->mutable_data<T>(ctx.GetPlace());
        ReorderInitState<DeviceContext, T>(device_ctx, *hidden_t0, order,
                                           &ordered_h0, true);
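        // Project the reordered initial hidden state: p_0 = H0 * W_proj.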
        blas.MatMul(ordered_h0, false, *proj_weight, false, static_cast<T>(1.0),
                    ordered_proj0, static_cast<T>(0.0));
        if (proj_act != math::detail::ActivationType::kIdentity) {
          auto proj0_dev = EigenMatrix<T>::From(*ordered_proj0);
          ActCompute(proj_act, place, proj0_dev, proj0_dev);
        }
        blas.MatMul(*ordered_proj0, false, *weight, false, static_cast<T>(1.0),
                    &gate_t, static_cast<T>(1.0));
      }

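      // Run the fused LSTM cell on this batch: apply the gate activations
      // and compute the new cell state and the pre-projection hidden output.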
      lstmp_value.gate_value = gate_t.data<T>();
      lstmp_value.output_value = hidden_t.data<T>();
      lstmp_value.state_value = cell_t.data<T>();
      lstmp_value.state_active_value = cell_pre_act_t.data<T>();
      math::LstmUnitFunctor<DeviceContext, T>::compute(
          device_ctx, lstmp_value, frame_size, cur_batch_size, gate_act,
          cell_act, cand_act);
      lstmp_value.prev_state_value = lstmp_value.state_value;
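      // Project the hidden state: r_t = act(h_t * W_proj), with the optional
      // projection activation applied below.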
      blas.MatMul(hidden_t, false, *proj_weight, false, static_cast<T>(1.0),
                  &proj_t, static_cast<T>(0.0));
      if (proj_act != math::detail::ActivationType::kIdentity) {
        auto proj_t_dev = EigenMatrix<T>::From(proj_t);
        ActCompute(proj_act, place, proj_t_dev, proj_t_dev);
      }
    }

    math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
    batch_proj.set_lod(batch_gate->lod());
    // restore the output hidden in LoDTensor from the batch hidden
    to_seq(device_ctx, batch_proj, proj_out);

    batch_cell.set_lod(batch_gate->lod());
    // restore the output cell state in LoDTensor from the batch cell
    to_seq(device_ctx, batch_cell, cell_out);
  }
};

template <typename DeviceContext, typename T>
class LSTMPGradKernel : public framework::OpKernel<T> {
 public:
  template <typename Device, typename X, typename Y, typename DX, typename DY>
  void ActGradCompute(const math::detail::ActivationType act_type,
                      const Device& d, X x, Y y, DX dx, DY dy) const {
    // x is a dummy argument and is not used, even for ReLU (y is used
    // instead).
    if (act_type == math::detail::ActivationType::kIdentity)
      dx.device(d) = dy;
    else if (act_type == math::detail::ActivationType::kSigmoid)
      SigmoidGradFunctor<T>()(d, x, y, dy, dx);
    else if (act_type == math::detail::ActivationType::kTanh)
      TanhGradFunctor<T>()(d, x, y, dy, dx);
    else if (act_type == math::detail::ActivationType::kReLU)
      ReluGradFunctor<T>()(d, x, y, dy, dx);
    else
      PADDLE_THROW("unsupported activation type");
  }

  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<LoDTensor>("Input");
    auto* weight = ctx.Input<Tensor>("Weight");
    auto* proj_weight = ctx.Input<Tensor>("ProjWeight");
    auto* bias = ctx.Input<Tensor>("Bias");

    auto* proj_out = ctx.Input<LoDTensor>("Projection");
    auto* cell_out = ctx.Input<LoDTensor>("Cell");

    auto* batch_gate = ctx.Input<LoDTensor>("BatchGate");
    auto* batch_cell_pre_act = ctx.Input<LoDTensor>("BatchCellPreAct");
    auto* batch_hidden = ctx.Input<LoDTensor>("BatchHidden");

    auto* projection_g =
        ctx.Input<LoDTensor>(framework::GradVarName("Projection"));

    auto* in_g = ctx.Output<LoDTensor>(framework::GradVarName("Input"));
    auto* weight_g = ctx.Output<Tensor>(framework::GradVarName("Weight"));
    auto* proj_weight_g =
        ctx.Output<Tensor>(framework::GradVarName("ProjWeight"));
    auto* bias_g = ctx.Output<Tensor>(framework::GradVarName("Bias"));

    auto* h0 = ctx.Input<Tensor>("H0");
    auto* ordered_proj0 = ctx.Input<Tensor>("OrderedP0");
    auto* c0 = ctx.Input<Tensor>("C0");

    auto* h0_g = ctx.Output<Tensor>(framework::GradVarName("H0"));
    auto* c0_g = ctx.Output<Tensor>(framework::GradVarName("C0"));

    auto& device_ctx = ctx.template device_context<DeviceContext>();
    math::SetConstant<DeviceContext, T> zero;
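    // Weight gradients are accumulated across time steps, so they must be
    // zero-initialized first.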
    if (weight_g) {
      weight_g->mutable_data<T>(ctx.GetPlace());
      zero(device_ctx, weight_g, static_cast<T>(0.0));
    }
    if (proj_weight_g) {
      proj_weight_g->mutable_data<T>(ctx.GetPlace());
      zero(device_ctx, proj_weight_g, static_cast<T>(0.0));
    }

    // ordered_h0/ordered_c0 hold the reordered initial hidden/cell states;
    // ordered_h0_g/ordered_c0_g hold the reordered gradients of the
    // initial hidden/cell states.
    Tensor ordered_h0, ordered_c0, ordered_h0_g, ordered_c0_g;

    framework::Vector<size_t> order(batch_gate->lod()[2]);

    if (c0) {
      ReorderInitState<DeviceContext, T>(device_ctx, *c0, order, &ordered_c0,
                                         true);
    }
    if (c0 && c0_g) {
      ordered_c0_g.mutable_data<T>(c0_g->dims(), ctx.GetPlace());
    }

    auto in_dims = input->dims();
    auto out_dims = cell_out->dims();
    framework::DDim proj_dims({in_dims[0], proj_weight->dims()[1]});
    int frame_size = static_cast<int>(in_dims[1] / 4);
    PADDLE_ENFORCE_EQ(frame_size, out_dims[1]);

    math::LstmMetaValue<T> lstmp_value;
    if (bias && ctx.Attr<bool>("use_peepholes")) {
      T* bias_data = const_cast<T*>(bias->data<T>());
      lstmp_value.check_ig = bias_data + 4 * frame_size;
      lstmp_value.check_fg = lstmp_value.check_ig + frame_size;
      lstmp_value.check_og = lstmp_value.check_fg + frame_size;
    } else {
      lstmp_value.check_ig = nullptr;
      lstmp_value.check_fg = nullptr;
      lstmp_value.check_og = nullptr;
    }

    math::LstmMetaGrad<T> lstmp_grad;

    if (bias && bias_g) {
      bias_g->mutable_data<T>(ctx.GetPlace());
      zero(device_ctx, bias_g, static_cast<T>(0.0));
    }
    if (bias && bias_g && ctx.Attr<bool>("use_peepholes")) {
      T* bias_g_data = bias_g->data<T>();
      lstmp_grad.check_ig_grad = bias_g_data + 4 * frame_size;
      lstmp_grad.check_fg_grad = lstmp_grad.check_ig_grad + frame_size;
      lstmp_grad.check_og_grad = lstmp_grad.check_fg_grad + frame_size;
    } else {
      lstmp_grad.check_ig_grad = nullptr;
      lstmp_grad.check_fg_grad = nullptr;
      lstmp_grad.check_og_grad = nullptr;
    }

    math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;

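    // Convert a sequence-major LoDTensor into the batch-major layout computed
    // in the forward pass; the trailing `false` reuses batch_gate's LoD
    // instead of recomputing it.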
    auto ToBatch = [&batch_gate, &to_batch](
        const DeviceContext& ctx, const framework::LoDTensor& src,
        const framework::DDim& dims, framework::LoDTensor& dst) {
      dst.mutable_data<T>(dims, ctx.GetPlace());
      dst.set_lod(batch_gate->lod());
      to_batch(ctx, src, &dst, false);
    };

    LoDTensor batch_hidden_g, batch_proj, batch_proj_g, batch_cell;
    batch_hidden_g.mutable_data<T>(out_dims, ctx.GetPlace());
    ToBatch(device_ctx, *proj_out, proj_dims, batch_proj);        // T x P
    ToBatch(device_ctx, *projection_g, proj_dims, batch_proj_g);  // T x P
    ToBatch(device_ctx, *cell_out, out_dims, batch_cell);         // T x D

    LoDTensor batch_cell_g, batch_gate_g;
    batch_cell_g.mutable_data<T>(out_dims, ctx.GetPlace());
    // TODO(qingqing): support the case where the output cell state has a
    // gradient.
    // to_batch(device_ctx, *cell_g, batch_cell_g, false);
    zero(device_ctx, &batch_cell_g, static_cast<T>(0.0));
    batch_gate_g.mutable_data<T>(batch_gate->dims(), ctx.GetPlace());
    batch_gate_g.set_lod(batch_gate->lod());

    auto gate_act = math::detail::GetActivationType(
        ctx.Attr<std::string>("gate_activation"));
    auto cell_act = math::detail::GetActivationType(
        ctx.Attr<std::string>("cell_activation"));
    auto cand_act = math::detail::GetActivationType(
        ctx.Attr<std::string>("candidate_activation"));
    auto proj_act = math::detail::GetActivationType(
        ctx.Attr<std::string>("proj_activation"));
    auto& place = *ctx.template device_context<DeviceContext>().eigen_device();

    auto batch_starts = batch_gate->lod()[0];
    size_t num_batch = batch_starts.size() - 1;
    auto blas = math::GetBlas<DeviceContext, T>(device_ctx);
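    // Walk the batches in reverse time order, back-propagating through the
    // projection, the LSTM cell, and the recurrent connection.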
    for (int n = static_cast<int>(num_batch) - 1; n >= 0; n--) {
      int bstart = static_cast<int>(batch_starts[n]);
      int bend = static_cast<int>(batch_starts[n + 1]);

      Tensor cur_proj = batch_proj.Slice(bstart, bend);
      Tensor proj_g = batch_proj_g.Slice(bstart, bend);
      if (proj_act != math::detail::ActivationType::kIdentity) {
        auto cur_proj_dev = EigenMatrix<T>::From(cur_proj);
        auto proj_g_dev = EigenMatrix<T>::From(proj_g);
        ActGradCompute(proj_act, place, cur_proj_dev, cur_proj_dev, proj_g_dev,
                       proj_g_dev);
      }
      /* hidden state backward */
      Tensor out_g = batch_hidden_g.Slice(bstart, bend);
      blas.MatMul(proj_g, false, *proj_weight, true, static_cast<T>(1.0),
                  &out_g, static_cast<T>(0.0));
      /* projection weight backward */
      if (proj_weight_g) {
        Tensor hidden_t = batch_hidden->Slice(bstart, bend);
        blas.MatMul(hidden_t, true, proj_g, false, static_cast<T>(1.0),
                    proj_weight_g, static_cast<T>(1.0));
      }

      Tensor gate = batch_gate->Slice(bstart, bend);
      Tensor cell = batch_cell.Slice(bstart, bend);
      Tensor cell_pre_act = batch_cell_pre_act->Slice(bstart, bend);
      lstmp_value.gate_value = gate.data<T>();
      lstmp_value.state_value = cell.data<T>();
      lstmp_value.state_active_value = cell_pre_act.data<T>();

      Tensor gate_g = batch_gate_g.Slice(bstart, bend);
      Tensor cell_g = batch_cell_g.Slice(bstart, bend);
      lstmp_grad.state_grad = cell_g.data<T>();
      lstmp_grad.gate_grad = gate_g.data<T>();
      lstmp_grad.output_grad = out_g.data<T>();

      if (n > 0) {
        int bstart_pre = static_cast<int>(batch_starts[n - 1]);
        Tensor cell_pre = batch_cell.Slice(bstart_pre, bstart);
        Tensor cell_pre_g = batch_cell_g.Slice(bstart_pre, bstart);
        lstmp_value.prev_state_value = cell_pre.data<T>();
        lstmp_grad.prev_state_grad = cell_pre_g.data<T>();
      } else {
        lstmp_value.prev_state_value = c0 ? ordered_c0.data<T>() : nullptr;
        lstmp_grad.prev_state_grad = c0_g ? ordered_c0_g.data<T>() : nullptr;
      }

      int cur_batch_size = bend - bstart;
      // lstmp_value.output_value and lstmp_grad.state_active_grad are not
      // used in backprop, so set them to null.
      lstmp_value.output_value = nullptr;
      lstmp_grad.state_active_grad = nullptr;

      math::LstmUnitGradFunctor<DeviceContext, T>::compute(
          device_ctx, lstmp_value, lstmp_grad, frame_size, cur_batch_size,
          gate_act, cell_act, cand_act);

      if (n > 0) {
        int pre_h_start = static_cast<int>(batch_starts[n - 1]);
        int pre_h_end = pre_h_start + cur_batch_size;
        auto pre_proj_g = batch_proj_g.Slice(pre_h_start, pre_h_end);
        blas.MatMul(gate_g, false, *weight, true, static_cast<T>(1.0),
                    &pre_proj_g, static_cast<T>(1.0));
        if (weight_g) {
          /* weight backward */
          auto pre_proj = batch_proj.Slice(pre_h_start, pre_h_end);
          blas.MatMul(pre_proj, true, gate_g, false, static_cast<T>(1.0),
                      weight_g, static_cast<T>(1.0));
        }
      } else {
        if (h0 && weight_g) {
          ReorderInitState<DeviceContext, T>(device_ctx, *h0, order,
                                             &ordered_h0, true);
          if (weight_g) {
            blas.MatMul(*ordered_proj0, true, gate_g, false,
                        static_cast<T>(1.0), weight_g, static_cast<T>(1.0));
          }
        }
        if (h0 && (h0_g || proj_weight_g)) {
          // Only dereference h0_g when it exists; this branch is also taken
          // when only proj_weight_g needs the gradient.
          if (h0_g) {
            ordered_h0_g.mutable_data<T>(h0_g->dims(), ctx.GetPlace());
          }
          Tensor proj0_g;
          proj0_g.Resize({in_dims[0], proj_weight->dims()[1]});
          proj0_g.mutable_data<T>(ctx.GetPlace());
          blas.MatMul(gate_g, false, *weight, true, static_cast<T>(1.0),
                      &proj0_g, static_cast<T>(0.0));
          if (proj_act != math::detail::ActivationType::kIdentity) {
            auto proj0_dev = EigenMatrix<T>::From(*ordered_proj0);
            auto proj0_g_dev = EigenMatrix<T>::From(proj0_g);
            ActGradCompute(proj_act, place, proj0_dev, proj0_dev, proj0_g_dev,
                           proj0_g_dev);
          }
          if (h0_g) {
            blas.MatMul(proj0_g, false, *proj_weight, true, static_cast<T>(1.0),
                        &ordered_h0_g, static_cast<T>(0.0));
          }
          if (proj_weight_g) {
            blas.MatMul(ordered_h0, true, proj0_g, false, static_cast<T>(1.0),
                        proj_weight_g, static_cast<T>(1.0));
          }
        }
      }
    }

    math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
    if (in_g) {
      /* backward data */
      in_g->mutable_data<T>(ctx.GetPlace());
      to_seq(device_ctx, batch_gate_g, in_g);
    }
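    // The gate bias gradient is the column-wise sum of the gate gradients
    // over all batch rows (i.e., over all time steps and sequences).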
    if (bias && bias_g) {
      /* backward bias */
      Tensor b_g = *bias_g;
      b_g.Resize({bias_g->numel(), 1});
      Tensor gate_bias_g = b_g.Slice(0, 4 * frame_size);
      math::ColwiseSum<DeviceContext, T> col_sum;
      col_sum(device_ctx, batch_gate_g, &gate_bias_g);
    }

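    // Scatter the reordered initial-state gradients back into the original
    // sequence order (indexed_src == false performs the inverse reorder).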
    if (h0 && h0_g) {
      ReorderInitState<DeviceContext, T>(device_ctx, ordered_h0_g, order, h0_g,
                                         false);
    }
    if (c0 && c0_g) {
      ReorderInitState<DeviceContext, T>(device_ctx, ordered_c0_g, order, c0_g,
                                         false);
    }
  }
};

}  // namespace operators
}  // namespace paddle