/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <string>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/detail/activation_functions.h"
#include "paddle/fluid/operators/math/lstm_compute.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/sequence2batch.h"

namespace paddle {
namespace operators {

using LoDTensor = framework::LoDTensor;
using Tensor = framework::Tensor;

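// Reorders the rows of `src` into `dst` following `index_lod`; `indexed_src`
// controls whether the indices refer to the source or the destination rows
// (see math::CopyMatrixRowsFunctor for the exact semantics).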
template <typename DeviceContext, typename T>
inline void ReorderInitState(const DeviceContext& ctx,
                             const framework::Tensor& src,
                             framework::Vector<size_t> index_lod,
                             framework::Tensor* dst, bool indexed_src) {
  math::CopyMatrixRowsFunctor<DeviceContext, T> row_shuffle;
  dst->mutable_data<T>(src.dims(), ctx.GetPlace());
  row_shuffle(ctx, src, index_lod, dst, indexed_src);
}

template <typename DeviceContext, typename T>
class LSTMKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<LoDTensor>("Input");
    auto* weight = ctx.Input<Tensor>("Weight");
    auto* bias = ctx.Input<Tensor>("Bias");

    auto* hidden_t0 = ctx.Input<Tensor>("H0");
    auto* cell_t0 = ctx.Input<Tensor>("C0");

    auto* batch_gate = ctx.Output<LoDTensor>("BatchGate");
    batch_gate->mutable_data<T>(ctx.GetPlace());
    auto* hidden_out = ctx.Output<LoDTensor>("Hidden");
    hidden_out->mutable_data<T>(ctx.GetPlace());
    auto* cell_out = ctx.Output<LoDTensor>("Cell");
    cell_out->mutable_data<T>(ctx.GetPlace());

    bool is_reverse = ctx.Attr<bool>("is_reverse");
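    // Rearrange the LoD (sequence) input into batch layout so that all
    // sequences can advance one time step per iteration; the reordering
    // information is recorded in batch_gate's LoD.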
    math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;
    auto& device_ctx = ctx.template device_context<DeviceContext>();
    to_batch(device_ctx, *input, batch_gate, true, is_reverse);

    auto in_dims = input->dims();
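    // The second dimension of Input packs the projections for the four LSTM
    // gates, so the per-gate frame size is in_dims[1] / 4.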
    int frame_size = static_cast<int>(in_dims[1] / 4);
    framework::DDim dims({in_dims[0], frame_size});

    if (bias) {
      Tensor b = *bias;
      b.Resize({bias->numel(), 1});
      Tensor gate_bias = b.Slice(0, 4 * frame_size);
      math::RowwiseAdd<DeviceContext, T> add_bias;
      add_bias(device_ctx, *batch_gate, gate_bias, batch_gate);
    }

    math::LstmMetaValue<T> lstm_value;
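    // When peephole connections are used, Bias additionally carries the three
    // peephole weight vectors right after the 4 * frame_size gate biases.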
    if (bias && ctx.Attr<bool>("use_peepholes")) {
      T* bias_data = const_cast<T*>(bias->data<T>());
      // the code style in LstmMetaValue will be updated later.

      lstm_value.check_ig = bias_data + 4 * frame_size;
      lstm_value.check_fg = lstm_value.check_ig + frame_size;
      lstm_value.check_og = lstm_value.check_fg + frame_size;
    } else {
      lstm_value.check_ig = nullptr;
      lstm_value.check_fg = nullptr;
      lstm_value.check_og = nullptr;
    }
    lstm_value.prev_state_value = nullptr;
    Tensor ordered_c0;

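    // batch_gate->lod()[2] records the sequence order produced by the
    // LoDTensor-to-batch conversion above; it is reused here to reorder the
    // initial states to match the batched layout.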
    framework::Vector<size_t> order(batch_gate->lod()[2]);

    if (cell_t0) {
      // The batch computation for LSTM reorders the input sequences by
      // length, so the initial cell state must be reordered as well.
      ReorderInitState<DeviceContext, T>(device_ctx, *cell_t0, order,
                                         &ordered_c0, true);
      lstm_value.prev_state_value = ordered_c0.data<T>();
    }

    // Use local variables for the batch-layout hidden and cell states.
    LoDTensor batch_hidden, batch_cell;
    auto* batch_cell_pre_act = ctx.Output<LoDTensor>("BatchCellPreAct");
    batch_hidden.mutable_data<T>(dims, ctx.GetPlace());
    batch_cell.mutable_data<T>(dims, ctx.GetPlace());
    batch_cell_pre_act->mutable_data<T>(dims, ctx.GetPlace());

    auto batch_starts = batch_gate->lod()[0];
    size_t num_batch = batch_starts.size() - 1;
    auto gate_act = math::detail::GetActivationType(
        ctx.Attr<std::string>("gate_activation"));
    auto cell_act = math::detail::GetActivationType(
        ctx.Attr<std::string>("cell_activation"));
    auto cand_act = math::detail::GetActivationType(
        ctx.Attr<std::string>("candidate_activation"));

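    // Process the batched time steps in order; batch n covers rows
    // [batch_starts[n], batch_starts[n + 1]) of the batch-layout tensors.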
    for (size_t n = 0; n < num_batch; n++) {
      int bstart = static_cast<int>(batch_starts[n]);
      int bend = static_cast<int>(batch_starts[n + 1]);

      Tensor gate_t = batch_gate->Slice(bstart, bend);
      Tensor out_t = batch_hidden.Slice(bstart, bend);
      Tensor cell_t = batch_cell.Slice(bstart, bend);
      Tensor cell_pre_act_t = batch_cell_pre_act->Slice(bstart, bend);

      int cur_batch_size = bend - bstart;

      if (n > 0) {
        int pre_h_start = static_cast<int>(batch_starts[n - 1]);
        int pre_h_end = pre_h_start + cur_batch_size;
        auto pre_hidden_t = batch_hidden.Slice(pre_h_start, pre_h_end);
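        // Accumulate the recurrent projection of the previous hidden state:
        // gate_t += pre_hidden_t * Weight.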
        math::matmul<DeviceContext, T>(device_ctx, pre_hidden_t, false, *weight,
                                       false, static_cast<T>(1.0), &gate_t,
                                       static_cast<T>(1.0));
      } else if (hidden_t0) {
        // If n == 0 and no initial hidden state is given (H0 is all zeros),
        // the W_h * H0 computation is skipped.
        // If n == 0 and an initial hidden state is given, compute W_h * H0.

        // The batch computation for LSTM reorders the input sequences by
        // length, so the initial hidden state must be reordered as well.
        Tensor ordered_h0;
        ReorderInitState<DeviceContext, T>(device_ctx, *hidden_t0, order,
                                           &ordered_h0, true);
        math::matmul<DeviceContext, T>(device_ctx, ordered_h0, false, *weight,
                                       false, static_cast<T>(1.0), &gate_t,
                                       static_cast<T>(1.0));
      }

      lstm_value.gate_value = gate_t.data<T>();
      lstm_value.output_value = out_t.data<T>();
      lstm_value.state_value = cell_t.data<T>();
      lstm_value.state_active_value = cell_pre_act_t.data<T>();
      math::LstmUnitFunctor<DeviceContext, T>::compute(
          device_ctx, lstm_value, frame_size, cur_batch_size, gate_act,
          cell_act, cand_act);
      lstm_value.prev_state_value = lstm_value.state_value;
    }

    math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
    batch_hidden.set_lod(batch_gate->lod());
    // Restore the hidden output from batch layout back to its LoDTensor
    // (sequence) layout.
    to_seq(device_ctx, batch_hidden, hidden_out);

    batch_cell.set_lod(batch_gate->lod());
    // Restore the cell state output from batch layout back to its LoDTensor
    // (sequence) layout.
    to_seq(device_ctx, batch_cell, cell_out);
  }
};

template <typename DeviceContext, typename T>
class LSTMGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<LoDTensor>("Input");
    auto* weight = ctx.Input<Tensor>("Weight");
    auto* bias = ctx.Input<Tensor>("Bias");

    auto* hidden_out = ctx.Input<LoDTensor>("Hidden");
    auto* cell_out = ctx.Input<LoDTensor>("Cell");

    auto* batch_gate = ctx.Input<LoDTensor>("BatchGate");
    auto* batch_cell_pre_act = ctx.Input<LoDTensor>("BatchCellPreAct");

    auto* hidden_g = ctx.Input<LoDTensor>(framework::GradVarName("Hidden"));

    auto* in_g = ctx.Output<LoDTensor>(framework::GradVarName("Input"));
    auto* weight_g = ctx.Output<Tensor>(framework::GradVarName("Weight"));
    auto* bias_g = ctx.Output<Tensor>(framework::GradVarName("Bias"));

    auto* h0 = ctx.Input<Tensor>("H0");
    auto* c0 = ctx.Input<Tensor>("C0");

    auto* h0_g = ctx.Output<Tensor>(framework::GradVarName("H0"));
    auto* c0_g = ctx.Output<Tensor>(framework::GradVarName("C0"));

    auto& device_ctx = ctx.template device_context<DeviceContext>();
    math::SetConstant<DeviceContext, T> zero;
    if (weight_g) {
      weight_g->mutable_data<T>(ctx.GetPlace());
      zero(device_ctx, weight_g, static_cast<T>(0.0));
    }

    // ordered_h0/ordered_c0 are the reordered initial hidden/cell states.
    // ordered_h0_g/ordered_c0_g are the reordered gradients of the initial
    // hidden/cell states.
    Tensor ordered_h0, ordered_c0, ordered_h0_g, ordered_c0_g;
    framework::Vector<size_t> order(batch_gate->lod()[2]);

    if (c0) {
      ReorderInitState<DeviceContext, T>(device_ctx, *c0, order, &ordered_c0,
                                         true);
    }
    if (c0 && c0_g) {
      ordered_c0_g.mutable_data<T>(c0_g->dims(), ctx.GetPlace());
    }

    auto in_dims = input->dims();
    auto out_dims = hidden_g->dims();
    int frame_size = static_cast<int>(in_dims[1] / 4);
    PADDLE_ENFORCE_EQ(frame_size, out_dims[1]);

    math::LstmMetaValue<T> lstm_value;
    if (bias && ctx.Attr<bool>("use_peepholes")) {
      T* bias_data = const_cast<T*>(bias->data<T>());
      lstm_value.check_ig = bias_data + 4 * frame_size;
      lstm_value.check_fg = lstm_value.check_ig + frame_size;
      lstm_value.check_og = lstm_value.check_fg + frame_size;
    } else {
      lstm_value.check_ig = nullptr;
      lstm_value.check_fg = nullptr;
      lstm_value.check_og = nullptr;
    }

    math::LstmMetaGrad<T> lstm_grad;

    if (bias && bias_g) {
      bias_g->mutable_data<T>(ctx.GetPlace());
      zero(device_ctx, bias_g, static_cast<T>(0.0));
    }
    if (bias && bias_g && ctx.Attr<bool>("use_peepholes")) {
      T* bias_g_data = bias_g->data<T>();
      lstm_grad.check_ig_grad = bias_g_data + 4 * frame_size;
      lstm_grad.check_fg_grad = lstm_grad.check_ig_grad + frame_size;
      lstm_grad.check_og_grad = lstm_grad.check_fg_grad + frame_size;
    } else {
      lstm_grad.check_ig_grad = nullptr;
      lstm_grad.check_fg_grad = nullptr;
      lstm_grad.check_og_grad = nullptr;
    }

    math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;

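    // Helper that converts a sequence-layout tensor into batch layout,
    // reusing the batch order stored in batch_gate's LoD from the forward
    // pass.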
    auto ToBatch = [&batch_gate, &to_batch](
        const DeviceContext& ctx, const framework::LoDTensor& src,
        const framework::DDim& dims, framework::LoDTensor& dst) {
      dst.mutable_data<T>(dims, ctx.GetPlace());
      dst.set_lod(batch_gate->lod());
      to_batch(ctx, src, &dst, false);
    };

    LoDTensor batch_hidden, batch_hidden_g, batch_cell;
    ToBatch(device_ctx, *hidden_out, out_dims, batch_hidden);
    ToBatch(device_ctx, *hidden_g, out_dims, batch_hidden_g);
    ToBatch(device_ctx, *cell_out, out_dims, batch_cell);

    LoDTensor batch_cell_g, batch_gate_g;
    batch_cell_g.mutable_data<T>(out_dims, ctx.GetPlace());
    // TODO(qingqing): support the case where the output cell state has a
    // gradient.
    // to_batch(device_ctx, *cell_g, batch_cell_g, false);
    zero(device_ctx, &batch_cell_g, static_cast<T>(0.0));
    batch_gate_g.mutable_data<T>(batch_gate->dims(), ctx.GetPlace());
    batch_gate_g.set_lod(batch_gate->lod());

    auto gate_act = math::detail::GetActivationType(
        ctx.Attr<std::string>("gate_activation"));
    auto cell_act = math::detail::GetActivationType(
        ctx.Attr<std::string>("cell_activation"));
    auto cand_act = math::detail::GetActivationType(
        ctx.Attr<std::string>("candidate_activation"));

    auto batch_starts = batch_gate->lod()[0];
    size_t num_batch = batch_starts.size() - 1;
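    // Backpropagation through time: walk the batched time steps in reverse.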
    for (int n = static_cast<int>(num_batch) - 1; n >= 0; n--) {
      int bstart = static_cast<int>(batch_starts[n]);
      int bend = static_cast<int>(batch_starts[n + 1]);

      Tensor gate = batch_gate->Slice(bstart, bend);
      Tensor cell = batch_cell.Slice(bstart, bend);
      Tensor cell_pre_act = batch_cell_pre_act->Slice(bstart, bend);
      lstm_value.gate_value = gate.data<T>();
      lstm_value.state_value = cell.data<T>();
      lstm_value.state_active_value = cell_pre_act.data<T>();

      Tensor out_g = batch_hidden_g.Slice(bstart, bend);
      Tensor gate_g = batch_gate_g.Slice(bstart, bend);
      Tensor cell_g = batch_cell_g.Slice(bstart, bend);
      lstm_grad.state_grad = cell_g.data<T>();
      lstm_grad.gate_grad = gate_g.data<T>();
      lstm_grad.output_grad = out_g.data<T>();

      if (n > 0) {
        int bstart_pre = static_cast<int>(batch_starts[n - 1]);
        Tensor cell_pre = batch_cell.Slice(bstart_pre, bstart);
        Tensor cell_pre_g = batch_cell_g.Slice(bstart_pre, bstart);
        lstm_value.prev_state_value = cell_pre.data<T>();
        lstm_grad.prev_state_grad = cell_pre_g.data<T>();
      } else {
        lstm_value.prev_state_value = c0 ? ordered_c0.data<T>() : nullptr;
        lstm_grad.prev_state_grad = c0_g ? ordered_c0_g.data<T>() : nullptr;
      }

      int cur_batch_size = bend - bstart;
      math::LstmUnitGradFunctor<DeviceContext, T>::compute(
          device_ctx, lstm_value, lstm_grad, frame_size, cur_batch_size,
          gate_act, cell_act, cand_act);

      if (n > 0) {
        int pre_h_start = static_cast<int>(batch_starts[n - 1]);
        int pre_h_end = pre_h_start + cur_batch_size;
        auto pre_hidden_g = batch_hidden_g.Slice(pre_h_start, pre_h_end);
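        // Propagate the gate gradient to the previous hidden state:
        // pre_hidden_g += gate_g * Weight^T.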
        math::matmul<DeviceContext, T>(device_ctx, gate_g, false, *weight, true,
                                       static_cast<T>(1.0), &pre_hidden_g,
                                       static_cast<T>(1.0));
        if (weight_g) {
          /* backward weight */
          auto pre_hidden = batch_hidden.Slice(pre_h_start, pre_h_end);
          math::matmul<DeviceContext, T>(device_ctx, pre_hidden, true, gate_g,
                                         false, static_cast<T>(1.0), weight_g,
                                         static_cast<T>(1.0));
        }
      } else {
        if (h0 && weight_g) {
          ReorderInitState<DeviceContext, T>(device_ctx, *h0, order,
                                             &ordered_h0, true);
          math::matmul<DeviceContext, T>(device_ctx, ordered_h0, true, gate_g,
                                         false, static_cast<T>(1.0), weight_g,
                                         static_cast<T>(1.0));
        }
        if (h0 && h0_g) {
          ordered_h0_g.mutable_data<T>(h0_g->dims(), ctx.GetPlace());
          math::matmul<DeviceContext, T>(device_ctx, gate_g, false, *weight,
                                         true, static_cast<T>(1.0),
                                         &ordered_h0_g, static_cast<T>(0.0));
        }
      }
    }

    math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
    if (in_g) {
      /* backward data */
      in_g->mutable_data<T>(ctx.GetPlace());
      to_seq(device_ctx, batch_gate_g, in_g);
    }
    if (bias && bias_g) {
      /* backward bias */
      Tensor b_g = *bias_g;
      b_g.Resize({bias_g->numel(), 1});
      Tensor gate_bias_g = b_g.Slice(0, 4 * frame_size);
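      // The gate bias gradient is the column-wise sum of the gate gradients
      // over all rows (every time step of every sequence).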
      math::ColwiseSum<DeviceContext, T> col_sum;
      col_sum(device_ctx, batch_gate_g, &gate_bias_g);
    }

    if (h0 && h0_g) {
      ReorderInitState<DeviceContext, T>(device_ctx, ordered_h0_g, order, h0_g,
                                         false);
    }
    if (c0 && c0_g) {
      ReorderInitState<DeviceContext, T>(device_ctx, ordered_c0_g, order, c0_g,
                                         false);
    }
  }
};

}  // namespace operators
}  // namespace paddle