/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/fusion_gru_op.h"
#include <cstring>  // for memcpy
#include <string>
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/cpu_vec.h"
#include "paddle/fluid/operators/math/fc_compute.h"
#include "paddle/fluid/operators/math/sequence2batch.h"
#include "paddle/fluid/platform/cpu_info.h"

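// When true, FusionGRUKernel computes each sequence step by step
// (SeqCompute); when false, it first reorders the sequences into
// time-step batches (BatchCompute). See FusionGRUKernel::Compute below.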
DEFINE_bool(gru_use_seq, true, "Use sequence mode");

namespace paddle {
namespace operators {

void FusionGRUOp::InferShape(framework::InferShapeContext* ctx) const {
  PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of GRU should not be null.");
  PADDLE_ENFORCE(ctx->HasInput("WeightX"),
                 "Input(WeightX) of GRU should not be null.");
  PADDLE_ENFORCE(ctx->HasInput("WeightH"),
                 "Input(WeightH) of GRU should not be null.");

  PADDLE_ENFORCE(ctx->HasOutput("XX"), "Output(XX) of GRU should not be null.");
  PADDLE_ENFORCE(ctx->HasOutput("ReorderedH0"),
                 "Output(ReorderedH0) of GRU should not be null.");
  PADDLE_ENFORCE(ctx->HasOutput("BatchedInput"),
                 "Output(BatchedInput) of GRU should not be null.");
  PADDLE_ENFORCE(ctx->HasOutput("BatchedOut"),
                 "Output(BatchedOut) of GRU should not be null.");
  PADDLE_ENFORCE(ctx->HasOutput("Hidden"),
                 "Output(Hidden) of GRU should not be null.");

  auto x_dims = ctx->GetInputDim("X");
  PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank must be 2.");

  auto wx_dims = ctx->GetInputDim("WeightX");
  PADDLE_ENFORCE_EQ(wx_dims.size(), 2,
                    "The rank of Input(WeightX) should be 2.");
  PADDLE_ENFORCE_EQ(wx_dims[0], x_dims[1],
                    "The first dimension of Input(WeightX) "
                    "should be %d.",
                    x_dims[1]);

  int frame_size = wx_dims[1] / 3;
  auto wh_dims = ctx->GetInputDim("WeightH");
  PADDLE_ENFORCE_EQ(wh_dims.size(), 2,
                    "The rank of Input(WeightH) should be 2.");
  PADDLE_ENFORCE_EQ(wh_dims[0], frame_size,
                    "The first dimension of Input(WeightH) "
                    "should be %d.",
                    frame_size);
  PADDLE_ENFORCE_EQ(wh_dims[1], 3 * frame_size,
                    "The second dimension of Input(WeightH) "
                    "should be 3 * %d.",
                    frame_size);

  if (ctx->HasInput("H0")) {
    auto h0_dims = ctx->GetInputDim("H0");
    PADDLE_ENFORCE_EQ(h0_dims[1], frame_size,
                      "The width of H0 must be equal to frame_size.");
  }
  if (ctx->HasInput("Bias")) {
    auto b_dims = ctx->GetInputDim("Bias");
    PADDLE_ENFORCE_EQ(b_dims.size(), 2, "The rank of Input(Bias) should be 2.");
    PADDLE_ENFORCE_EQ(b_dims[0], 1,
                      "The first dimension of Input(Bias) should be 1.");
    PADDLE_ENFORCE_EQ(b_dims[1], frame_size * 3,
                      "The shape of Bias must be [1, frame_size * 3].");
  }
  framework::DDim out_dims({x_dims[0], frame_size});
  ctx->SetOutputDim("Hidden", out_dims);
  ctx->SetOutputDim("BatchedInput", {x_dims[0], wx_dims[1]});
  ctx->SetOutputDim("BatchedOut", out_dims);
  ctx->ShareLoD("X", "Hidden");

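  // XX serves two purposes: in sequence mode it always holds the T x 3D
  // FC result; in batch mode it holds whichever of the FC result (width
  // 3D) or the batched copy of X (width M) is narrower (see BatchCompute).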
  int xx_width;
  if (FLAGS_gru_use_seq) {
    xx_width = wx_dims[1];
  } else {
    xx_width = x_dims[1] > wx_dims[1] ? wx_dims[1] : x_dims[1];
  }
  ctx->SetOutputDim("XX", {x_dims[0], xx_width});
  ctx->ShareLoD("X", "XX");
}

framework::OpKernelType FusionGRUOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  return framework::OpKernelType(
      framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
      ctx.device_context());
}

void FusionGRUOpMaker::Make() {
  AddInput("X",
           "(LoDTensor) the input is a LodTensor, which support "
T
tensor-tang 已提交
109
           "variable-time length input sequence. The underlying tensor in "
T
tensor-tang 已提交
110 111
           "this LoDTensor is a matrix with shape (T X M), where T is the "
           "total time steps in this mini-batch, M is the dim size of x.");
  AddInput("H0",
           "(Tensor, optional) The initial hidden state is an optional "
           "input. This is a tensor with shape (N x D), where N is the "
           "batch size, D is the hidden size.")
      .AsDispensable();
  AddInput("WeightX",
           "(Tensor) The FC weight with shape (M x 3D),"
           "where M is the dim size of x, D is the hidden size. ");
  AddInput("WeightH",
           "(Tensor) (D x 3D) Same as GRUOp, where D is the hidden size. "
           "This weight is not stored as three D x D blocks "
           "{W_update, W_reset, W_state}; it actually consists of two "
           "parts: a D x 2D block {W_update, W_reset} followed by a "
           "D x D block {W_state}, i.e. {D x (D + D); D x D}.");
  AddInput("Bias",
           "(Tensor, optional) (1 x 3D). "
           "Almost the same as GRUOp. "
           "Note: if the FC has a bias, it should be added into this bias.")
      .AsDispensable();
  AddOutput("ReorderedH0", "(Tensor) (N x D), where N is the mini-batch size.")
      .AsIntermediate();
  AddOutput("XX",
            "(LoDTensor) the result after X * WeightX (size is T x 3D)"
            " or batched_X (size is T x M); this is chosen automatically,"
            " where T is the total number of time steps in this mini-batch,"
            " D is the hidden size and M is the dim size of the x input.")
      .AsIntermediate();
  AddOutput("BatchedInput", "(LoDTensor) (T x 3D)").AsIntermediate();
  AddOutput("BatchedOut", "(LoDTensor) (T X D) save batched hidden.")
      .AsIntermediate();
  AddOutput("Hidden", "(LoDTensor) (T x D) Same as GRUOp");
  AddAttr<std::string>("activation",
                       "(string, default tanh) "
                       "The activation type used for output candidate {h}_t.")
      .SetDefault("tanh");
  AddAttr<std::string>(
      "gate_activation",
      "(string, default sigmoid) "
      "The activation type used in update gate and reset gate.")
      .SetDefault("sigmoid");
  AddAttr<bool>("is_reverse",
                "(bool, defalut: False) "
                "whether to compute reversed GRU.")
      .SetDefault(false);
  AddComment(R"DOC(
The Fusion complete GRU Operator.
This operator fuse the fully-connected operator into GRU, 
more details can refer to GRU op.
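
The kernel implements the standard GRU recurrence. With WeightX packing
{W_xu, W_xr, W_xc} and WeightH packing {W_hu, W_hr; W_hc} as described
above (these subscripted names are notation for this comment only):

  zt  = act_gate(xt * W_xu + ht_1 * W_hu + b_u)          (update gate)
  rt  = act_gate(xt * W_xr + ht_1 * W_hr + b_r)          (reset gate)
  ht~ = act_state(xt * W_xc + (rt . ht_1) * W_hc + b_c)  (candidate)
  ht  = zt . ht~ + (1 - zt) . ht_1

where "." denotes elementwise multiplication.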
)DOC");
}

template <typename T>
class FusionGRUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    if (FLAGS_gru_use_seq) {
      SeqCompute(ctx);
    } else {
      BatchCompute(ctx);
    }
  }

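// Picks vectorized activation and "cross" interpolation functors at run
// time: AVX implementations when the CPU supports them, generic ones
// otherwise. As used below, cross(n, z, c, h_prev, out) computes
// out = z * c + (1 - z) * h_prev elementwise.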
#define INIT_VEC_FUNC                                                     \
  std::function<void(const int, const T *, T *)> act_gate, act_state;     \
  std::function<void(const int, const T*, const T*, const T*, T*)> cross; \
  auto& act_gate_str = ctx.Attr<std::string>("gate_activation");          \
  auto& act_state_str = ctx.Attr<std::string>("activation");              \
  if (platform::jit::MayIUse(platform::jit::avx)) {                       \
    math::VecActivations<T, platform::jit::avx> act_functor;              \
    act_gate = act_functor(act_gate_str);                                 \
    act_state = act_functor(act_state_str);                               \
    cross = math::vec_cross<T, platform::jit::avx>;                       \
  } else {                                                                \
    math::VecActivations<T, platform::jit::isa_any> act_functor;          \
    act_gate = act_functor(act_gate_str);                                 \
    act_state = act_functor(act_state_str);                               \
    cross = math::vec_cross<T, platform::jit::isa_any>;                   \
  }

  void SeqCompute(const framework::ExecutionContext& ctx) const {
    using DeviceContext = paddle::platform::CPUDeviceContext;
    auto* x = ctx.Input<LoDTensor>("X");
    auto* h0 = ctx.Input<Tensor>("H0");
    auto* wx = ctx.Input<Tensor>("WeightX");
    auto* wh = ctx.Input<Tensor>("WeightH");
    auto* bias = ctx.Input<Tensor>("Bias");

    auto* xx = ctx.Output<LoDTensor>("XX");
    auto* hidden_out = ctx.Output<LoDTensor>("Hidden");
    bool is_reverse = ctx.Attr<bool>("is_reverse");
    INIT_VEC_FUNC

    auto x_lod = x->lod();
    auto x_dims = x->dims();    // T x M
    auto wh_dims = wh->dims();  // D x 3D
    const int N = x_lod[0].size() - 1;
    const int total_T = x_dims[0];
    const int M = x_dims[1];
    const int D3 = wh_dims[1];
    const int D = wh_dims[0];
    const int D2 = D * 2;
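
    // Each row of xx packs the three gate pre-activations
    // [update, reset, state], each of width D (row width D3 = 3 * D).
    // WeightH stores {W_update, W_reset} as a D x 2D block followed by
    // W_state as a D x D block, so the state weights start at D * D2.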

    const T* x_data = x->data<T>();
    const T* h0_data = h0 ? h0->data<T>() : NULL;
    const T* wx_data = wx->data<T>();
    const T* wh_data = wh->data<T>();
    const T* wh_state_data = wh_data + D * D2;
    T* xx_data = xx->mutable_data<T>(ctx.GetPlace());
    T* hidden_out_data = hidden_out->mutable_data<T>(ctx.GetPlace());

    auto blas = math::GetBlas<DeviceContext, T>(ctx);
    math::FCCompute<DeviceContext, T>(blas, total_T, D3, M, x_data, wx_data,
                                      xx_data, bias ? bias->data<T>() : NULL);

    int xx_offset = D3;
    int gate_offset = D;
    if (is_reverse) {
      const int offset = (total_T - 1) * D;
      xx_data = xx_data + offset * 3;
      hidden_out_data = hidden_out_data + offset;
      xx_offset = -D3;
      gate_offset = -D;
    }
    auto move_step = [&]() {
      xx_data = xx_data + xx_offset;
      hidden_out_data = hidden_out_data + gate_offset;
    };
    for (int i = 0; i < N; ++i) {
      int bid = is_reverse ? N - 1 - i : i;
      int seq_len = x_lod[0][bid + 1] - x_lod[0][bid];
      const T* prev_hidden_data = NULL;
      int tstart = 0;
      if (h0_data) {
        prev_hidden_data = h0_data + bid * D;
      } else {
        // W: {W_update, W_reset; W_state}
        // update gate
        act_gate(D, xx_data, xx_data);
        // state gate
        act_state(D, xx_data + D2, xx_data + D2);
        // out = a*b
        blas.VMUL(D, xx_data, xx_data + D2, hidden_out_data);
        // save prev
        prev_hidden_data = hidden_out_data;
        tstart = 1;
        move_step();
      }
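      // Remaining steps (xt * WeightX + bias is already in xx_data from
      // the FCCompute above):
      //   [zt, rt] = act_gate(xx[0:2D] + ht_1 * {W_update, W_reset})
      //   ht~      = act_state(xx[2D:3D] + (rt * ht_1) * W_state)
      //   ht       = zt * ht~ + (1 - zt) * ht_1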
      for (int step = tstart; step < seq_len; ++step) {
        // gemm prev * (Wu + Wr)
        blas.GEMM(CblasNoTrans, CblasNoTrans, 1, D2, D, static_cast<T>(1),
                  prev_hidden_data, D, wh_data, D2, static_cast<T>(1), xx_data,
                  D3);
        act_gate(D2, xx_data, xx_data);
        // rt = rt*ht_1 inplace result
        blas.VMUL(D, prev_hidden_data, xx_data + D, hidden_out_data);

        // gemm rt * Ws
        blas.GEMM(CblasNoTrans, CblasNoTrans, 1, D, D, static_cast<T>(1),
                  hidden_out_data, D, wh_state_data, D, static_cast<T>(1),
                  xx_data + D2, D3);
        act_state(D, xx_data + D2, xx_data + D2);
        // out = zt*ht~ + (1-zt)*ht_1
        cross(D, xx_data, xx_data + D2, prev_hidden_data, hidden_out_data);
        // save prev
        prev_hidden_data = hidden_out_data;
        move_step();
      }
    }
  }

  void BatchCompute(const framework::ExecutionContext& ctx) const {
    using DeviceContext = paddle::platform::CPUDeviceContext;
    auto* x = ctx.Input<LoDTensor>("X");
    auto* wx = ctx.Input<Tensor>("WeightX");
    auto* wh = ctx.Input<Tensor>("WeightH");
    auto* bias = ctx.Input<Tensor>("Bias");
    auto* h0 = ctx.Input<Tensor>("H0");

    auto* reordered_h0 = ctx.Output<Tensor>("ReorderedH0");
    auto* xx = ctx.Output<LoDTensor>("XX");
    auto* batched_input = ctx.Output<LoDTensor>("BatchedInput");
    auto* batched_out = ctx.Output<LoDTensor>("BatchedOut");
    auto* hidden_out = ctx.Output<LoDTensor>("Hidden");

    bool is_reverse = ctx.Attr<bool>("is_reverse");
    INIT_VEC_FUNC

    const T* x_data = x->data<T>();
    const T* wx_data = wx->data<T>();
    const T* wh_data = wh->data<T>();
    T* xx_data = xx->mutable_data<T>(ctx.GetPlace());
    T* batched_input_data = batched_input->mutable_data<T>(ctx.GetPlace());
    T* batched_out_data = batched_out->mutable_data<T>(ctx.GetPlace());
    hidden_out->mutable_data<T>(ctx.GetPlace());

    auto x_dims = x->dims();
    auto wx_dims = wx->dims();
    const int D3 = wx_dims[1];
    const int D = D3 / 3;
    const int D2 = D * 2;
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto blas = math::GetBlas<DeviceContext, T>(dev_ctx);
    math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;
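    // Do the FC and the seq-to-batch reorder in the cheaper order: if
    // M > 3D, run the FC first so only the narrower T x 3D result is
    // reordered; otherwise reorder the raw T x M input first and write
    // the FC result directly into the batched buffer.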
    if (x_dims[1] > wx_dims[1]) {
      math::FCCompute<DeviceContext, T>(blas, x_dims[0], wx_dims[1], x_dims[1],
                                        x_data, wx_data, xx_data,
                                        bias ? bias->data<T>() : NULL);
      to_batch(dev_ctx, *xx, batched_input, true, is_reverse);
    } else {
      to_batch(dev_ctx, *x, xx, true, is_reverse);
      batched_input->set_lod(xx->lod());
      math::FCCompute<DeviceContext, T>(blas, x_dims[0], wx_dims[1], x_dims[1],
                                        xx_data, wx_data, batched_input_data,
                                        bias ? bias->data<T>() : NULL);
    }

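    // LoDTensor2BatchFunctor keeps its bookkeeping in the batched LoD:
    // batched_lod[0] holds the start offset of each time-step batch and
    // batched_lod[2] holds the reordered original sequence indices, used
    // below to reorder H0 to match the batched layout.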
    auto batched_lod = batched_input->lod();
    const auto& seq_order = batched_lod[2];
    const int max_bs = seq_order.size();
    reordered_h0->Resize({max_bs, D});

    int tstart = 0;
    T* prev_hidden_data = NULL;
    if (h0) {
      // reorder h0
      T* reordered_h0_data = reordered_h0->mutable_data<T>(ctx.GetPlace());
      const T* h0_data = h0->data<T>();
      prev_hidden_data = reordered_h0_data;
      size_t sz = sizeof(T) * D;
      for (int i = 0; i < max_bs; ++i) {
        std::memcpy(reordered_h0_data, h0_data + seq_order[i] * D, sz);
        reordered_h0_data += D;
      }
    } else {
      // compute without h0
      T* cur_in_data = batched_input_data;
      T* cur_out_data = batched_out_data;
      // W: {W_update, W_reset; W_state}
      for (int i = 0; i < max_bs; ++i) {
        // update gate
        act_gate(D, cur_in_data, cur_in_data);
        // state gate
        act_state(D, cur_in_data + D2, cur_in_data + D2);
        // out = a*b
        blas.VMUL(D, cur_in_data, cur_in_data + D2, cur_out_data);
        // add offset
        cur_in_data += D3;
        cur_out_data += D;
      }
      tstart = 1;
      prev_hidden_data = batched_out_data;
    }
    // Then process the remaining time steps, starting from tstart.
    const T* wh_state_data = wh_data + D * D2;
    const auto& batch_starts = batched_lod[0];
    const int max_seq_len = batch_starts.size() - 1;
    batched_input_data = batched_input_data + tstart * max_bs * D3;
    batched_out_data = batched_out_data + tstart * max_bs * D;
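    // Each iteration advances every sequence that still has a step at
    // `step` (the first cur_bs rows of the batch), so the two GEMMs below
    // process cur_bs rows at once instead of one sequence at a time.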
    for (int step = tstart; step < max_seq_len; ++step) {
      const int cur_bs = batch_starts[step + 1] - batch_starts[step];
      // gemm prev * (Wu + Wr)
      blas.GEMM(CblasNoTrans, CblasNoTrans, cur_bs, D2, D, static_cast<T>(1),
                prev_hidden_data, D, wh_data, D2, static_cast<T>(1),
                batched_input_data, D3);

      T* cur_batched_data = batched_input_data;
      T* cur_out_data = batched_out_data;
      T* cur_prev_hidden_data = prev_hidden_data;
      for (int i = 0; i < cur_bs; ++i) {
        act_gate(D2, cur_batched_data, cur_batched_data);
        // rt = rt*ht_1 inplace result
        blas.VMUL(D, cur_prev_hidden_data, cur_batched_data + D, cur_out_data);

        cur_batched_data += D3;
        cur_prev_hidden_data += D;
        cur_out_data += D;
      }

      cur_batched_data = batched_input_data;
      cur_out_data = batched_out_data;
      blas.GEMM(CblasNoTrans, CblasNoTrans, cur_bs, D, D, static_cast<T>(1),
                cur_out_data, D, wh_state_data, D, static_cast<T>(1),
                cur_batched_data + D2, D3);

      cur_prev_hidden_data = prev_hidden_data;
      for (int i = 0; i < cur_bs; ++i) {
        // ht~ = act_state(...)
        act_state(D, cur_batched_data + D2, cur_batched_data + D2);
        // out = zt*ht~ + (1-zt)*ht_1
        cross(D, cur_batched_data, cur_batched_data + D2, cur_prev_hidden_data,
              cur_out_data);

        cur_batched_data += D3;
        cur_prev_hidden_data += D;
        cur_out_data += D;
      }
      prev_hidden_data = batched_out_data;
      batched_out_data = cur_out_data;
      batched_input_data = cur_batched_data;
    }

    math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
    batched_out->set_lod(batched_lod);
    to_seq(dev_ctx, *batched_out, hidden_out);
  }
#undef INIT_VEC_FUNC
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(fusion_gru, ops::FusionGRUOp, ops::FusionGRUOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OP_CPU_KERNEL(fusion_gru, ops::FusionGRUKernel<float>,
                       ops::FusionGRUKernel<double>);