/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/gru_op.h"

#include <memory>
#include <string>

#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/detail/gru_cpu_kernel.h"
#include "paddle/phi/kernels/funcs/detail/gru_kernel.h"

DECLARE_int32(paddle_num_threads);

namespace paddle {
namespace operators {

using framework::Tensor;

class GRUOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
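    // Shape conventions (see the op comment below): Input is (T x 3D), Weight
    // is (D x 3D), the optional Bias is (1 x 3D) and Hidden is (T x D), where
    // T is the total number of time steps in the mini-batch and D is
    // frame_size (the hidden size).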
    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "GRU");
    OP_INOUT_CHECK(ctx->HasInput("Weight"), "Input", "Weight", "GRU");
    OP_INOUT_CHECK(ctx->HasOutput("Hidden"), "Output", "Hidden", "GRU");
    bool is_test = ctx->Attrs().Get<bool>("is_test");
    if (!is_test) {
      OP_INOUT_CHECK(ctx->HasOutput("BatchGate"), "Output", "BatchGate", "GRU");
      OP_INOUT_CHECK(ctx->HasOutput("BatchResetHiddenPrev"), "Output",
                     "BatchResetHiddenPrev", "GRU");
      OP_INOUT_CHECK(ctx->HasOutput("BatchHidden"), "Output", "BatchHidden",
                     "GRU");
    }
    auto input_dims = ctx->GetInputDim("Input");
    auto weight_dims = ctx->GetInputDim("Weight");
    int input_size = input_dims[1];
    int frame_size = weight_dims[0];
    if (ctx->IsRuntime()) {
      PADDLE_ENFORCE_EQ(input_size, frame_size * 3,
                        platform::errors::InvalidArgument(
                            "The second dimension of Input(Input) must be 3 "
                            "times frame_size in GRUOp, but received %d "
                            "(Input) vs %d (frame_size).",
                            input_size, frame_size));
    }
    PADDLE_ENFORCE_EQ(
        weight_dims[1], frame_size * 3,
        platform::errors::InvalidArgument(
            "The shape of Input(Weight) matrix must be [frame_size, frame_size "
            "* 3], but received [%d, %d] (Weight) vs [%d, %d] (frame_size).",
            weight_dims[0], weight_dims[1], frame_size, frame_size * 3));
    if (ctx->HasInput("H0")) {
      auto h0_dims = ctx->GetInputDim("H0");
      PADDLE_ENFORCE_EQ(
          h0_dims[1], frame_size,
          platform::errors::InvalidArgument(
              "The width of Input(H0) must be equal to frame_size, but "
              "received %d (width of H0) vs %d (frame_size).",
              h0_dims[1], frame_size));
    }
    if (ctx->HasInput("Bias")) {
      auto bias_dims = ctx->GetInputDim("Bias");
      int bias_height = bias_dims[0];
      int bias_width = bias_dims[1];
      PADDLE_ENFORCE_EQ(
          bias_height, 1,
          platform::errors::InvalidArgument(
              "The shape of Bias must be [1, frame_size * 3], but received "
              "[%d, %d] (Bias) vs [1, %d] (frame_size * 3).",
              bias_height, bias_width, frame_size * 3));
      PADDLE_ENFORCE_EQ(
          bias_width, frame_size * 3,
          platform::errors::InvalidArgument(
              "The shape of Bias must be [1, frame_size * 3], but received "
              "[%d, %d] (Bias) vs [1, %d] (frame_size * 3).",
              bias_height, bias_width, frame_size * 3));
    }
    if (!is_test) {
      ctx->SetOutputDim("BatchGate", input_dims);
      ctx->SetOutputDim("BatchResetHiddenPrev", {input_dims[0], frame_size});
      ctx->SetOutputDim("BatchHidden", {input_dims[0], frame_size});
    }
    ctx->SetOutputDim("Hidden", {input_dims[0], frame_size});
    ctx->ShareLoD("Input", "Hidden");
  }
};

class GRUOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("Input",
             "(LoDTensor) The first input is a LoDTensor, which supports "
             "variable-length input sequences. The underlying tensor in "
             "this LoDTensor is a matrix with shape (T X 3D), where T is the "
             "total number of time steps in this mini-batch and D is the "
             "hidden size.");
    AddInput("H0",
             "(Tensor, optional) The initial hidden state is an optional "
             "input. This is a tensor with shape (N x D), where N is the "
             "batch size, D is the hidden size.")
        .AsDispensable();
    AddInput(
        "Weight",
        "(Tensor) The learnable hidden-hidden weight matrix with shape "
        "(D x 3D), where D is the hidden size. The elements, contiguous in "
        "memory, can be divided into two parts: the first part holds the "
        "weights of the update gate and reset gate with shape (D x 2D), and "
        "the second part holds the weights of the output candidate with "
        "shape (D x D).");
    AddInput("Bias",
             "(Tensor, optional) Bias vector with shape (1 x 3D) "
             "concatenating the biases of the update gate, reset gate and "
             "output candidate.")
        .AsDispensable();
    AddOutput("BatchGate",
              "(LoDTensor) To compute with batches, sequence data will be "
              "reorganized into several successive batches each containing "
              "data from the same time step. The LoDTensor BatchGate contains "
              "the update gate, reset gate and output candidate values "
              "organized in batches. The LoD size is 2. The first LoD contains "
              "the batch offsets and the second LoD contains the indexes in "
              "the raw sequence data.")
        .AsIntermediate()
        .AsExtra();
    AddOutput(
        "BatchResetHiddenPrev",
        "(LoDTensor) The reset hidden state LoDTensor organized in batches. "
        "This LoDTensor is a matrix with shape (T X D) and has the same LoD "
        "with `BatchGate`.")
        .AsIntermediate()
        .AsExtra();
    AddOutput(
        "BatchHidden",
        "(LoDTensor) The hidden state LoDTensor organized in batches.  "
        "This LoDTensor is a matrix with shape (T X D) and has the same LoD "
        "with `BatchGate`.")
        .AsIntermediate()
        .AsExtra();
    AddOutput(
        "Hidden",
        "(LoDTensor) the hidden state LoDTensor organized in sequences. "
        "This LoDTensor is a matrix with shape (T X D) and has the same LoD "
        "with `BatchGate`.");
    AddAttr<std::string>("activation",
                         "(string, default tanh) "
                         "The activation type used for output candidate {h}_t.")
        .SetDefault("tanh");
    AddAttr<std::string>(
        "gate_activation",
        "(string, default sigmoid) "
        "The activation type used in update gate and reset gate.")
        .SetDefault("sigmoid");
    AddAttr<bool>("is_reverse",
                  "(bool, default: False) "
                  "whether to compute reversed GRU.")
        .SetDefault(false);
    AddAttr<bool>("is_test", "True if in test phase.")
        .SetDefault(false)
        .AsExtra();
    AddAttr<bool>("origin_mode",
                  "(bool, default: False) "
                  "whether to use the origin mode described in "
                  "https://arxiv.org/abs/1412.3555.")
        .SetDefault(false);
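    // Note: origin_mode appears to swap the roles of u_t and (1 - u_t) in the
    // output formula documented in the comment below.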
    AddComment(R"DOC(
GRU Operator implements part of the calculations of a complete GRU unit as follows:

$$
update\_gate: u_t = actGate(xu_t + W_u * h_{t-1} + b_u) \\
reset\_gate: r_t = actGate(xr_t + W_r * h_{t-1} + b_r)  \\
output\_candidate: {h}_t = actNode(xc_t + W_c * dot(r_t, h_{t-1}) + b_c) \\
output: h_t = dot((1 - u_t), h_{t-1}) + dot(u_t, {h}_t)
$$

@note To implement the complete GRU unit, a fully-connected operator must be
applied beforehand to compute xu, xr and xc, which are then fed as the Input
of the GRU operator.
)DOC");
  }
};

class GRUGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "GRU@Grad");
    OP_INOUT_CHECK(ctx->HasInput("Weight"), "Input", "Weight", "GRU@Grad");
    OP_INOUT_CHECK(ctx->HasInput("BatchGate"), "Input", "BatchGate",
                   "GRU@Grad");
    OP_INOUT_CHECK(ctx->HasInput("BatchResetHiddenPrev"), "Input",
                   "BatchResetHiddenPrev", "GRU@Grad");
    OP_INOUT_CHECK(ctx->HasInput("BatchHidden"), "Input", "BatchHidden",
                   "GRU@Grad");
    OP_INOUT_CHECK(ctx->HasInput("Hidden"), "Input", "Hidden", "GRU@Grad");
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Hidden")), "Input",
                   framework::GradVarName("Hidden"), "GRU@Grad");

    auto input_dims = ctx->GetInputDim("Input");
    auto weight_dims = ctx->GetInputDim("Weight");
    int input_size = input_dims[1];
    int frame_size = weight_dims[0];
    int weight_height = weight_dims[0];
    int weight_width = weight_dims[1];
    PADDLE_ENFORCE_EQ(
        input_size, frame_size * 3,
        platform::errors::InvalidArgument(
            "The second dimension of Input(Input) must be 3 times "
            "frame_size in GRUOp, but received %d (Input) vs %d (frame_size).",
            input_size, frame_size));
    PADDLE_ENFORCE_EQ(
        weight_height, frame_size,
        platform::errors::InvalidArgument(
            "The shape of Input(Weight) matrix must be [frame_size, frame_size "
            "* 3], but received [%d, %d] (Weight) vs [%d, %d] (frame_size).",
            weight_height, weight_width, frame_size, frame_size * 3));
    PADDLE_ENFORCE_EQ(
        weight_width, frame_size * 3,
        platform::errors::InvalidArgument(
            "The shape of Input(Weight) matrix must be [frame_size, frame_size "
            "* 3], but received [%d, %d] (Weight) vs [%d, %d] (frame_size).",
            weight_height, weight_width, frame_size, frame_size * 3));
    if (ctx->HasInput("H0")) {
      auto h0_dims = ctx->GetInputDim("H0");
      PADDLE_ENFORCE_EQ(
          h0_dims[1], frame_size,
          platform::errors::InvalidArgument(
              "The width of Input(H0) must be equal to frame_size, but "
              "received %d (width of H0) vs %d (frame_size).",
              h0_dims[1], frame_size));
      auto h0_grad_name = framework::GradVarName("H0");
      if (ctx->HasOutput(h0_grad_name))
        ctx->SetOutputDim(h0_grad_name, h0_dims);
    }
    if (ctx->HasInput("Bias")) {
      auto bias_dims = ctx->GetInputDim("Bias");
      int bias_height = bias_dims[0];
      int bias_width = bias_dims[1];
      PADDLE_ENFORCE_EQ(
          bias_height, 1,
          platform::errors::InvalidArgument(
              "The shape of Bias must be [1, frame_size * 3], but received "
              "[%d, %d] (Bias) vs [1, %d] (frame_size * 3).",
              bias_height, bias_width, frame_size * 3));
      PADDLE_ENFORCE_EQ(
          bias_width, frame_size * 3,
          platform::errors::InvalidArgument(
              "The shape of Bias must be [1, frame_size * 3], but received "
              "[%d, %d] (Bias) vs [1, %d] (frame_size * 3).",
              bias_height, bias_width, frame_size * 3));
      auto bias_grad_name = framework::GradVarName("Bias");
      if (ctx->HasOutput(bias_grad_name))
        ctx->SetOutputDim(bias_grad_name, bias_dims);
    }
    auto input_grad_name = framework::GradVarName("Input");
    if (ctx->HasOutput(input_grad_name))
      ctx->SetOutputDim(input_grad_name, input_dims);
    auto weight_grad_name = framework::GradVarName("Weight");
    if (ctx->HasOutput(weight_grad_name))
      ctx->SetOutputDim(weight_grad_name, weight_dims);
  }

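  // Derive the backward kernel's data type from the incoming Hidden gradient
  // tensor.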
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType(
                                       ctx, framework::GradVarName("Hidden")),
                                   ctx.device_context());
  }
};

template <typename T>
class GRUCPUKernel : public framework::OpKernel<T> {
 public:
  void BatchCompute(const framework::ExecutionContext& context) const {
    using DeviceContext = paddle::platform::CPUDeviceContext;
    using LodTensorPtr = LoDTensor*;
    bool is_test = context.Attr<bool>("is_test");

    bool origin_mode = context.Attr<bool>("origin_mode");
    auto* input = context.Input<LoDTensor>("Input");
    auto* h0 = context.Input<Tensor>("H0");
    auto* weight = context.Input<Tensor>("Weight");
    const T* weight_data = weight->data<T>();
    auto* bias = context.Input<Tensor>("Bias");
    auto* hidden = context.Output<LoDTensor>("Hidden");
    hidden->mutable_data<T>(context.GetPlace());

    auto input_dims = input->dims();
    auto hidden_dims = hidden->dims();

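    // In inference (is_test) the intermediate batch tensors are not exposed
    // as op outputs (they are only needed by the backward pass), so local
    // temporaries are used instead.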
    LodTensorPtr batch_gate, batch_reset_hidden_prev, batch_hidden;
    LoDTensor batch_gate_tmp, batch_reset_hidden_prev_tmp, batch_hidden_tmp;
    if (is_test) {
      batch_gate = &batch_gate_tmp;
      batch_gate->Resize(input_dims);

      batch_reset_hidden_prev = &batch_reset_hidden_prev_tmp;
      batch_reset_hidden_prev->Resize(hidden_dims);

      batch_hidden = &batch_hidden_tmp;
      batch_hidden->Resize(hidden_dims);
    } else {
      batch_gate = context.Output<LoDTensor>("BatchGate");
      batch_hidden = context.Output<LoDTensor>("BatchHidden");
      batch_reset_hidden_prev =
          context.Output<LoDTensor>("BatchResetHiddenPrev");
    }
    batch_gate->mutable_data<T>(context.GetPlace());
    batch_reset_hidden_prev->mutable_data<T>(context.GetPlace());
    batch_hidden->mutable_data<T>(context.GetPlace());

    bool is_reverse = context.Attr<bool>("is_reverse");
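    // Reorganize the LoD (sequence) input into batches that group data from
    // the same time step, so each step below can be computed with one GEMM;
    // the resulting LoD layout is described for the BatchGate output above.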
    phi::funcs::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;
    auto& dev_ctx = context.template device_context<DeviceContext>();
    to_batch(dev_ctx, *input, batch_gate, true, is_reverse);

    if (bias) {
      phi::funcs::RowwiseAdd<DeviceContext, T> add_bias;
      add_bias(dev_ctx, *batch_gate, *bias, batch_gate);
    }

    int frame_size = hidden_dims[1];
    phi::funcs::GRUMetaValue<T> gru_value;
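    // Weight layout matches the op definition: the leading D x 2D block holds
    // the update/reset gate weights and the trailing D x D block holds the
    // output-candidate weights, hence the 2 * frame_size * frame_size offset.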
    gru_value.gate_weight = const_cast<T*>(weight_data);
    gru_value.state_weight =
        const_cast<T*>(weight_data + 2 * frame_size * frame_size);
    Tensor ordered_h0;

    framework::Vector<size_t> order(batch_gate->lod()[2]);

    if (h0) {
      // Since batch computing for GRU reorders the input sequences according
      // to their length, the initial hidden state also needs to be reordered.
      ReorderInitState<DeviceContext, T>(
          context.template device_context<DeviceContext>(), *h0, order,
          &ordered_h0, true);
      gru_value.prev_out_value = ordered_h0.data<T>();
    } else {
      gru_value.prev_out_value = nullptr;
    }
    auto batch_starts = batch_gate->lod()[0];
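    // lod()[0] of the batched gate tensor holds the batch offsets: rows
    // [batch_starts[n], batch_starts[n + 1]) belong to time step n.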
    size_t seq_len = batch_starts.size() - 1;
    auto active_node = phi::funcs::detail::GetActivationType(
        context.Attr<std::string>("activation"));
    auto active_gate = phi::funcs::detail::GetActivationType(
        context.Attr<std::string>("gate_activation"));

#ifdef PADDLE_WITH_MKLML
    // Use MKL packed GEMM to speed up the computation.
    if (FLAGS_paddle_num_threads >= 4) {
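      // Pack both weight blocks once up front and reuse the packed buffers in
      // GEMM_COMPUTE for every batched time step, amortizing the packing cost
      // over the whole sequence.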
      auto blas = phi::funcs::GetBlas<DeviceContext, T>(dev_ctx);
      T* packed_gate = blas.GEMM_ALLOC(CblasBMatrix, 1 /*height of C*/,
                                       frame_size * 2 /*width of weight*/,
                                       frame_size /*height of weight*/);
      PADDLE_ENFORCE_NOT_NULL(
          packed_gate,
          platform::errors::NotFound(
              "The calculation result of packed_gate by "
              "GEMM_ALLOC should not be null when using MKL."));
      blas.GEMM_PACK(CblasBMatrix, CblasNoTrans, 1 /*cur bs?*/, frame_size * 2,
                     frame_size, T(1.0), gru_value.gate_weight, frame_size * 2,
                     packed_gate);
      T* packed_state = blas.GEMM_ALLOC(CblasBMatrix, 1 /*height of C*/,
                                        frame_size /*width of weight*/,
                                        frame_size /*height of weight*/);
      PADDLE_ENFORCE_NOT_NULL(
          packed_state,
          platform::errors::NotFound(
              "The calculation result of packed_state by "
              "GEMM_ALLOC should not be null when using MKL."));
      blas.GEMM_PACK(CblasBMatrix, CblasNoTrans, 1 /*cur bs?*/, frame_size,
                     frame_size, T(1.0), gru_value.state_weight, frame_size,
                     packed_state);
      for (size_t n = 0; n < seq_len; n++) {
        int bstart = static_cast<int>(batch_starts[n]);
        int bend = static_cast<int>(batch_starts[n + 1]);
        int cur_batch_size = bend - bstart;

        Tensor gate_t = batch_gate->Slice(bstart, bend);
        Tensor reset_hidden_prev_t =
            batch_reset_hidden_prev->Slice(bstart, bend);
        Tensor hidden_t = batch_hidden->Slice(bstart, bend);
        gru_value.output_value = hidden_t.data<T>();
        gru_value.gate_value = gate_t.data<T>();
        gru_value.reset_output_value = reset_hidden_prev_t.data<T>();

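        // Accumulate the hidden-to-hidden contribution of h_{t-1} into the
        // update/reset gate pre-activations; skipped on the first step when
        // no initial hidden state is available.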
        if (gru_value.prev_out_value) {
          blas.GEMM_COMPUTE(
              CblasNoTrans, CblasPacked, cur_batch_size, frame_size * 2,
              frame_size, gru_value.prev_out_value, frame_size, packed_gate,
              frame_size * 2, T(1), gru_value.gate_value, frame_size * 3);
        }

        phi::funcs::detail::forward_reset_output<DeviceContext>(
            phi::funcs::detail::forward::gru_resetOutput<T>(), gru_value,
            frame_size, cur_batch_size, active_gate);

        if (gru_value.prev_out_value) {
          blas.GEMM_COMPUTE(
              CblasNoTrans, CblasPacked, cur_batch_size, frame_size, frame_size,
              gru_value.reset_output_value, frame_size, packed_state,
              frame_size, T(1), gru_value.gate_value + frame_size * 2,
              frame_size * 3);
        }

        phi::funcs::detail::forward_final_output<DeviceContext>(
            phi::funcs::detail::forward::gru_finalOutput<T>(), gru_value,
            frame_size, cur_batch_size, active_node, origin_mode);

        gru_value.prev_out_value = gru_value.output_value;
      }

      blas.GEMM_FREE(packed_gate);
      blas.GEMM_FREE(packed_state);
    } else {
#endif
      for (size_t n = 0; n < seq_len; n++) {
        int bstart = static_cast<int>(batch_starts[n]);
        int bend = static_cast<int>(batch_starts[n + 1]);
        int cur_batch_size = bend - bstart;

        Tensor gate_t = batch_gate->Slice(bstart, bend);
        Tensor reset_hidden_prev_t =
            batch_reset_hidden_prev->Slice(bstart, bend);
        Tensor hidden_t = batch_hidden->Slice(bstart, bend);
        gru_value.output_value = hidden_t.data<T>();
        gru_value.gate_value = gate_t.data<T>();
        gru_value.reset_output_value = reset_hidden_prev_t.data<T>();

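        // Fallback path: GRUUnitFunctor performs the gate GEMMs, reset-output
        // and final-output activations for one batched time step in a single
        // call.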
        phi::funcs::GRUUnitFunctor<DeviceContext, T>::compute(
            dev_ctx, gru_value, frame_size, cur_batch_size, active_node,
            active_gate, origin_mode);

        gru_value.prev_out_value = gru_value.output_value;
      }
#ifdef PADDLE_WITH_MKLML
    }
#endif
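    // Convert the batched hidden states back to the original sequence layout
    // (the inverse of the LoDTensor2Batch reordering above).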
    phi::funcs::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
    batch_hidden->set_lod(batch_gate->lod());
    to_seq(dev_ctx, *batch_hidden, hidden);
  }

  void Compute(const framework::ExecutionContext& context) const override {
    BatchCompute(context);
  }
};

template <typename T>
class GRUGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("gru_grad");
    grad_op->SetInput("Input", this->Input("Input"));
    grad_op->SetInput("H0", this->Input("H0"));
    grad_op->SetInput("Bias", this->Input("Bias"));
    grad_op->SetInput("Weight", this->Input("Weight"));

    grad_op->SetInput("BatchGate", this->Output("BatchGate"));
    grad_op->SetInput("BatchResetHiddenPrev",
                      this->Output("BatchResetHiddenPrev"));
    grad_op->SetInput("BatchHidden", this->Output("BatchHidden"));
    grad_op->SetInput("Hidden", this->Output("Hidden"));

    grad_op->SetInput(framework::GradVarName("Hidden"),
                      this->OutputGrad("Hidden"));

    grad_op->SetOutput(framework::GradVarName("H0"), this->InputGrad("H0"));
    grad_op->SetOutput(framework::GradVarName("Input"),
                       this->InputGrad("Input"));
    grad_op->SetOutput(framework::GradVarName("Weight"),
                       this->InputGrad("Weight"));
    grad_op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias"));

    grad_op->SetAttrMap(this->Attrs());
  }
};

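// gru_grad never reads the data of Input or Bias (only their dims are used in
// InferShape), so their buffers are marked as not needed and may be released.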
DECLARE_NO_NEED_BUFFER_VARS_INFERER(GRUGradOpNoNeedBufferVarInferer, "Input",
                                    "Bias");

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(gru, ops::GRUOp, ops::GRUOpMaker,
                  ops::GRUGradOpMaker<paddle::framework::OpDesc>,
                  ops::GRUGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(gru_grad, ops::GRUGradOp,
                  ops::GRUGradOpNoNeedBufferVarInferer);
REGISTER_OP_CPU_KERNEL(gru, ops::GRUCPUKernel<float>,
                       ops::GRUCPUKernel<double>);
REGISTER_OP_CPU_KERNEL(
    gru_grad, ops::GRUGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GRUGradKernel<paddle::platform::CPUDeviceContext, double>);