/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/utils.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/platform/miopen_helper.h"
#endif

namespace paddle {
namespace operators {

using LoDTensor = framework::LoDTensor;
using Tensor = framework::Tensor;

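// RNNDescriptors bundles the cuDNN/MIOpen descriptors used by one rnn op
// invocation: per-timestep input/output tensor descriptors, initial/last
// hidden and cell state descriptors, and the dropout, weight and RNN
// descriptors. Create() fills them and reports the required workspace and
// reserve-space sizes.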
class RNNDescriptors {
 public:
  RNNDescriptors(int seq_length, int batch_size, int input_size,
                 int hidden_size, int num_layers, float dropout_prob, int seed,
#ifdef PADDLE_WITH_HIP
                 int weight_numel, miopenRNNMode_t mode, bool is_bidirec,
#else
                 int weight_numel, cudnnRNNMode_t mode, bool is_bidirec,
#endif
                 bool is_test)
      : seq_length_(seq_length),
        batch_size_(batch_size),
        input_size_(input_size),
        hidden_size_(hidden_size),
        num_layers_(num_layers),
        dropout_prob_(dropout_prob),
        seed_(seed),
        weight_numel_(weight_numel),
        mode_(mode),
        is_bidirec_(is_bidirec),
        is_test_(is_test) {
  }

  template <typename T>
#ifdef PADDLE_WITH_HIP
  void Create(const miopenHandle_t &handle, const platform::Place &place,
#else
  void Create(const cudnnHandle_t &handle, const platform::Place &place,
#endif
              const std::vector<int> &sequence_length, size_t *workspace_size,
              size_t *reserve_size, framework::Tensor *dropout_state) {
    int numDirections = is_bidirec_ ? 2 : 1;
#ifdef PADDLE_WITH_HIP
    miopenDataType_t cudnn_type = platform::CudnnDataType<T>::type;
#else
    cudnnDataType_t cudnn_type = platform::CudnnDataType<T>::type;
#endif
    // ------------------- cudnn x, y descriptors ---------------------
    std::vector<int> dims_x = {batch_size_, input_size_, 1};
    std::vector<int> strides_x = {input_size_, 1, 1};
    std::vector<int> dims_y = {batch_size_, hidden_size_ * numDirections, 1};
    std::vector<int> strides_y = {hidden_size_ * numDirections, 1, 1};
    for (int i = 0; i < seq_length_; ++i) {
      x_descs_.emplace_back(x_desc_.descriptor<T>(dims_x, strides_x));
      y_descs_.emplace_back(y_desc_.descriptor<T>(dims_y, strides_y));
    }

#if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION >= 7201
    if (!sequence_length.empty()) {
      x_seq_desc_.descriptor<T>(seq_length_, batch_size_, input_size_, true,
                                sequence_length);
      y_seq_desc_.descriptor<T>(seq_length_, batch_size_,
                                hidden_size_ * numDirections, true,
                                sequence_length);
    }
#endif

    // ------------------- cudnn hx, hy, cx, cy descriptors----------
    std::vector<int> dims_hx = {num_layers_ * numDirections, batch_size_,
                                hidden_size_};
    std::vector<int> strides_hx = {hidden_size_ * batch_size_, hidden_size_, 1};
    init_h_desc_.descriptor<T>(dims_hx, strides_hx);
    init_c_desc_.descriptor<T>(dims_hx, strides_hx);
    last_h_desc_.descriptor<T>(dims_hx, strides_hx);
    last_c_desc_.descriptor<T>(dims_hx, strides_hx);

    // ------------------- cudnn dropout descriptors ---------------------
    size_t state_size;
    bool is_initialized = dropout_state->IsInitialized();
    if (!is_test_ && !is_initialized) {
#ifdef PADDLE_WITH_HIP
      PADDLE_ENFORCE_CUDA_SUCCESS(
          platform::dynload::miopenDropoutGetStatesSize(handle, &state_size));
      dropout_state->mutable_data<uint8_t>({static_cast<int64_t>(state_size)},
                                           place);
#else
      PADDLE_ENFORCE_CUDA_SUCCESS(
          platform::dynload::cudnnDropoutGetStatesSize(handle, &state_size));
      dropout_state->mutable_data<uint8_t>({static_cast<int64_t>(state_size)},
                                           place);
#endif
    }
    dropout_desc_.descriptor(handle, place, is_initialized, dropout_prob_,
                             is_test_ ? nullptr : dropout_state, seed_,
                             state_size);

// ------------------- cudnn rnn descriptors ---------------------
#ifdef PADDLE_WITH_HIP
    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetRNNDescriptor_V2(
        rnn_desc_.desc(), hidden_size_, num_layers_, dropout_desc_.desc(),
        miopenRNNlinear,
        is_bidirec_ ? miopenRNNbidirection : miopenRNNunidirection, mode_,
        miopenRNNwithBias, miopenRNNdefault, cudnn_type));
#elif CUDNN_VERSION >= 6000
    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor_v6(
        handle, rnn_desc_.desc(), hidden_size_, num_layers_,
        dropout_desc_.desc(), CUDNN_LINEAR_INPUT,
        is_bidirec_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, mode_,
        CUDNN_RNN_ALGO_STANDARD, cudnn_type));
#else
    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor(
        rnn_desc_.desc(), hidden_size_, num_layers_, dropout_desc_.desc(),
        CUDNN_LINEAR_INPUT,
        is_bidirec_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, mode_,
        cudnn_type));
#endif

#if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION >= 7201
    if (!sequence_length.empty()) {
      PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNPaddingMode(
          rnn_desc_.desc(), CUDNN_RNN_PADDED_IO_ENABLED));
    }
#endif

    // ------------------- cudnn weights_size ---------------------
    size_t weights_size_;
#ifdef PADDLE_WITH_HIP
    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenGetRNNParamsSize(
        handle, rnn_desc_.desc(), x_descs_[0], &weights_size_, cudnn_type));
#else
    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnGetRNNParamsSize(
        handle, rnn_desc_.desc(), x_descs_[0], &weights_size_, cudnn_type));
#endif
    PADDLE_ENFORCE_EQ(
        weights_size_, sizeof(T) * weight_numel_,
        platform::errors::InvalidArgument(
            "The cudnn rnn and setting weight size should be same."));
    // ------------------- cudnn weight descriptors ---------------------
    platform::DataLayout layout = platform::DataLayout::kNCHW;
    int dim_tmp = weights_size_ / sizeof(T);
    std::vector<int> dim_w = {dim_tmp, 1, 1};
    weight_desc_.descriptor<T>(layout, dim_w);
// ------------------- cudnn workspace, reserve size ---------------------
#ifdef PADDLE_WITH_HIP
    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenGetRNNWorkspaceSize(
        handle, rnn_desc_.desc(), seq_length_, x_descs_.data(),
        workspace_size));
    PADDLE_ENFORCE_CUDA_SUCCESS(
        platform::dynload::miopenGetRNNTrainingReserveSize(
            handle, rnn_desc_.desc(), seq_length_, x_descs_.data(),
            reserve_size));
#else
    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnGetRNNWorkspaceSize(
        handle, rnn_desc_.desc(), seq_length_, x_descs_.data(),
        workspace_size));
    PADDLE_ENFORCE_CUDA_SUCCESS(
        platform::dynload::cudnnGetRNNTrainingReserveSize(
            handle, rnn_desc_.desc(), seq_length_, x_descs_.data(),
            reserve_size));
#endif
  }
#ifdef PADDLE_WITH_HIP
  miopenTensorDescriptor_t *x_descs() { return x_descs_.data(); }
  miopenTensorDescriptor_t *y_descs() { return y_descs_.data(); }
  miopenTensorDescriptor_t init_h_desc() { return init_h_desc_.desc(); }
  miopenTensorDescriptor_t init_c_desc() { return init_c_desc_.desc(); }
  miopenTensorDescriptor_t last_h_desc() { return last_h_desc_.desc(); }
  miopenTensorDescriptor_t last_c_desc() { return last_c_desc_.desc(); }
  miopenRNNDescriptor_t rnn_desc() { return rnn_desc_.desc(); }
  miopenDropoutDescriptor_t dropout_desc() { return dropout_desc_.desc(); }
  miopenTensorDescriptor_t weight_desc() { return weight_desc_.desc(); }
#else
  cudnnTensorDescriptor_t *x_descs() { return x_descs_.data(); }
  cudnnTensorDescriptor_t *y_descs() { return y_descs_.data(); }
#if CUDNN_VERSION >= 7201
  cudnnRNNDataDescriptor_t x_seq_desc() { return x_seq_desc_.desc(); }
  cudnnRNNDataDescriptor_t y_seq_desc() { return y_seq_desc_.desc(); }
#endif
  cudnnTensorDescriptor_t init_h_desc() { return init_h_desc_.desc(); }
  cudnnTensorDescriptor_t init_c_desc() { return init_c_desc_.desc(); }
  cudnnTensorDescriptor_t last_h_desc() { return last_h_desc_.desc(); }
  cudnnTensorDescriptor_t last_c_desc() { return last_c_desc_.desc(); }
  cudnnRNNDescriptor_t rnn_desc() { return rnn_desc_.desc(); }
  cudnnDropoutDescriptor_t dropout_desc() { return dropout_desc_.desc(); }
  cudnnFilterDescriptor_t weight_desc() { return weight_desc_.desc(); }
#endif

 private:
  int seq_length_;
  int batch_size_;
  int input_size_;
  int hidden_size_;
  int num_layers_;
  float dropout_prob_;
  int seed_;
  int weight_numel_;
#ifdef PADDLE_WITH_HIP
  miopenRNNMode_t mode_;
#else
  cudnnRNNMode_t mode_;
#endif
  bool is_bidirec_;
  bool is_test_;
#ifdef PADDLE_WITH_HIP
  std::vector<miopenTensorDescriptor_t> x_descs_;
  std::vector<miopenTensorDescriptor_t> y_descs_;
#else
  std::vector<cudnnTensorDescriptor_t> x_descs_;
  std::vector<cudnnTensorDescriptor_t> y_descs_;
#endif

  platform::ScopedTensorDescriptor x_desc_;
  platform::ScopedTensorDescriptor y_desc_;
#if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION >= 7201
  platform::ScopedRNNTensorDescriptor x_seq_desc_;
  platform::ScopedRNNTensorDescriptor y_seq_desc_;
#endif
  platform::ScopedTensorDescriptor init_h_desc_;
  platform::ScopedTensorDescriptor init_c_desc_;
  platform::ScopedTensorDescriptor last_h_desc_;
  platform::ScopedTensorDescriptor last_c_desc_;
  platform::ScopedDropoutDescriptor dropout_desc_;
  platform::ScopedFilterDescriptor weight_desc_;
  platform::ScopedRNNDescriptor rnn_desc_;
};

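// Returns true when the tensors in weight_list are laid out back-to-back in
// device memory, i.e. each tensor's data begins where the previous one ends.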
template <typename T, typename Type>
bool is_continuous(const Type &weight_list) {
  bool continuous = true;
  for (size_t i = 0; i + 1 < weight_list.size(); ++i) {
    auto *in_data = weight_list[i]->template data<T>();
    auto *in_after_data = weight_list[i + 1]->template data<T>();
    auto in_size = weight_list[i]->numel();
    bool temp = in_data + in_size == in_after_data;
    continuous = continuous && temp;
  }
  return continuous;
}

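// Packs every tensor of weight_list contiguously into the single flat
// `weight` tensor, which is the parameter layout cuDNN/MIOpen expects.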
template <typename T>
void weight_to_tensor(const platform::Place &place, gpuStream_t stream,
                      const std::vector<const Tensor *> &weight_list,
                      Tensor *weight) {
  auto weight_data = weight->data<T>();
  int weight_offset = 0;
  for (size_t i = 0; i < weight_list.size(); ++i) {
    const T *in_data = weight_list[i]->data<T>();
    auto in_size = weight_list[i]->numel();

    memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, weight->place()),
                 weight_data + weight_offset,
                 BOOST_GET_CONST(platform::CUDAPlace, weight_list[i]->place()),
                 in_data, in_size * sizeof(T), stream);
    weight_offset += in_size;
  }
}

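// Splits the flat `weight` buffer back into the per-parameter tensors of
// `weight_grad`, using the sizes of `weight_input` to compute the offsets.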
template <typename T>
void weight_to_tensor_list(const platform::Place &place, gpuStream_t stream,
                           std::vector<Tensor *> *weight_grad,
                           const std::vector<const Tensor *> &weight_input,
                           const Tensor *weight) {
  int weight_offset = 0;
  auto *weight_data = weight->data<T>();
  for (size_t i = 0; i < weight_input.size(); ++i) {
    auto in_size = weight_input[i]->numel();
    T *weight_grad_data = (*weight_grad)[i]->mutable_data<T>(place);
    const T *src = weight_data + weight_offset;

    memory::Copy(
        BOOST_GET_CONST(platform::CUDAPlace, (*weight_grad)[i]->place()),
        weight_grad_data, BOOST_GET_CONST(platform::CUDAPlace, weight->place()),
        src, in_size * sizeof(T), stream);
    weight_offset += in_size;
  }
}

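// Forward kernel: packs the weights into one contiguous buffer if necessary,
// builds the descriptors, and runs the cuDNN/MIOpen RNN forward pass in
// either inference or training mode, optionally through the padded
// (sequence-length aware) API.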
template <typename T>
class RNNCudnnKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    const Tensor *x = ctx.Input<Tensor>("Input");
    auto pre_state = ctx.MultiInput<Tensor>("PreState");

    Tensor *out = ctx.Output<Tensor>("Out");
    auto state = ctx.MultiOutput<Tensor>("State");
    Tensor *reserve = ctx.Output<Tensor>("Reserve");
    Tensor *state_out = ctx.Output<Tensor>("DropoutState");

    float dropout_prob = ctx.Attr<float>("dropout_prob");
    bool is_bidirec = ctx.Attr<bool>("is_bidirec");
    int hidden_size = ctx.Attr<int>("hidden_size");
    int num_layers = ctx.Attr<int>("num_layers");
    auto mode = ctx.Attr<std::string>("mode");
#ifdef PADDLE_WITH_HIP
    miopenRNNMode_t rnn_mode = miopenLSTM;
    if (mode == "LSTM")
      rnn_mode = miopenLSTM;
    else if (mode == "GRU")
      rnn_mode = miopenGRU;
    else if (mode == "RNN_RELU")
      rnn_mode = miopenRNNRELU;
    else if (mode == "RNN_TANH")
      rnn_mode = miopenRNNTANH;
#else
    cudnnRNNMode_t rnn_mode = CUDNN_LSTM;
    if (mode == "LSTM")
      rnn_mode = CUDNN_LSTM;
    else if (mode == "GRU")
      rnn_mode = CUDNN_GRU;
    else if (mode == "RNN_RELU")
      rnn_mode = CUDNN_RNN_RELU;
    else if (mode == "RNN_TANH")
      rnn_mode = CUDNN_RNN_TANH;
#endif
    else
      PADDLE_THROW(platform::errors::InvalidArgument(
          "rnn_mode should be LSTM, GRU, RNN_RELU or RNN_TANH, but received: "
          "%s.",
          mode));

    bool is_test = ctx.Attr<bool>("is_test");
    int seed = ctx.Attr<int>("seed");
    if (!is_test) {
      int device_id =
          BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace()).GetDeviceId();
      auto gen_cuda = framework::GetDefaultCUDAGenerator(device_id);
      if (gen_cuda->GetIsInitPy() && seed == 0) {
        // If perform `manual_seed` in python and inner seed is not specified
        // (equals 0), use global generator generated seed.
        seed = static_cast<int>(gen_cuda->Random64());
      } else if (seed == 0) {
        // use random generated seed
        std::random_device rd;
        seed = rd();
      }  // else use `ctx.Attr<int>("seed")` specified seed
    }

    const T *x_data = x->data<T>();
    const T *init_h_data = pre_state[0]->data<T>();
    const T *init_c_data = nullptr;
    T *out_data = out->mutable_data<T>(ctx.GetPlace());
    T *last_h_data = state[0]->mutable_data<T>(ctx.GetPlace());
    T *last_c_data = nullptr;
#ifdef PADDLE_WITH_HIP
    if (rnn_mode == miopenLSTM) {
#else
    if (rnn_mode == CUDNN_LSTM) {
#endif
      init_c_data = pre_state[1]->data<T>();
      last_c_data = state[1]->mutable_data<T>(ctx.GetPlace());
    }

    bool has_seq_length = ctx.HasInput("SequenceLength");
    std::vector<int> SequenceLength;
    if (has_seq_length) {
      auto *sequence_length = ctx.Input<Tensor>("SequenceLength");
      SequenceLength = operators::GetDataFromTensor<int>(sequence_length);
    }

    auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    auto handle = dev_ctx.cudnn_handle();

    int seq_length = x->dims()[0];
    int batch_size = x->dims()[1];
    int input_size = x->dims()[2];

    size_t workspace_size;
    size_t reserve_size;
    Tensor weight_whole;
    T *w_data = nullptr;
    auto place = ctx.GetPlace();
    auto stream = reinterpret_cast<const platform::CUDADeviceContext &>(
                      ctx.device_context())
                      .stream();
    auto weight_list = ctx.MultiInput<framework::Tensor>("WeightList");
    auto weight_numel = std::accumulate(
        weight_list.begin(), weight_list.end(), 0,
        [](int64_t num, const Tensor *t) { return num + t->numel(); });
    bool continuous =
        is_continuous<T, std::vector<const Tensor *>>(weight_list);
    if (!continuous) {
      LOG_FIRST_N(WARNING, 2)
          << "The memory of the input WeightList is not contiguous, so a "
             "less efficient code path will be used. Please call "
             "flatten_parameters() to make the weight memory contiguous.";
      weight_whole.mutable_data<T>({weight_numel}, place);
      weight_to_tensor<T>(place, stream, weight_list, &weight_whole);
      w_data = weight_whole.data<T>();
      if (is_test) {  // maybe also reset small weights' ptr for training
        int offset = 0;
        for (size_t i = 0; i < weight_list.size(); ++i) {
          size_t len = weight_list[i]->numel();
          auto dim = weight_list[i]->dims();
          const_cast<Tensor *>(weight_list[i])
              ->ShareDataWith(
                  weight_whole.Slice(static_cast<int64_t>(offset),
                                     static_cast<int64_t>(offset + len)))
              .Resize(dim);
          offset += len;
        }
      }
    } else {
      w_data = const_cast<T *>(weight_list[0]->data<T>());
    }

    RNNDescriptors rnn(seq_length, batch_size, input_size, hidden_size,
                       num_layers, dropout_prob, seed, weight_numel, rnn_mode,
                       is_bidirec, is_test);
    rnn.Create<T>(handle, ctx.GetPlace(), SequenceLength, &workspace_size,
                  &reserve_size, state_out);

    framework::Tensor workspace_data_;
    workspace_data_.mutable_data<uint8_t>(
        {static_cast<int64_t>(workspace_size)}, ctx.GetPlace());

    auto *reserve_data = reserve->mutable_data<uint8_t>(
        {static_cast<int64_t>(reserve_size)}, ctx.GetPlace());

    if (is_test) {
      RNNInference(has_seq_length, handle, seq_length, &rnn, x_data,
                   init_h_data, init_c_data, w_data, out_data, last_h_data,
                   last_c_data, &workspace_data_, workspace_size);
    } else {
      if (!has_seq_length) {
// for train
// This interface is used when the input/output is unpadded.
#ifdef PADDLE_WITH_HIP
        PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenRNNForwardTraining(
            handle, rnn.rnn_desc(), seq_length, rnn.x_descs(), x_data,
            rnn.init_h_desc(), init_h_data, rnn.init_c_desc(), init_c_data,
            rnn.weight_desc(), w_data, rnn.y_descs(), out_data,
            rnn.last_h_desc(), last_h_data, rnn.last_c_desc(), last_c_data,
            workspace_data_.data<uint8_t>(), workspace_size, reserve_data,
            reserve_size));
#else
        PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNForwardTraining(
            handle, rnn.rnn_desc(), seq_length, rnn.x_descs(), x_data,
            rnn.init_h_desc(), init_h_data, rnn.init_c_desc(), init_c_data,
            rnn.weight_desc(), w_data, rnn.y_descs(), out_data,
            rnn.last_h_desc(), last_h_data, rnn.last_c_desc(), last_c_data,
            workspace_data_.data<uint8_t>(), workspace_size, reserve_data,
            reserve_size));
#endif
      } else {
#if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION >= 7201
        // for train
        // This interface is used when the input/output is padded.
        PADDLE_ENFORCE_CUDA_SUCCESS(
            platform::dynload::cudnnRNNForwardTrainingEx(
                handle, rnn.rnn_desc(), rnn.x_seq_desc(), x_data,
                rnn.init_h_desc(), init_h_data, rnn.init_c_desc(), init_c_data,
                rnn.weight_desc(), w_data, rnn.y_seq_desc(), out_data,
                rnn.last_h_desc(), last_h_data, rnn.last_c_desc(), last_c_data,
                nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
                nullptr, workspace_data_.data<uint8_t>(), workspace_size,
                reserve_data, reserve_size));
#else
        PADDLE_THROW(platform::errors::Unavailable(
            "The padded input is supported by "
            "cudnnRNNForwardTrainingEx, but it only works when "
            "the version of cudnn is larger than 7.2.1"));
#endif
      }
    }
  }

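  // Runs the forward pass through the inference-only API; no reserve space is
  // produced, so this path cannot be followed by a backward pass.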
#ifdef PADDLE_WITH_HIP
  void RNNInference(const bool &has_seq_length, const miopenHandle_t &handle,
#else
  void RNNInference(const bool &has_seq_length, const cudnnHandle_t &handle,
#endif
                   const int &seq_length, RNNDescriptors *rnn, const T *x_data,
                   const T *init_h_data, const T *init_c_data, const T *w_data,
                   T *out_data, T *last_h_data, T *last_c_data,
                   framework::Tensor *workspace_data,
                   const size_t &workspace_size) const {
    if (!has_seq_length) {
// for inference
// This interface is used when the input/output is unpadded.
#ifdef PADDLE_WITH_HIP
      PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenRNNForwardInference(
          handle, rnn->rnn_desc(), seq_length, rnn->x_descs(), x_data,
          rnn->init_h_desc(), init_h_data, rnn->init_c_desc(), init_c_data,
          rnn->weight_desc(), w_data, rnn->y_descs(), out_data,
          rnn->last_h_desc(), last_h_data, rnn->last_c_desc(), last_c_data,
          workspace_data->data<uint8_t>(), workspace_size));
#else
      PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNForwardInference(
          handle, rnn->rnn_desc(), seq_length, rnn->x_descs(), x_data,
          rnn->init_h_desc(), init_h_data, rnn->init_c_desc(), init_c_data,
          rnn->weight_desc(), w_data, rnn->y_descs(), out_data,
          rnn->last_h_desc(), last_h_data, rnn->last_c_desc(), last_c_data,
          workspace_data->data<uint8_t>(), workspace_size));
#endif
    } else {
#if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION >= 7201
      // for inference
      // This interface is used when the input/output is padded.
      PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNForwardInferenceEx(
          handle, rnn->rnn_desc(), rnn->x_seq_desc(), x_data,
          rnn->init_h_desc(), init_h_data, rnn->init_c_desc(), init_c_data,
          rnn->weight_desc(), w_data, rnn->y_seq_desc(), out_data,
          rnn->last_h_desc(), last_h_data, rnn->last_c_desc(), last_c_data,
          nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
          nullptr, workspace_data->data<uint8_t>(), workspace_size));
#else
      // cuDNN version has to be >= 7.2.1
      PADDLE_THROW(platform::errors::Unavailable(
          "The padded input is supported by "
          "cudnnRNNForwardInferenceEx, but it only works when "
          "the version of cudnn is larger than 7.2.1"));
#endif
    }
  }
};

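// Backward kernel: rebuilds the same descriptors as the forward pass, then
// computes the input, pre-state and weight gradients with the cuDNN/MIOpen
// backward-data and backward-weights calls.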
template <typename T>
class RNNGradCudnnKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    auto *input = ctx.Input<Tensor>("Input");
    auto pre_state = ctx.MultiInput<Tensor>("PreState");
    auto weight_list = ctx.MultiInput<Tensor>("WeightList");
    auto *state_out = ctx.Input<Tensor>("DropoutState");
    auto *reserve = ctx.Input<Tensor>("Reserve");
    auto *out = ctx.Input<Tensor>("Out");
    // auto state = ctx.MultiInput<Tensor>("State");

    auto *out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto state_grad = ctx.MultiInput<Tensor>(framework::GradVarName("State"));

    auto *in_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
    auto pre_state_grad =
        ctx.MultiOutput<Tensor>(framework::GradVarName("PreState"));
    auto weight_grad_list =
        ctx.MultiOutput<Tensor>(framework::GradVarName("WeightList"));

    float dropout_prob = ctx.Attr<float>("dropout_prob");
    bool is_bidirec = ctx.Attr<bool>("is_bidirec");
    int hidden_size = ctx.Attr<int>("hidden_size");
    int num_layers = ctx.Attr<int>("num_layers");
    auto mode = ctx.Attr<std::string>("mode");
#ifdef PADDLE_WITH_HIP
    miopenRNNMode_t rnn_mode = miopenLSTM;
    if (mode == "LSTM")
      rnn_mode = miopenLSTM;
    else if (mode == "GRU")
      rnn_mode = miopenGRU;
    else if (mode == "RNN_RELU")
      rnn_mode = miopenRNNRELU;
    else if (mode == "RNN_TANH")
      rnn_mode = miopenRNNTANH;
#else
    cudnnRNNMode_t rnn_mode = CUDNN_LSTM;
    if (mode == "LSTM")
      rnn_mode = CUDNN_LSTM;
    else if (mode == "GRU")
      rnn_mode = CUDNN_GRU;
    else if (mode == "RNN_RELU")
      rnn_mode = CUDNN_RNN_RELU;
    else if (mode == "RNN_TANH")
      rnn_mode = CUDNN_RNN_TANH;
#endif
    else
      PADDLE_THROW(platform::errors::InvalidArgument(
          "rnn_mode should be LSTM, GRU, RNN_RELU or RNN_TANH, but received: "
          "%s.",
          mode));
    bool is_test = ctx.Attr<bool>("is_test");
    int seed = ctx.Attr<int>("seed");

    auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    auto handle = dev_ctx.cudnn_handle();

    auto place = ctx.GetPlace();
    auto weight_numel = std::accumulate(
        weight_list.begin(), weight_list.end(), 0,
        [](int64_t num, const Tensor *t) { return num + t->numel(); });
    bool continuous =
        is_continuous<T, std::vector<const Tensor *>>(weight_list);

    auto stream = reinterpret_cast<const platform::CUDADeviceContext &>(
                      ctx.device_context())
                      .stream();
    Tensor weight_whole;
    T *weight_data = nullptr;

    if (!continuous) {
      weight_whole.mutable_data<T>({weight_numel}, place);
      weight_to_tensor<T>(place, stream, weight_list, &weight_whole);
      weight_data = weight_whole.data<T>();
    } else {
      weight_data = const_cast<T *>(weight_list[0]->data<T>());
    }

    Tensor weight_grad;
    math::SetConstant<paddle::platform::CUDADeviceContext, T> zero;
    weight_grad.mutable_data<T>({weight_numel}, ctx.GetPlace());
    zero(dev_ctx, &weight_grad, static_cast<T>(0.0));
    T *weight_grad_data = weight_grad.data<T>();

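    // Make each WeightList gradient share a slice of the flat weight_grad
    // buffer so that a single backward-weights call fills all of them.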
    int offset = 0;
    for (size_t i = 0; i < weight_grad_list.size(); ++i) {
      size_t len = weight_grad_list[i]->numel();
      auto dim = weight_grad_list[i]->dims();
      weight_grad_list[i]
          ->ShareDataWith(weight_grad.Slice(static_cast<int64_t>(offset),
                                            static_cast<int64_t>(offset + len)))
          .Resize(dim);
      offset += len;
    }

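    // Backward-data must still run (backward-weights depends on it) even when
    // Input@GRAD is not requested, so fall back to a local dx tensor.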
    Tensor input_grad_value;
    if (!in_grad) {
      in_grad = &input_grad_value;
      in_grad->Resize(input->dims());
    }

    auto *init_h_data = pre_state[0]->data<T>();
    // auto *last_h_data = state[0]->data<T>();
    auto *last_h_grad_data = state_grad[0]->data<T>();
    const T *init_c_data = nullptr;
    // const T *last_c_data = nullptr;
    const T *last_c_grad_data = nullptr;
    T *init_h_grad_data =
        pre_state_grad.size() != 0 && pre_state_grad[0]
            ? pre_state_grad[0]->mutable_data<T>(ctx.GetPlace())
            : nullptr;
    T *init_c_grad_data = nullptr;
#ifdef PADDLE_WITH_HIP
    if (rnn_mode == miopenLSTM) {
#else
    if (rnn_mode == CUDNN_LSTM) {
#endif
      init_c_data = pre_state[1]->data<T>();
      // last_c_data = state[1]->data<T>();
      last_c_grad_data = state_grad[1]->data<T>();
      init_c_grad_data =
          pre_state_grad.size() != 0 && pre_state_grad[1]
              ? pre_state_grad[1]->mutable_data<T>(ctx.GetPlace())
              : nullptr;
    }
    auto *out_data = out->data<T>();
    auto *out_grad_data = out_grad->data<T>();

    // Allocate the input gradient buffer when it exists.
    T *in_grad_data = nullptr;
    if (in_grad) {
      in_grad_data = in_grad->mutable_data<T>(ctx.GetPlace());
    }

    bool has_seq_length = ctx.HasInput("SequenceLength");
    std::vector<int> SequenceLength;
    if (has_seq_length) {
      auto *sequence_length = ctx.Input<Tensor>("SequenceLength");
      SequenceLength = operators::GetDataFromTensor<int>(sequence_length);
    }

    auto input_dims = input->dims();
    int seq_length = input_dims[0];
    int batch_size = input_dims[1];
    int input_size = input_dims[2];

    size_t workspace_size;
    size_t reserve_size;

    RNNDescriptors rnn(seq_length, batch_size, input_size, hidden_size,
                       num_layers, dropout_prob, seed, weight_numel, rnn_mode,
                       is_bidirec, is_test);

    rnn.Create<T>(handle, ctx.GetPlace(), SequenceLength, &workspace_size,
                  &reserve_size, const_cast<Tensor *>(state_out));

    framework::Tensor workspace_data_;
    workspace_data_.mutable_data<uint8_t>(
        {static_cast<int64_t>(workspace_size)}, ctx.GetPlace());
    const uint8_t *reserve_data = reserve->data<uint8_t>();

    if (!has_seq_length) {
      if (in_grad) {
#ifdef PADDLE_WITH_HIP
        PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenRNNBackwardData(
            handle, rnn.rnn_desc(), seq_length, rnn.y_descs(), out_data,
            rnn.y_descs(), out_grad_data, rnn.last_h_desc(), last_h_grad_data,
            rnn.last_c_desc(), last_c_grad_data, rnn.weight_desc(), weight_data,
            rnn.init_h_desc(), init_h_data, rnn.init_c_desc(), init_c_data,
            rnn.x_descs(), in_grad_data, rnn.init_h_desc(), init_h_grad_data,
            rnn.init_c_desc(), init_c_grad_data,
            workspace_data_.data<uint8_t>(), workspace_size,
            const_cast<uint8_t *>(reserve_data), reserve_size));
#else
        // This interface is used when the input/output is unpadded.
        PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNBackwardData(
            handle, rnn.rnn_desc(), seq_length, rnn.y_descs(), out_data,
            rnn.y_descs(), out_grad_data, rnn.last_h_desc(), last_h_grad_data,
            rnn.last_c_desc(), last_c_grad_data, rnn.weight_desc(), weight_data,
            rnn.init_h_desc(), init_h_data, rnn.init_c_desc(), init_c_data,
            rnn.x_descs(), in_grad_data, rnn.init_h_desc(), init_h_grad_data,
            rnn.init_c_desc(), init_c_grad_data,
            workspace_data_.data<uint8_t>(), workspace_size,
            const_cast<uint8_t *>(reserve_data), reserve_size));
#endif
      }
      if (!weight_grad_list.empty()) {
#ifdef PADDLE_WITH_HIP
        PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenRNNBackwardWeights(
            handle, rnn.rnn_desc(), seq_length, rnn.x_descs(), input->data<T>(),
            rnn.init_h_desc(), init_h_data, rnn.y_descs(), out->data<T>(),
            rnn.weight_desc(), weight_grad_data,
            workspace_data_.data<uint8_t>(), workspace_size,
            const_cast<uint8_t *>(reserve_data), reserve_size));
#else
        PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNBackwardWeights(
            handle, rnn.rnn_desc(), seq_length, rnn.x_descs(), input->data<T>(),
            rnn.init_h_desc(), init_h_data, rnn.y_descs(), out->data<T>(),
            workspace_data_.data<uint8_t>(), workspace_size, rnn.weight_desc(),
            weight_grad_data, const_cast<uint8_t *>(reserve_data),
            reserve_size));
#endif
      }
    } else {
#if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION >= 7201
      // for train
      // This interface is used when the input/output is padded.
      if (in_grad) {
        PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNBackwardDataEx(
            handle, rnn.rnn_desc(), rnn.y_seq_desc(), out_data,
            rnn.y_seq_desc(), out_grad_data, nullptr, nullptr,
            rnn.last_h_desc(), last_h_grad_data, rnn.last_c_desc(),
            last_c_grad_data, rnn.weight_desc(), weight_data, rnn.init_h_desc(),
            init_h_data, rnn.init_c_desc(), init_c_data, rnn.x_seq_desc(),
            in_grad_data, rnn.init_h_desc(), init_h_grad_data,
            rnn.init_c_desc(), init_c_grad_data, nullptr, nullptr,
            workspace_data_.data<uint8_t>(), workspace_size,
            const_cast<uint8_t *>(reserve_data), reserve_size));
      }

      if (!weight_grad_list.empty()) {
        PADDLE_ENFORCE_CUDA_SUCCESS(
            platform::dynload::cudnnRNNBackwardWeightsEx(
                handle, rnn.rnn_desc(), rnn.x_seq_desc(), input->data<T>(),
                rnn.init_h_desc(), init_h_data, rnn.y_seq_desc(),
                out->data<T>(), workspace_data_.data<uint8_t>(), workspace_size,
                rnn.weight_desc(), weight_grad_data,
                const_cast<uint8_t *>(reserve_data), reserve_size));
      }
#else
      PADDLE_THROW(platform::errors::Unavailable(
          "The padded input of rnn is supported by cudnnRNNBackwardDataEx, "
          "cudnnRNNBackwardWeightsEx, but it only works when the version "
          "of cudnn is larger than 7.2.1"));
#endif
    }
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
#ifdef PADDLE_WITH_HIP
// MIOPEN does not support double
REGISTER_OP_CUDA_KERNEL(rnn, ops::RNNCudnnKernel<float>);
REGISTER_OP_CUDA_KERNEL(rnn_grad, ops::RNNGradCudnnKernel<float>);
#else
REGISTER_OP_CUDA_KERNEL(rnn, ops::RNNCudnnKernel<float>,
                        ops::RNNCudnnKernel<double>);
REGISTER_OP_CUDA_KERNEL(rnn_grad, ops::RNNGradCudnnKernel<float>,
                        ops::RNNGradCudnnKernel<double>);
#endif