/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <paddle/fluid/platform/device_context.h>
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/detail/gru_gpu_kernel.h"
#include "paddle/fluid/operators/math/detail/gru_kernel.h"
#include "paddle/fluid/operators/math/gru_compute.h"

namespace paddle {
namespace operators {
namespace math {

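// GRU unit forward pass on CUDA: two GEMMs add the recurrent (previous
// hidden state) contributions to the gate pre-activations, and two
// element-wise kernels apply the gate activations.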
template <typename T>
struct GRUUnitFunctor<platform::CUDADeviceContext, T> {
  static void compute(const platform::CUDADeviceContext &context,
                      GRUMetaValue<T> value, int frame_size, int batch_size,
                      const detail::ActivationType active_node,
                      const detail::ActivationType active_gate,
                      bool origin_mode) {
    auto stream = context.stream();
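    // Launch configuration: for a single sequence step (batch_size == 1),
    // use a 1-D grid with one thread per frame element; otherwise tile the
    // (frame_size, batch_size) plane with 32x32 thread blocks.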
    dim3 threads;
    dim3 grid;
    if (batch_size == 1) {
      int frame_per_block = frame_size <= 1024 ? frame_size : 1024;
      int frame_blocks = (frame_size + 1024 - 1) / 1024;
      threads = dim3(frame_per_block, 1);
      grid = dim3(frame_blocks, 1);
    } else {
      threads = dim3(32, 32);
      grid = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32);
    }
    auto blas = math::GetBlas<platform::CUDADeviceContext, T>(context);
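    // Recurrent contribution to the update and reset gate pre-activations:
    // gate_value[:, 0:2*frame_size] += prev_out_value * gate_weight.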
    if (value.prev_out_value) {
      blas.GEMM(false, false, batch_size, frame_size * 2, frame_size, 1,
                value.prev_out_value, frame_size, value.gate_weight,
                frame_size * 2, 1, value.gate_value, frame_size * 3);
    }

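    // Activate the update and reset gates and compute the reset output
    // (reset gate element-wise multiplied with the previous hidden state).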
    if (batch_size == 1) {
      detail::KeGruForwardResetOutput<detail::forward::gru_resetOutput<T>,
                                      /* is_batch= */ false,
                                      T><<<grid, threads, 0, stream>>>(
          detail::forward::gru_resetOutput<T>(), value.gate_value,
          value.reset_output_value, value.prev_out_value, frame_size,
          batch_size, active_gate);
    } else {
      detail::KeGruForwardResetOutput<detail::forward::gru_resetOutput<T>,
                                      /* is_batch= */ true,
                                      T><<<grid, threads, 0, stream>>>(
          detail::forward::gru_resetOutput<T>(), value.gate_value,
          value.reset_output_value, value.prev_out_value, frame_size,
          batch_size, active_gate);
    }

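    // Recurrent contribution to the candidate pre-activation:
    // gate_value[:, 2*frame_size:] += reset_output_value * state_weight.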
    if (value.prev_out_value) {
      blas.GEMM(false, false, batch_size, frame_size, frame_size, 1,
                value.reset_output_value, frame_size, value.state_weight,
                frame_size, 1, value.gate_value + frame_size * 2,
                frame_size * 3);
    }

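    // Activate the candidate state and interpolate it with the previous
    // hidden state through the update gate; origin_mode selects between the
    // two common GRU update conventions (which of u and 1-u weights the
    // previous state).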
    if (batch_size == 1) {
      detail::KeGruForwardFinalOutput<detail::forward::gru_finalOutput<T>,
                                      /* is_batch= */ false,
                                      T><<<grid, threads, 0, stream>>>(
          detail::forward::gru_finalOutput<T>(), value.gate_value,
          value.prev_out_value, value.output_value, frame_size, batch_size,
          active_node, origin_mode);
    } else {
      detail::KeGruForwardFinalOutput<detail::forward::gru_finalOutput<T>,
                                      /* is_batch= */ true,
                                      T><<<grid, threads, 0, stream>>>(
          detail::forward::gru_finalOutput<T>(), value.gate_value,
          value.prev_out_value, value.output_value, frame_size, batch_size,
          active_node, origin_mode);
    }
  }
};

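// GRU unit backward pass on CUDA: element-wise kernels propagate gradients
// through the gate activations; GEMMs propagate them through the recurrent
// connections and accumulate the weight gradients.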
template <typename T>
struct GRUUnitGradFunctor<platform::CUDADeviceContext, T> {
  static void compute(const platform::CUDADeviceContext &context,
                      GRUMetaValue<T> value, GRUMetaGrad<T> grad,
                      int frame_size, int batch_size,
                      const detail::ActivationType active_node,
                      const detail::ActivationType active_gate) {
    auto stream = context.stream();
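    // Same launch configuration as in the forward pass.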
    dim3 threads;
    dim3 grid;
    if (batch_size == 1) {
      int frame_per_block = frame_size <= 1024 ? frame_size : 1024;
      int frame_blocks = (frame_size + 1024 - 1) / 1024;
      threads = dim3(frame_per_block, 1);
      grid = dim3(frame_blocks, 1);
    } else {
      threads = dim3(32, 32);
      grid = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32);
    }

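    // Back-propagate through the final-output step: gradients w.r.t. the
    // update gate, the candidate state, and (partially) the previous hidden
    // state.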
    if (batch_size == 1) {
      detail::KeGruBackwardStateGrad<
          detail::backward::gru_stateGrad<T>,
          /* is_batch= */ false><<<grid, threads, 0, stream>>>(
          detail::backward::gru_stateGrad<T>(), value.gate_value,
          grad.gate_grad, value.prev_out_value, grad.prev_out_grad,
          grad.output_grad, frame_size, batch_size, active_node);
    } else {
      detail::KeGruBackwardStateGrad<
          detail::backward::gru_stateGrad<T>,
          /* is_batch= */ true><<<grid, threads, 0, stream>>>(
          detail::backward::gru_stateGrad<T>(), value.gate_value,
          grad.gate_grad, value.prev_out_value, grad.prev_out_grad,
          grad.output_grad, frame_size, batch_size, active_node);
    }

    auto blas = math::GetBlas<platform::CUDADeviceContext, T>(context);

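    // Propagate the candidate-state gradient back through the state GEMM:
    // reset_output_grad = candidate-gate grad * state_weight^T
    // (beta = 0 overwrites rather than accumulates).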
    if (value.prev_out_value && grad.prev_out_grad) {
      blas.GEMM(false, true, batch_size, frame_size, frame_size, 1,
                grad.gate_grad + frame_size * 2, frame_size * 3,
                value.state_weight, frame_size, 0, grad.reset_output_grad,
                frame_size);

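      // Accumulate the state-weight gradient:
      // state_weight_grad += reset_output_value^T * candidate-gate grad.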
      if (grad.state_weight_grad) {
        blas.GEMM(true, false, frame_size, frame_size, batch_size, 1,
                  value.reset_output_value, frame_size,
                  grad.gate_grad + frame_size * 2, frame_size * 3, 1,
                  grad.state_weight_grad, frame_size);
      }
    }

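    // Back-propagate through the reset-output step: gradients w.r.t. the
    // reset gate and the previous hidden state.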
    if (batch_size == 1) {
      detail::KeGruBackwardResetGrad<
          detail::backward::gru_resetGrad<T>,
          /* is_batch= */ false><<<grid, threads, 0, stream>>>(
          detail::backward::gru_resetGrad<T>(), value.gate_value,
          grad.gate_grad, value.prev_out_value, grad.prev_out_grad,
          grad.reset_output_grad, frame_size, batch_size, active_gate);
    } else {
      detail::KeGruBackwardResetGrad<
          detail::backward::gru_resetGrad<T>,
          /* is_batch= */ true><<<grid, threads, 0, stream>>>(
          detail::backward::gru_resetGrad<T>(), value.gate_value,
          grad.gate_grad, value.prev_out_value, grad.prev_out_grad,
          grad.reset_output_grad, frame_size, batch_size, active_gate);
    }

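    // Propagate the update/reset gate gradients back through the gate GEMM:
    // prev_out_grad += gate_grad[:, 0:2*frame_size] * gate_weight^T.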
    if (grad.prev_out_grad && value.prev_out_value) {
      blas.GEMM(false, true, batch_size, frame_size, frame_size * 2, 1,
                grad.gate_grad, frame_size * 3, value.gate_weight,
                frame_size * 2, 1, grad.prev_out_grad, frame_size);

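      // Accumulate the gate-weight gradient:
      // gate_weight_grad += prev_out_value^T * gate_grad[:, 0:2*frame_size].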
      if (grad.gate_weight_grad) {
        blas.GEMM(true, false, frame_size, frame_size * 2, batch_size, 1,
                  value.prev_out_value, frame_size, grad.gate_grad,
                  frame_size * 3, 1, grad.gate_weight_grad, frame_size * 2);
      }
    }
  }
};

template struct GRUUnitFunctor<platform::CUDADeviceContext, float>;
template struct GRUUnitFunctor<platform::CUDADeviceContext, double>;
template struct GRUUnitGradFunctor<platform::CUDADeviceContext, float>;
template struct GRUUnitGradFunctor<platform::CUDADeviceContext, double>;

}  // namespace math
}  // namespace operators
}  // namespace paddle