/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/math/blas.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;

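// Integer codes for the op's "activation" and "gate_activation" attributes.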
enum GRUActivationType { identity = 0, sigmoid = 1, tanh = 2, relu = 3 };

template <typename DeviceContext, typename T>
class GRUUnitKernel : public framework::OpKernel<T> {
 public:
  template <typename Device, typename X, typename Y>
  void ActCompute(const int act_type, const Device& d, X x, Y y) const {
    if (act_type == identity)
      y.device(d) = x;
    else if (act_type == sigmoid)
      SigmoidFunctor<T>()(d, x, y);
    else if (act_type == tanh)
      TanhFunctor<T>()(d, x, y);
    else if (act_type == relu)
      ReluFunctor<T>()(d, x, y);
    else
      PADDLE_THROW(platform::errors::Unimplemented(
          "Unsupported activation type, only supports identity, sigmoid, tanh "
          "and relu."));
  }

  void Compute(const framework::ExecutionContext& context) const override {
    auto* input = context.Input<Tensor>("Input");
    auto* hidden_prev = context.Input<Tensor>("HiddenPrev");
    auto* weight = context.Input<Tensor>("Weight");
    auto* bias = context.Input<Tensor>("Bias");
    auto* gate = context.Output<Tensor>("Gate");
    gate->mutable_data<T>(context.GetPlace());
    auto* reset_hidden_prev = context.Output<Tensor>("ResetHiddenPrev");
    reset_hidden_prev->mutable_data<T>(context.GetPlace());
    auto* hidden = context.Output<Tensor>("Hidden");
    hidden->mutable_data<T>(context.GetPlace());

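    // Input and Gate are (batch_size, frame_size * 3): one frame_size-wide
    // column block per gate (update, reset, output candidate).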
    int batch_size = input->dims()[0];
    int frame_size = hidden_prev->dims()[1];

    auto x = EigenMatrix<T>::From(*input);
    auto h_p = EigenMatrix<T>::From(*hidden_prev);
    auto g = EigenMatrix<T>::From(*gate);
    auto r_h_p = EigenMatrix<T>::From(*reset_hidden_prev);
    auto h = EigenMatrix<T>::From(*hidden);
    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();

    // calculate unactivated gate outputs
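    // Bias is a single (frame_size * 3) row, broadcast across the batch.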
    if (bias) {
      auto b = EigenMatrix<T>::From(*bias);
      g.device(place) = x +
                        b.reshape(Eigen::array<int, 2>({{1, frame_size * 3}}))
                            .broadcast(Eigen::array<int, 2>({{batch_size, 1}}));
    } else {
      g.device(place) = x;
    }
    const T* hidden_prev_data = hidden_prev->data<T>();
    const T* weight_data = weight->data<T>();
    T* gate_data = gate->data<T>();
    T* reset_hidden_prev_data = reset_hidden_prev->data<T>();
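    // gate[:, 0:2*frame_size] += hidden_prev * W_gate, where W_gate is the
    // leading frame_size x (2 * frame_size) block of Weight; beta = 1 keeps
    // the x (+ bias) term already stored in gate.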
    auto blas = math::GetBlas<DeviceContext, T>(context);
    blas.GEMM(false, false, batch_size, 2 * frame_size, frame_size, 1,
              hidden_prev_data, frame_size, weight_data, frame_size * 2, 1,
              gate_data, frame_size * 3);

    // calculate activated gates
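    // gate is laid out as [u | r | c]: update gate, reset gate and output
    // candidate, each frame_size columns wide.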
    Eigen::array<int, 2> extents{{batch_size, frame_size}};
    Eigen::array<int, 2> u_offsets{{0, 0}};
    ActCompute(context.Attr<int>("gate_activation"), place,
               g.slice(u_offsets, extents), g.slice(u_offsets, extents));
    auto u = g.slice(u_offsets, extents);  // update gate
    Eigen::array<int, 2> r_offsets{{0, frame_size}};
    ActCompute(context.Attr<int>("gate_activation"), place,
               g.slice(r_offsets, extents), g.slice(r_offsets, extents));
    auto r = g.slice(r_offsets, extents);  // reset gate
    r_h_p.device(place) = r * h_p;         // reset previous hidden state
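    // gate[:, 2*frame_size:] += (r * h_p) * W_state, where W_state is the
    // frame_size x frame_size block stored after W_gate in Weight.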
    blas.GEMM(false, false, batch_size, frame_size, frame_size, 1,
              reset_hidden_prev_data, frame_size,
              weight_data + frame_size * frame_size * 2, frame_size, 1,
              gate_data + frame_size * 2, frame_size * 3);

    Eigen::array<int, 2> c_offsets{{0, frame_size * 2}};
    ActCompute(context.Attr<int>("activation"), place,
               g.slice(c_offsets, extents), g.slice(c_offsets, extents));
    auto c = g.slice(c_offsets, extents);  // output candidate

    // calculate final output
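    // With origin_mode the update gate weights the previous hidden state, as
    // in the original GRU formulation; otherwise it weights the candidate.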
    if (context.Attr<bool>("origin_mode")) {
      h.device(place) = c + u * (h_p - c);  // (1 - u) * c + u * h_p
    } else {
      h.device(place) = u * (c - h_p) + h_p;  // u * c + (1 - u) * h_p
    }
  }
};

template <typename DeviceContext, typename T>
class GRUUnitGradKernel : public framework::OpKernel<T> {
 public:
  template <typename Device, typename X, typename Y, typename DX, typename DY>
  void ActGradCompute(const int act_type, const Device& d, X x, Y y, DX dx,
                      DY dy) const {
    // x is a dummy argument and won't be used, even in Relu (y is used instead)
    if (act_type == identity)
      dx.device(d) = dy;
    else if (act_type == sigmoid)
      SigmoidGradFunctor<T>()(d, x, y, dy, dx);
    else if (act_type == tanh)
      TanhGradFunctor<T>()(d, x, y, dy, dx);
    else if (act_type == relu)
      ReluGradFunctor<T>()(d, x, y, dy, dx);
    else
      PADDLE_THROW(platform::errors::Unimplemented(
          "Unsupported activation type, only supports identity, sigmoid, tanh "
          "and relu."));
  }

  void Compute(const framework::ExecutionContext& context) const override {
    auto* input = context.Input<Tensor>("Input");
    auto* hidden_prev = context.Input<Tensor>("HiddenPrev");
    auto* weight = context.Input<Tensor>("Weight");
    auto* gate = context.Input<Tensor>("Gate");
    auto* reset_hidden_prev = context.Input<Tensor>("ResetHiddenPrev");
    auto* hidden_grad = context.Input<Tensor>(framework::GradVarName("Hidden"));
    auto* input_grad = context.Output<Tensor>(framework::GradVarName("Input"));
    auto* hidden_prev_grad =
        context.Output<Tensor>(framework::GradVarName("HiddenPrev"));
    auto* weight_grad =
        context.Output<Tensor>(framework::GradVarName("Weight"));
    auto* bias_grad = context.Output<Tensor>(framework::GradVarName("Bias"));
    Tensor gate_grad;
    Tensor reset_hidden_prev_grad;

    const T* hidden_prev_data = hidden_prev->data<T>();
    const T* weight_data = weight->data<T>();
    T* gate_grad_data =
        gate_grad.mutable_data<T>(input->dims(), context.GetPlace());
    const T* reset_hidden_prev_data = reset_hidden_prev->data<T>();
    T* reset_hidden_prev_grad_data = reset_hidden_prev_grad.mutable_data<T>(
        reset_hidden_prev->dims(), context.GetPlace());

    auto h_p = EigenMatrix<T>::From(*hidden_prev);
    auto g = EigenMatrix<T>::From(*gate);
    auto d_h = EigenMatrix<T>::From(*hidden_grad);
    auto d_g = EigenMatrix<T>::From(gate_grad);
    auto d_r_h_p = EigenMatrix<T>::From(reset_hidden_prev_grad);
    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();

    int batch_size = input->dims()[0];
    int frame_size = hidden_prev->dims()[1];

    Eigen::array<int, 2> extents{{batch_size, frame_size}};
    Eigen::array<int, 2> u_offsets{{0, 0}};
    auto u = g.slice(u_offsets, extents);  // update gate
    Eigen::array<int, 2> r_offsets{{0, frame_size}};
    auto r = g.slice(r_offsets, extents);  // reset gate
    Eigen::array<int, 2> c_offsets{{0, frame_size * 2}};
    auto c = g.slice(c_offsets, extents);  // output candidate

    // backward for unactivated update gate
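    // In origin mode h = u * h_p + (1 - u) * c, so dh/du = h_p - c and
    // dh/dc = 1 - u; in the default mode h = u * c + (1 - u) * h_p, so
    // dh/du = c - h_p and dh/dc = u.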
    if (context.Attr<bool>("origin_mode")) {
      ActGradCompute(context.Attr<int>("gate_activation"), place, u, u,
                     d_g.slice(u_offsets, extents), d_h * (h_p - c));
      // backward for unactivated output candidate
      ActGradCompute(context.Attr<int>("activation"), place, c, c,
                     d_g.slice(c_offsets, extents), d_h * (1 - u));
    } else {
      ActGradCompute(context.Attr<int>("gate_activation"), place, u, u,
                     d_g.slice(u_offsets, extents), d_h * (c - h_p));
      // backward for unactivated output candidate
      ActGradCompute(context.Attr<int>("activation"), place, c, c,
                     d_g.slice(c_offsets, extents), d_h * u);
    }
    // backward for reset_hidden_prev
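    // d_r_h_p = d_g[:, 2*frame_size:] * W_state^T (beta = 0 overwrites).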
    auto blas = math::GetBlas<DeviceContext, T>(context);
    blas.GEMM(false, true, batch_size, frame_size, frame_size, 1,
              gate_grad_data + frame_size * 2, frame_size * 3,
              weight_data + frame_size * frame_size * 2, frame_size, 0,
              reset_hidden_prev_grad_data, frame_size);
    // backward for unactivated reset gate
    ActGradCompute(context.Attr<int>("gate_activation"), place, r, r,
                   d_g.slice(r_offsets, extents), d_r_h_p * h_p);
    // backward for weight
    if (weight_grad) {
      T* weight_grad_data = weight_grad->mutable_data<T>(context.GetPlace());
      // backward for state_weight
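      // dW_state = (r * h_p)^T * d_g[:, 2*frame_size:]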
      blas.GEMM(true, false, frame_size, frame_size, batch_size, 1,
                reset_hidden_prev_data, frame_size,
                gate_grad_data + frame_size * 2, frame_size * 3, 0,
                weight_grad_data + frame_size * frame_size * 2, frame_size);

      // backward for update_gate_weight and reset_gate_weight
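      // dW_gate = h_p^T * d_g[:, 0:2*frame_size]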
      blas.GEMM(true, false, frame_size, frame_size * 2, batch_size, 1,
                hidden_prev_data, frame_size, gate_grad_data, frame_size * 3, 0,
                weight_grad_data, frame_size * 2);
    }
    // backward for hidden_prev
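    // h_p receives gradient through the reset path (d_r_h_p * r), directly
    // through h (weighted by u in origin mode, 1 - u otherwise), and through
    // the gate pre-activations via the GEMM below.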
    if (hidden_prev_grad) {
      T* hidden_prev_grad_data =
          hidden_prev_grad->mutable_data<T>(context.GetPlace());
      auto d_h_p = EigenMatrix<T>::From(*hidden_prev_grad);
      if (context.Attr<bool>("origin_mode")) {
        d_h_p.device(place) = d_r_h_p * r + d_h * u;
      } else {
        d_h_p.device(place) = d_r_h_p * r + d_h * (1 - u);
      }
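      // d_h_p += d_g[:, 0:2*frame_size] * W_gate^T; beta = 1 accumulates
      // onto the elementwise terms above.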
      blas.GEMM(false, true, batch_size, frame_size, frame_size * 2, 1,
                gate_grad_data, frame_size * 3, weight_data, frame_size * 2, 1,
                hidden_prev_grad_data, frame_size);
    }
    // backward for input
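    // x enters every gate pre-activation additively, so d_x is simply d_g.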
    if (input_grad) {
      input_grad->mutable_data<T>(context.GetPlace());
      auto d_x = EigenMatrix<T>::From(*input_grad);
      d_x.device(place) = d_g;
    }
    // backward for bias
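    // d_b is d_g summed over the batch dimension, mirroring the forward
    // broadcast of the bias row.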
    if (bias_grad) {
      bias_grad->mutable_data<T>(context.GetPlace());
      auto d_b = EigenVector<T>::Flatten(*bias_grad);
      d_b.device(place) = d_g.sum(Eigen::array<int, 1>({{0}}));
    }
  }
};

}  // namespace operators
}  // namespace paddle