/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/math/math_function.h"

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;

enum GRUActivationType { identity = 0, sigmoid = 1, tanh = 2, relu = 3 };

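// Forward kernel for a single GRU unit step. Input holds the per-gate input
// contributions ([batch_size, frame_size * 3], columns laid out as
// [update | reset | candidate]); Weight holds [W_u, W_r]
// (frame_size x 2*frame_size) followed by W_c (frame_size x frame_size).
// With "*" denoting the elementwise product, the kernel computes
//   u = act_gate(x_u + h_prev W_u + b_u)   (update gate)
//   r = act_gate(x_r + h_prev W_r + b_r)   (reset gate)
//   c = act(x_c + (r * h_prev) W_c + b_c)  (output candidate)
//   h = u * (c - h_prev) + h_prev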
template <typename DeviceContext, typename T>
class GRUUnitKernel : public framework::OpKernel<T> {
 public:
  template <typename Device, typename X, typename Y>
  void ActCompute(const int act_type, const Device& d, X x, Y y) const {
    if (act_type == identity)
      y.device(d) = x;
    else if (act_type == sigmoid)
      SigmoidFunctor<T>()(d, x, y);
    else if (act_type == tanh)
      TanhFunctor<T>()(d, x, y);
    else if (act_type == relu)
      ReluFunctor<T>()(d, x, y);
    else
      PADDLE_THROW("unsupported activation type");
  }

  void Compute(const framework::ExecutionContext& context) const override {
    auto* input = context.Input<Tensor>("Input");
    auto* hidden_prev = context.Input<Tensor>("HiddenPrev");
    auto* weight = context.Input<Tensor>("Weight");
    auto* bias = context.Input<Tensor>("Bias");
    auto* gate = context.Output<Tensor>("Gate");
    gate->mutable_data<T>(context.GetPlace());
    auto* reset_hidden_prev = context.Output<Tensor>("ResetHiddenPrev");
    reset_hidden_prev->mutable_data<T>(context.GetPlace());
    auto* hidden = context.Output<Tensor>("Hidden");
    hidden->mutable_data<T>(context.GetPlace());

    int batch_size = input->dims()[0];
    int frame_size = hidden_prev->dims()[1];

    auto x = EigenMatrix<T>::From(*input);
    auto h_p = EigenMatrix<T>::From(*hidden_prev);
    auto g = EigenMatrix<T>::From(*gate);
    auto r_h_p = EigenMatrix<T>::From(*reset_hidden_prev);
    auto h = EigenMatrix<T>::From(*hidden);
    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();

    // calculate unactivated gate outputs
    if (bias) {
      auto b = EigenMatrix<T>::From(*bias);
      g.device(place) = x +
                        b.reshape(Eigen::array<int, 2>({{1, frame_size * 3}}))
                            .broadcast(Eigen::array<int, 2>({{batch_size, 1}}));
    } else {
      g.device(place) = x;
    }
    const T* hidden_prev_data = hidden_prev->data<T>();
    const T* weight_data = weight->data<T>();
    T* gate_data = gate->data<T>();
    T* reset_hidden_prev_data = reset_hidden_prev->data<T>();
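    // gate[:, 0:2*frame_size] += h_prev . [W_u, W_r]
    // (update and reset gate pre-activations)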
    math::gemm<DeviceContext, T>(
        context.template device_context<DeviceContext>(), false, false,
        batch_size, 2 * frame_size, frame_size, 1, hidden_prev_data, frame_size,
        weight_data, frame_size * 2, 1, gate_data, frame_size * 3);

    // calculate activated gates
    Eigen::array<int, 2> extents({{batch_size, frame_size}});
    Eigen::array<int, 2> u_offsets({{0, 0}});
    ActCompute(context.Attr<int>("gate_activation"), place,
               g.slice(u_offsets, extents), g.slice(u_offsets, extents));
    auto u = g.slice(u_offsets, extents);  // update gate
    Eigen::array<int, 2> r_offsets({{0, frame_size}});
    ActCompute(context.Attr<int>("gate_activation"), place,
               g.slice(r_offsets, extents), g.slice(r_offsets, extents));
    auto r = g.slice(r_offsets, extents);  // reset gate
    r_h_p.device(place) = r * h_p;         // reset previous hidden state
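    // gate[:, 2*frame_size:3*frame_size] += (r * h_prev) . W_c
    // (candidate pre-activation)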
    math::gemm<DeviceContext, T>(
        context.template device_context<DeviceContext>(), false, false,
        batch_size, frame_size, frame_size, 1, reset_hidden_prev_data,
        frame_size, weight_data + frame_size * frame_size * 2, frame_size, 1,
        gate_data + frame_size * 2, frame_size * 3);

    Eigen::array<int, 2> c_offsets({{0, frame_size * 2}});
    ActCompute(context.Attr<int>("activation"), place,
               g.slice(c_offsets, extents), g.slice(c_offsets, extents));
    auto c = g.slice(c_offsets, extents);  // output candidate

    // calculate final output
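    // h = u * c + (1 - u) * h_prev, written as u * (c - h_prev) + h_prev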
    h.device(place) = u * (c - h_p) + h_p;
  }
};

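// Backward kernel for the GRU unit: given dL/dHidden, it back-propagates
// through h = u * (c - h_prev) + h_prev and the gate/candidate activations
// to produce gradients w.r.t. Input, HiddenPrev, Weight, and Bias.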
template <typename DeviceContext, typename T>
class GRUUnitGradKernel : public framework::OpKernel<T> {
 public:
  template <typename Device, typename X, typename Y, typename DX, typename DY>
  void ActGradCompute(const int act_type, const Device& d, X x, Y y, DX dx,
                      DY dy) const {
    // x is a dummy argument and is not used, even by Relu (y is used instead)
    if (act_type == identity)
      dx.device(d) = dy;
    else if (act_type == sigmoid)
      SigmoidGradFunctor<T>()(d, x, y, dy, dx);
    else if (act_type == tanh)
      TanhGradFunctor<T>()(d, x, y, dy, dx);
    else if (act_type == relu)
      ReluGradFunctor<T>()(d, x, y, dy, dx);
    else
      PADDLE_THROW("unsupported activation type");
  }

  void Compute(const framework::ExecutionContext& context) const override {
    auto* input = context.Input<Tensor>("Input");
    auto* hidden_prev = context.Input<Tensor>("HiddenPrev");
    auto* weight = context.Input<Tensor>("Weight");
    auto* gate = context.Input<Tensor>("Gate");
    auto* reset_hidden_prev = context.Input<Tensor>("ResetHiddenPrev");
    auto* hidden_grad = context.Input<Tensor>(framework::GradVarName("Hidden"));
    auto* input_grad = context.Output<Tensor>(framework::GradVarName("Input"));
    auto* hidden_prev_grad =
        context.Output<Tensor>(framework::GradVarName("HiddenPrev"));
    auto* weight_grad =
        context.Output<Tensor>(framework::GradVarName("Weight"));
    auto* bias_grad = context.Output<Tensor>(framework::GradVarName("Bias"));
    Tensor gate_grad;
    Tensor reset_hidden_prev_grad;

    const T* hidden_prev_data = hidden_prev->data<T>();
    const T* weight_data = weight->data<T>();
    T* gate_grad_data =
        gate_grad.mutable_data<T>(input->dims(), context.GetPlace());
    const T* reset_hidden_prev_data = reset_hidden_prev->data<T>();
    T* reset_hidden_prev_grad_data = reset_hidden_prev_grad.mutable_data<T>(
        reset_hidden_prev->dims(), context.GetPlace());

    auto h_p = EigenMatrix<T>::From(*hidden_prev);
    auto g = EigenMatrix<T>::From(*gate);
    auto d_h = EigenMatrix<T>::From(*hidden_grad);
    auto d_g = EigenMatrix<T>::From(gate_grad);
    auto d_r_h_p = EigenMatrix<T>::From(reset_hidden_prev_grad);
    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();

    int batch_size = input->dims()[0];
    int frame_size = hidden_prev->dims()[1];

    Eigen::array<int, 2> extents({{batch_size, frame_size}});
    Eigen::array<int, 2> u_offsets({{0, 0}});
    auto u = g.slice(u_offsets, extents);  // update gate
    Eigen::array<int, 2> r_offsets({{0, frame_size}});
    auto r = g.slice(r_offsets, extents);  // reset gate
    Eigen::array<int, 2> c_offsets({{0, frame_size * 2}});
    auto c = g.slice(c_offsets, extents);  // output candidate

    // backward for unactivated update gate
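    // dL/du = dL/dh * (c - h_prev), since h = u * (c - h_prev) + h_prev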
    ActGradCompute(context.Attr<int>("gate_activation"), place, u, u,
                   d_g.slice(u_offsets, extents), d_h * (c - h_p));
    // backward for unactivated output candidate
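    // dL/dc = dL/dh * u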
    ActGradCompute(context.Attr<int>("activation"), place, c, c,
                   d_g.slice(c_offsets, extents), d_h * u);
    // backward for reset_hidden_prev
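    // dL/d(r * h_prev) = dL/dc_preact . W_c^T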
    math::gemm<DeviceContext, T>(
        context.template device_context<DeviceContext>(), false, true,
        batch_size, frame_size, frame_size, 1, gate_grad_data + frame_size * 2,
        frame_size * 3, weight_data + frame_size * frame_size * 2, frame_size,
        0, reset_hidden_prev_grad_data, frame_size);
    // backward for unactivated reset gate
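    // dL/dr = dL/d(r * h_prev) * h_prev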
    ActGradCompute(context.Attr<int>("gate_activation"), place, r, r,
                   d_g.slice(r_offsets, extents), d_r_h_p * h_p);
    // backward for weight
    if (weight_grad) {
      T* weight_grad_data = weight_grad->mutable_data<T>(context.GetPlace());
      // backward for state_weight
      math::gemm<DeviceContext, T>(
          context.template device_context<DeviceContext>(), true, false,
          frame_size, frame_size, batch_size, 1, reset_hidden_prev_data,
          frame_size, gate_grad_data + frame_size * 2, frame_size * 3, 0,
          weight_grad_data + frame_size * frame_size * 2, frame_size);

      // backward for update_gate_weight and reset_gate_weight
      math::gemm<DeviceContext, T>(
          context.template device_context<DeviceContext>(), true, false,
          frame_size, frame_size * 2, batch_size, 1, hidden_prev_data,
          frame_size, gate_grad_data, frame_size * 3, 0, weight_grad_data,
          frame_size * 2);
    }
    // backward for hidden_prev
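    // dL/dh_prev = dL/d(r * h_prev) * r + dL/dh * (1 - u)
    //              + dL/d[u, r]_preact . [W_u, W_r]^T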
    if (hidden_prev_grad) {
      T* hidden_prev_grad_data =
          hidden_prev_grad->mutable_data<T>(context.GetPlace());
      auto d_h_p = EigenMatrix<T>::From(*hidden_prev_grad);
      d_h_p.device(place) = d_r_h_p * r + d_h * (u.constant(T(1)) - u);
      math::gemm<DeviceContext, T>(
          context.template device_context<DeviceContext>(), false, true,
          batch_size, frame_size, frame_size * 2, 1, gate_grad_data,
          frame_size * 3, weight_data, frame_size * 2, 1, hidden_prev_grad_data,
          frame_size);
    }
    // backward for input
    if (input_grad) {
      input_grad->mutable_data<T>(context.GetPlace());
      auto d_x = EigenMatrix<T>::From(*input_grad);
      d_x.device(place) = d_g;
    }
    // backward for bias
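    // dL/db = sum of the pre-activation gate gradients over the batch dim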
    if (bias_grad) {
      bias_grad->mutable_data<T>(context.GetPlace());
      auto d_b = EigenVector<T>::Flatten(*bias_grad);
      d_b.device(place) = d_g.sum(Eigen::array<int, 1>({{0}}));
    }
  }
};

}  // namespace operators
}  // namespace paddle