/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <type_traits>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/operators/activation_op.h"
#include "paddle/phi/kernels/funcs/detail/activation_functions.h"
#include "paddle/phi/kernels/funcs/gru_compute.h"

namespace phi {
namespace funcs {
namespace detail {
using Array1 = Eigen::DSizes<int64_t, 1>;
template <typename T,
          int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenVector = paddle::framework::EigenVector<T, MajorType, IndexType>;

#if !defined(__NVCC__) && !defined(__HIPCC__)  // @{ Group for GRU CPU
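// Scalar (per-element) CPU path for the GRU reset stage: applies the reset
// functor to the update gate, reset gate and reset output of every element in
// the frame. When old_version is false the gate layout is swapped (reset gate
// first) and the per-element reset_bias is passed through to the functor.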
template <class OpResetOutput, typename T>
void hl_naive_gru_forward_reset_output(OpResetOutput op_reset_output,
                                       T *gate_value,
                                       T *reset_output_value,
                                       const T *prev_output_value,
                                       int frame_size,
                                       ActivationType active_gate,
                                       bool old_version = true,
                                       const T *reset_bias = nullptr) {
  T r_value_update_gate;
  T r_value_reset_gate;
  T r_value_reset_output;
  T r_prev_out = 0;
  T r_reset_bias = 0;
  T *update_gate = nullptr;
  T *reset_gate = nullptr;
  if (old_version) {
    update_gate = gate_value;
    reset_gate = gate_value + frame_size;
  } else {
    reset_gate = gate_value;
    update_gate = gate_value + frame_size;
  }
  for (int i = 0; i < frame_size; i++) {
    r_value_update_gate = update_gate[i];
    r_value_reset_gate = reset_gate[i];
    if (!old_version) {
      r_value_reset_output = reset_output_value[i];
      r_reset_bias = reset_bias[i];
    }
    if (prev_output_value) {
      r_prev_out = prev_output_value[i];
    }

    op_reset_output(&r_value_update_gate,
                    &r_value_reset_gate,
                    &r_prev_out,
                    &r_value_reset_output,
                    active_gate,
                    &r_reset_bias,
                    old_version);

    update_gate[i] = r_value_update_gate;
    reset_gate[i] = r_value_reset_gate;
    reset_output_value[i] = r_value_reset_output;
  }
}

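// Scalar CPU path for the GRU output stage: combines the update gate, the
// candidate (frame) state and the previous hidden state into the final output
// for every element in the frame.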
template <class OpFinalOutput, typename T>
void hl_naive_gru_forward_final_output(OpFinalOutput op_final_output,
                                       T *gate_value,
                                       const T *prev_output_value,
                                       T *output_value,
                                       int frame_size,
                                       ActivationType active_node,
                                       bool origin_mode,
                                       bool old_version = true) {
  T r_value_update_gate;
  T r_value_frame_state;
  T r_prev_out = 0;
  T r_output;
  T *update_gate;
  if (old_version) {
    update_gate = gate_value;
  } else {
    update_gate = gate_value + frame_size;
  }
  T *frame_state = gate_value + frame_size * 2;

  for (int i = 0; i < frame_size; i++) {
    r_value_update_gate = update_gate[i];
    r_value_frame_state = frame_state[i];
    if (prev_output_value) {
      r_prev_out = prev_output_value[i];
    }

    op_final_output(&r_value_update_gate,
                    &r_value_frame_state,
                    &r_prev_out,
                    &r_output,
                    active_node,
                    origin_mode);

    frame_state[i] = r_value_frame_state;
    output_value[i] = r_output;
  }
}

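// AVX path for the reset stage. The frame is processed in 8-float blocks; if
// frame_size is not a multiple of 8, the last full block is pre-loaded before
// the main loop and processed again at the end, so any overlapping elements
// are recomputed from their original gate values.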
template <class OpResetOutput, typename T>
void hl_avx_gru_forward_reset_output(OpResetOutput op_reset_output,
                                     T *gate_value,
                                     T *reset_output_value,
                                     const T *prev_output_value,
                                     int frame_size,
                                     ActivationType active_gate,
                                     bool old_version = true,
                                     const T *reset_bias = nullptr) {
#ifdef __AVX__
  __m256 r_value_update_gate, r_value_update_gate_last = _mm256_set1_ps(0.0f);
  __m256 r_value_reset_gate, r_value_reset_gate_last = _mm256_set1_ps(0.0f);
  __m256 r_value_reset_output;
  __m256 r_prev_out = _mm256_set1_ps(0.0f),
         r_prev_out_last = _mm256_set1_ps(0.0f);
  __m256 r_reset_bias = _mm256_set1_ps(0.0f);
  T *update_gate;
  T *reset_gate;
  if (old_version) {
    update_gate = gate_value;
    reset_gate = gate_value + frame_size;
  } else {
    reset_gate = gate_value;
    update_gate = gate_value + frame_size;
  }
  int block = 8;
  const int n = frame_size;
  const int rest = n % block;
  const int end = n - rest;
  int i = 0;

  if (rest > 0) {
    i = n - block;
    r_value_update_gate_last =
        _mm256_loadu_ps((const float *)(update_gate + i));
    r_value_reset_gate_last = _mm256_loadu_ps((const float *)(reset_gate + i));
    if (prev_output_value) {
      r_prev_out_last = _mm256_loadu_ps((const float *)(prev_output_value + i));
    }
  }

  for (i = 0; i < end; i += block) {
    r_value_update_gate = _mm256_loadu_ps((const float *)(update_gate + i));
    r_value_reset_gate = _mm256_loadu_ps((const float *)(reset_gate + i));
    if (prev_output_value) {
      r_prev_out = _mm256_loadu_ps((const float *)(prev_output_value + i));
    }
    if (!old_version) {
      r_reset_bias = _mm256_loadu_ps((const float *)(reset_bias + i));
      r_value_reset_output =
          _mm256_loadu_ps((const float *)(reset_output_value + i));
    }

    op_reset_output(&r_value_update_gate,
                    &r_value_reset_gate,
                    &r_prev_out,
                    &r_value_reset_output,
                    active_gate,
                    &r_reset_bias,
                    old_version);

    _mm256_storeu_ps(reinterpret_cast<float *>(update_gate + i),
                     r_value_update_gate);
    _mm256_storeu_ps(reinterpret_cast<float *>(reset_gate + i),
                     r_value_reset_gate);
    _mm256_storeu_ps(reinterpret_cast<float *>(reset_output_value + i),
                     r_value_reset_output);
  }

  if (rest > 0) {
    i = n - block;

    op_reset_output(&r_value_update_gate_last,
                    &r_value_reset_gate_last,
                    &r_prev_out_last,
                    &r_value_reset_output,
                    active_gate,
                    &r_reset_bias,
                    old_version);

    _mm256_storeu_ps(reinterpret_cast<float *>(update_gate + i),
                     r_value_update_gate_last);
    _mm256_storeu_ps(reinterpret_cast<float *>(reset_gate + i),
                     r_value_reset_gate_last);
    _mm256_storeu_ps(reinterpret_cast<float *>(reset_output_value + i),
                     r_value_reset_output);
  }
#endif
}

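// AVX path for the output stage, using the same 8-float blocking and
// tail-block handling as hl_avx_gru_forward_reset_output above.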
template <class OpFinalOutput, typename T>
void hl_avx_gru_forward_final_output(OpFinalOutput op_final_output,
                                     T *gate_value,
                                     const T *prev_output_value,
                                     T *output_value,
                                     int frame_size,
                                     ActivationType active_node,
                                     bool origin_mode,
                                     bool old_version = true) {
#ifdef __AVX__
  __m256 r_value_update_gate, r_value_update_gate_last = _mm256_set1_ps(0.0f);
  __m256 r_value_frame_state, r_value_frame_state_last = _mm256_set1_ps(0.0f);
  __m256 r_prev_out = _mm256_set1_ps(0.0f),
         r_prev_out_last = _mm256_set1_ps(0.0f);
  __m256 r_output;
  T *update_gate;
  if (old_version) {
    update_gate = gate_value;
  } else {
    update_gate = gate_value + frame_size;
  }

  T *frame_state = gate_value + frame_size * 2;
  int block = 8;
  const int n = frame_size;
  const int rest = n % block;
  const int end = n - rest;
  int i = 0;

  if (rest > 0) {
    i = n - block;
    r_value_update_gate_last =
        _mm256_loadu_ps((const float *)(update_gate + i));
    r_value_frame_state_last =
        _mm256_loadu_ps((const float *)(frame_state + i));
    if (prev_output_value) {
      r_prev_out_last = _mm256_loadu_ps((const float *)(prev_output_value + i));
    }
  }

  for (i = 0; i < end; i += block) {
    r_value_update_gate = _mm256_loadu_ps((const float *)(update_gate + i));
    r_value_frame_state = _mm256_loadu_ps((const float *)(frame_state + i));
    if (prev_output_value) {
      r_prev_out = _mm256_loadu_ps((const float *)(prev_output_value + i));
    }

    op_final_output(&r_value_update_gate,
                    &r_value_frame_state,
                    &r_prev_out,
                    &r_output,
                    active_node,
                    origin_mode);

    _mm256_storeu_ps(reinterpret_cast<float *>(frame_state + i),
                     r_value_frame_state);
    _mm256_storeu_ps(reinterpret_cast<float *>(output_value + i), r_output);
  }

  if (rest > 0) {
    i = n - block;
    op_final_output(&r_value_update_gate_last,
                    &r_value_frame_state_last,
                    &r_prev_out_last,
                    &r_output,
                    active_node,
                    origin_mode);

    _mm256_storeu_ps(reinterpret_cast<float *>(frame_state + i),
                     r_value_frame_state_last);
    _mm256_storeu_ps(reinterpret_cast<float *>(output_value + i), r_output);
  }

#endif
}

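// Eigen-based reset stage used when old_version is false: applies sigmoid to
// both gates, then reset_output = (reset_output + reset_bias) * reset_gate.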
template <typename T>
inline void forward_reset_outputV2(
    const paddle::platform::CPUDeviceContext &context,
    phi::funcs::GRUMetaValue<T> value,
    int frame_size) {
  auto &place = *context.eigen_device();
  auto value_reset_gate =
      typename EigenVector<T>::Type(value.gate_value, Array1(frame_size));
  auto value_update_gate = typename EigenVector<T>::Type(
      value.gate_value + frame_size, Array1(frame_size));
  auto value_reset_output = typename EigenVector<T>::Type(
      value.reset_output_value, Array1(frame_size));
  auto value_reset_bias =
      typename EigenVector<T>::ConstType(value.reset_bias, Array1(frame_size));
  paddle::operators::SigmoidFunctor<T>()(
      place, value_reset_gate, value_reset_gate);
  paddle::operators::SigmoidFunctor<T>()(
      place, value_update_gate, value_update_gate);
  value_reset_output.device(place) =
      (value_reset_output + value_reset_bias) * value_reset_gate;
}

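// Batch-level dispatch for the reset stage: the Eigen path (above) when
// old_version is false, otherwise the AVX kernel for float frames of at least
// 8 elements and the scalar kernel as fallback. The pointers inside `value`
// are advanced by one frame per batch step.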
template <class OpResetOutput, typename T>
inline void forward_reset_output(
    OpResetOutput op_reset_output,
    phi::funcs::GRUMetaValue<T> value,
    int frame_size,
    int batch_size,
    ActivationType active_gate,
    bool old_version = true,
    const paddle::platform::CPUDeviceContext *context = nullptr) {
  for (int b = 0; b < batch_size; b++) {
    if (!old_version) {
      // use eigen
      forward_reset_outputV2(*context, value, frame_size);
    } else {
      if (OpResetOutput::avx && (frame_size > static_cast<int>(8 - 1)) &&
          (sizeof(T) == 4)) {
        hl_avx_gru_forward_reset_output(op_reset_output,
                                        value.gate_value,
                                        value.reset_output_value,
                                        value.prev_out_value,
                                        frame_size,
                                        active_gate,
                                        old_version,
                                        value.reset_bias);
      } else {
        hl_naive_gru_forward_reset_output(op_reset_output,
                                          value.gate_value,
                                          value.reset_output_value,
                                          value.prev_out_value,
                                          frame_size,
                                          active_gate,
                                          old_version,
                                          value.reset_bias);
      }
    }
    value.gate_value += frame_size * 3;
    value.reset_output_value += frame_size;
    if (value.prev_out_value) {
      value.prev_out_value += frame_size;
    }
  }
}

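// Eigen-based output stage used when old_version is false: tanh on the
// candidate state, then output = (1 - update_gate) * candidate, plus
// update_gate * prev_out when a previous hidden state is present.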
template <typename T>
inline void forward_final_outputV2(
    const paddle::platform::CPUDeviceContext &context,
    phi::funcs::GRUMetaValue<T> value,
    int frame_size) {
  auto &place = *context.eigen_device();
  auto value_update_gate = typename EigenVector<T>::Type(
      value.gate_value + frame_size, Array1(frame_size));
  auto value_frame_state = typename EigenVector<T>::Type(
      value.gate_value + 2 * frame_size, Array1(frame_size));
  auto value_output =
      typename EigenVector<T>::Type(value.output_value, Array1(frame_size));
  paddle::operators::TanhFunctor<T>()(
      place, value_frame_state, value_frame_state);
  value_output.device(place) =
      (static_cast<T>(1.0) - value_update_gate) * value_frame_state;
  if (value.prev_out_value) {
    auto value_prev_out = typename EigenVector<T>::ConstType(
        value.prev_out_value, Array1(frame_size));
    value_output.device(place) =
        value_output + value_update_gate * value_prev_out;
  }
}

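// Batch-level dispatch for the output stage, mirroring forward_reset_output:
// Eigen path for the new kernel, AVX or scalar element-wise path otherwise.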
template <class OpFinalOutput, typename T>
inline void forward_final_output(
    OpFinalOutput op_final_output,
    phi::funcs::GRUMetaValue<T> value,
    int frame_size,
    int batch_size,
    ActivationType active_node,
    bool origin_mode,
    bool old_version = true,
    const paddle::platform::CPUDeviceContext *context = nullptr) {
  for (int b = 0; b < batch_size; b++) {
    if (!old_version) {
      // eigen
      forward_final_outputV2(*context, value, frame_size);
    } else {
      if (OpFinalOutput::avx && (frame_size > static_cast<int>(8 - 1)) &&
          (sizeof(T) == 4)) {
        hl_avx_gru_forward_final_output(op_final_output,
                                        value.gate_value,
                                        value.prev_out_value,
                                        value.output_value,
                                        frame_size,
                                        active_node,
                                        origin_mode,
                                        old_version);
      } else {
        hl_naive_gru_forward_final_output(op_final_output,
                                          value.gate_value,
                                          value.prev_out_value,
                                          value.output_value,
                                          frame_size,
                                          active_node,
                                          origin_mode,
                                          old_version);
      }
    }
    value.gate_value += frame_size * 3;
    value.output_value += frame_size;
    if (value.prev_out_value) {
      value.prev_out_value += frame_size;
    }
  }
}

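// Scalar backward pass through the GRU output stage: from the output gradient,
// computes gradients for the update gate and candidate state and updates the
// previous-hidden-state gradient when one is provided.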
template <class OpStateGrad, typename T>
void hl_naive_gru_backward_state_grad(OpStateGrad op_state_grad,
                                      T *gate_value,
                                      T *gate_grad,
                                      const T *prev_out_value,
                                      T *prev_out_grad,
                                      T *output_grad,
                                      int frame_size,
                                      ActivationType active_node,
                                      bool origin_mode) {
  T r_update_gate_value;
  T r_update_gate_grad;
  T r_frame_state_value;
  T r_frame_state_grad;
  T r_out_grad;
  T r_prev_out_value = 0;
  T r_prev_out_grad = 0;
  T *update_gate_value = gate_value;
  T *update_gate_grad = gate_grad;
  T *frame_state_value = gate_value + frame_size * 2;
  T *frame_state_grad = gate_grad + frame_size * 2;

  for (int i = 0; i < frame_size; i++) {
    r_update_gate_value = update_gate_value[i];
    r_frame_state_value = frame_state_value[i];
    r_out_grad = output_grad[i];
    if (prev_out_value) {
      r_prev_out_value = prev_out_value[i];
    }
    if (prev_out_grad) {
      r_prev_out_grad = prev_out_grad[i];
    }

    op_state_grad(&r_update_gate_value,
                  &r_update_gate_grad,
                  &r_frame_state_value,
                  &r_frame_state_grad,
                  &r_prev_out_value,
                  &r_prev_out_grad,
                  &r_out_grad,
                  active_node,
                  origin_mode);

    update_gate_grad[i] = r_update_gate_grad;
    frame_state_grad[i] = r_frame_state_grad;
    if (prev_out_grad) {
      prev_out_grad[i] = r_prev_out_grad;
    }
  }
}

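// Scalar backward pass through the GRU reset stage: computes gradients for the
// update and reset gates from the reset-output gradient and updates the
// previous-hidden-state gradient when one is provided.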
template <class OpResetGrad, typename T>
void hl_naive_gru_backward_reset_grad(OpResetGrad op_reset_grad,
                                      T *gate_value,
                                      T *gate_grad,
                                      const T *prev_out_value,
                                      T *prev_out_grad,
                                      T *reset_output_grad,
                                      int frame_size,
                                      ActivationType active_gate) {
  T r_update_gate_value;
  T r_update_gate_grad;
  T r_reset_gate_value;
  T r_reset_gate_grad;
  T r_reset_output_grad = 0;
  T r_prev_out_value = 0;
  T r_prev_out_grad = 0;
  T *update_gate_value = gate_value;
  T *update_gate_grad = gate_grad;
  T *reset_gate_value = gate_value + frame_size;
  T *reset_gate_grad = gate_grad + frame_size;

  for (int i = 0; i < frame_size; i++) {
    r_update_gate_value = update_gate_value[i];
    r_update_gate_grad = update_gate_grad[i];
    r_reset_gate_value = reset_gate_value[i];

    if (prev_out_value && prev_out_grad) {
      r_reset_output_grad = reset_output_grad[i];
    }
    if (prev_out_value) {
      r_prev_out_value = prev_out_value[i];
    }
    if (prev_out_grad) {
      r_prev_out_grad = prev_out_grad[i];
    }

    op_reset_grad(&r_update_gate_value,
                  &r_update_gate_grad,
                  &r_reset_gate_value,
                  &r_reset_gate_grad,
                  &r_prev_out_value,
                  &r_prev_out_grad,
                  &r_reset_output_grad,
                  active_gate);

    update_gate_grad[i] = r_update_gate_grad;
    reset_gate_grad[i] = r_reset_gate_grad;
    if (prev_out_grad) {
      prev_out_grad[i] = r_prev_out_grad;
    }
  }
}

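// AVX variant of the state backward pass; assumes frame_size is a multiple of
// 8 (the dispatcher below checks this before selecting it).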
template <class OpStateGrad, typename T>
void hl_avx_gru_backward_state_grad(OpStateGrad op_state_grad,
                                    T *gate_value,
                                    T *gate_grad,
                                    const T *prev_out_value,
                                    T *prev_out_grad,
                                    T *output_grad,
                                    int frame_size,
                                    ActivationType active_node,
                                    bool origin_mode) {
#ifdef __AVX__
  __m256 r_update_gate_value;
  __m256 r_update_gate_grad;
  __m256 r_frame_state_value;
  __m256 r_frame_state_grad;
  __m256 r_out_grad;
  __m256 r_prev_out_value = _mm256_set1_ps(0.0f);
  __m256 r_prev_out_grad = _mm256_set1_ps(0.0f);
  __m256 *update_gate_value = reinterpret_cast<__m256 *>(gate_value);
  __m256 *update_gate_grad = reinterpret_cast<__m256 *>(gate_grad);
  __m256 *frame_state_value =
      reinterpret_cast<__m256 *>(gate_value + frame_size * 2);
  __m256 *frame_state_grad =
      reinterpret_cast<__m256 *>(gate_grad + frame_size * 2);

  for (int i = 0; i < frame_size / 8; i++) {
    r_update_gate_value = update_gate_value[i];
    r_frame_state_value = frame_state_value[i];
    r_out_grad = (reinterpret_cast<__m256 *>(output_grad))[i];
    if (prev_out_value) {
      r_prev_out_value = (reinterpret_cast<const __m256 *>(prev_out_value))[i];
    }
    if (prev_out_grad) {
      r_prev_out_grad = (reinterpret_cast<__m256 *>(prev_out_grad))[i];
    }

    op_state_grad(&r_update_gate_value,
                  &r_update_gate_grad,
                  &r_frame_state_value,
                  &r_frame_state_grad,
                  &r_prev_out_value,
                  &r_prev_out_grad,
                  &r_out_grad,
                  active_node,
                  origin_mode);

    update_gate_grad[i] = r_update_gate_grad;
    frame_state_grad[i] = r_frame_state_grad;
    if (prev_out_grad) {
      (reinterpret_cast<__m256 *>(prev_out_grad))[i] = r_prev_out_grad;
    }
  }
#endif
}

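// AVX variant of the reset backward pass, with the same multiple-of-8
// frame_size requirement as hl_avx_gru_backward_state_grad.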
template <class OpResetGrad, typename T>
void hl_avx_gru_backward_reset_grad(OpResetGrad op_reset_grad,
                                    T *gate_value,
                                    T *gate_grad,
                                    const T *prev_out_value,
                                    T *prev_out_grad,
                                    T *reset_output_grad,
                                    int frame_size,
                                    ActivationType active_gate) {
#ifdef __AVX__
  __m256 r_update_gate_value;
  __m256 r_update_gate_grad;
  __m256 r_reset_gate_value;
  __m256 r_reset_gate_grad;
  __m256 r_reset_output_grad = _mm256_set1_ps(0.0f);
  __m256 r_prev_out_value = _mm256_set1_ps(0.0f);
  __m256 r_prev_out_grad = _mm256_set1_ps(0.0f);
  __m256 *update_gate_value = reinterpret_cast<__m256 *>(gate_value);
  __m256 *update_gate_grad = reinterpret_cast<__m256 *>(gate_grad);
  __m256 *reset_gate_value =
      reinterpret_cast<__m256 *>(gate_value + frame_size);
  __m256 *reset_gate_grad = reinterpret_cast<__m256 *>(gate_grad + frame_size);

  for (int i = 0; i < frame_size / 8; i++) {
    r_update_gate_value = update_gate_value[i];
    r_update_gate_grad = update_gate_grad[i];
    r_reset_gate_value = reset_gate_value[i];

    if (prev_out_value && prev_out_grad) {
      r_reset_output_grad = (reinterpret_cast<__m256 *>(reset_output_grad))[i];
    }
    if (prev_out_value) {
      r_prev_out_value = (reinterpret_cast<const __m256 *>(prev_out_value))[i];
    }
    if (prev_out_grad) {
      r_prev_out_grad = (reinterpret_cast<__m256 *>(prev_out_grad))[i];
    }

    op_reset_grad(&r_update_gate_value,
                  &r_update_gate_grad,
                  &r_reset_gate_value,
                  &r_reset_gate_grad,
                  &r_prev_out_value,
                  &r_prev_out_grad,
                  &r_reset_output_grad,
                  active_gate);

    update_gate_grad[i] = r_update_gate_grad;
    reset_gate_grad[i] = r_reset_gate_grad;
    if (prev_out_grad) {
      (reinterpret_cast<__m256 *>(prev_out_grad))[i] = r_prev_out_grad;
    }
  }
#endif
}

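// Scalar fused backward pass for the new-version GRU kernel: one functor call
// produces the gradients of all three gates, the reset output and the previous
// hidden state for each frame element.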
template <class OpGruGrad, typename T>
inline void hl_naive_gru_backward(OpGruGrad op_gru_grad,
                                  T *gate_value,
                                  T *gate_grad,
                                  const T *prev_out_value,
                                  T *prev_out_grad,
                                  T *reset_output_value,
                                  T *reset_output_grad,
                                  T *output_grad,
                                  int frame_size,
                                  ActivationType active_node,
                                  ActivationType active_gate) {
  T r_value_reset_gate;
  T r_grad_reset_gate;
  T r_value_update_gate;
  T r_grad_update_gate;
  T r_value_frame_state;
  T r_grad_frame_state;
  T r_value_prev_out = 0;
  T r_grad_prev_out = 0;
  T r_grad_output;
  T r_value_reset_output;
  T r_grad_reset_output = 0;
  T *reset_gate_value = gate_value;
  T *reset_gate_grad = gate_grad;
  T *update_gate_value = gate_value + frame_size;
  T *update_gate_grad = gate_grad + frame_size;
  T *frame_state_value = gate_value + 2 * frame_size;
  T *frame_state_grad = gate_grad + 2 * frame_size;

  for (int i = 0; i < frame_size; ++i) {
    r_value_reset_gate = reset_gate_value[i];
    r_grad_reset_gate = reset_gate_grad[i];
    r_value_update_gate = update_gate_value[i];
    r_grad_update_gate = update_gate_grad[i];
    r_value_frame_state = frame_state_value[i];
    r_grad_frame_state = frame_state_grad[i];
    if (prev_out_value) {
      r_value_prev_out = prev_out_value[i];
    }
    if (prev_out_grad) {
      r_grad_prev_out = prev_out_grad[i];
    }
    r_grad_output = output_grad[i];
    r_value_reset_output = reset_output_value[i];
    if (prev_out_value && prev_out_grad) {
      r_grad_reset_output = reset_output_grad[i];
    }

    op_gru_grad(&r_value_reset_gate,
                &r_grad_reset_gate,
                &r_value_update_gate,
                &r_grad_update_gate,
                &r_value_frame_state,
                &r_grad_frame_state,
                &r_value_prev_out,
                &r_grad_prev_out,
                &r_grad_output,
                &r_value_reset_output,
                &r_grad_reset_output,
                active_node,
                active_gate);

    reset_gate_grad[i] = r_grad_reset_gate;
    update_gate_grad[i] = r_grad_update_gate;
    frame_state_grad[i] = r_grad_frame_state;
    if (prev_out_grad) {
      prev_out_grad[i] = r_grad_prev_out;
    }
    if (prev_out_value && prev_out_grad) {
      reset_output_grad[i] = r_grad_reset_output;
    }
  }
}

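// AVX variant of the fused backward pass. It iterates frame_size / 8 blocks
// and does not handle a remainder, so frame_size is expected to be a multiple
// of 8 when this path is used.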
template <class OpGruGrad, typename T>
inline void hl_avx_gru_backward(OpGruGrad op_gru_grad,
                                T *gate_value,
                                T *gate_grad,
                                const T *prev_out_value,
                                T *prev_out_grad,
                                T *reset_output_value,
                                T *reset_output_grad,
                                T *output_grad,
                                int frame_size,
                                ActivationType active_node,
                                ActivationType active_gate) {
#ifdef __AVX__
  __m256 r_value_reset_gate;
  __m256 r_grad_reset_gate;
  __m256 r_value_update_gate;
  __m256 r_grad_update_gate;
  __m256 r_value_frame_state;
  __m256 r_grad_frame_state;
  __m256 r_value_prev_out = _mm256_set1_ps(0.0f);
  __m256 r_grad_prev_out = _mm256_set1_ps(0.0f);
  __m256 r_grad_output;
  __m256 r_value_reset_output;
  __m256 r_grad_reset_output = _mm256_set1_ps(0.0f);
  __m256 *reset_gate_value = reinterpret_cast<__m256 *>(gate_value);
  __m256 *reset_gate_grad = reinterpret_cast<__m256 *>(gate_grad);
  __m256 *update_gate_value =
      reinterpret_cast<__m256 *>(gate_value + frame_size);
  __m256 *update_gate_grad = reinterpret_cast<__m256 *>(gate_grad + frame_size);
  __m256 *frame_state_value =
      reinterpret_cast<__m256 *>(gate_value + 2 * frame_size);
  __m256 *frame_state_grad =
      reinterpret_cast<__m256 *>(gate_grad + 2 * frame_size);

  for (int i = 0; i < frame_size / 8; ++i) {
    r_value_reset_gate = reset_gate_value[i];
    r_grad_reset_gate = reset_gate_grad[i];
    r_value_update_gate = update_gate_value[i];
    r_grad_update_gate = update_gate_grad[i];
    r_value_frame_state = frame_state_value[i];
    r_grad_frame_state = frame_state_grad[i];
    if (prev_out_value) {
      r_value_prev_out = (reinterpret_cast<const __m256 *>(prev_out_value))[i];
    }
    if (prev_out_grad) {
      r_grad_prev_out = (reinterpret_cast<__m256 *>(prev_out_grad))[i];
    }
    r_grad_output = (reinterpret_cast<__m256 *>(output_grad))[i];
    r_value_reset_output = (reinterpret_cast<__m256 *>(reset_output_value))[i];
    if (prev_out_value && prev_out_grad) {
      r_grad_reset_output = (reinterpret_cast<__m256 *>(reset_output_grad))[i];
    }

    op_gru_grad(&r_value_reset_gate,
                &r_grad_reset_gate,
                &r_value_update_gate,
                &r_grad_update_gate,
                &r_value_frame_state,
                &r_grad_frame_state,
                &r_value_prev_out,
                &r_grad_prev_out,
                &r_grad_output,
                &r_value_reset_output,
                &r_grad_reset_output,
                active_node,
                active_gate);

    reset_gate_grad[i] = r_grad_reset_gate;
    update_gate_grad[i] = r_grad_update_gate;
    frame_state_grad[i] = r_grad_frame_state;
    if (prev_out_grad) {
      (reinterpret_cast<__m256 *>(prev_out_grad))[i] = r_grad_prev_out;
    }
    if (prev_out_value && prev_out_grad) {
      (reinterpret_cast<__m256 *>(reset_output_grad))[i] = r_grad_reset_output;
    }
  }
#endif
}

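// Batch-level dispatch for the state backward pass: AVX kernel when
// frame_size is a multiple of 8 and T is float, scalar kernel otherwise.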
template <class OpStateGrad, typename T>
inline void backward_state_grad(OpStateGrad op_state_grad,
                                phi::funcs::GRUMetaValue<T> value,
                                phi::funcs::GRUMetaGrad<T> grad,
                                int frame_size,
                                int batch_size,
                                ActivationType active_node,
                                bool origin_mode) {
  for (int b = 0; b < batch_size; b++) {
    if (OpStateGrad::avx && !(frame_size & (8 - 1)) && (sizeof(T) == 4)) {
      hl_avx_gru_backward_state_grad(op_state_grad,
                                     value.gate_value,
                                     grad.gate_grad,
                                     value.prev_out_value,
                                     grad.prev_out_grad,
                                     grad.output_grad,
                                     frame_size,
                                     active_node,
                                     origin_mode);
    } else {
      hl_naive_gru_backward_state_grad(op_state_grad,
                                       value.gate_value,
                                       grad.gate_grad,
                                       value.prev_out_value,
                                       grad.prev_out_grad,
                                       grad.output_grad,
                                       frame_size,
                                       active_node,
                                       origin_mode);
    }

    value.gate_value += frame_size * 3;
    if (value.prev_out_value) {
      value.prev_out_value += frame_size;
    }

    grad.gate_grad += frame_size * 3;
    grad.output_grad += frame_size;
    if (grad.prev_out_grad) {
      grad.prev_out_grad += frame_size;
    }
  }
}

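// Batch-level dispatch for the reset backward pass, mirroring
// backward_state_grad.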
template <class OpResetGrad, typename T>
inline void backward_reset_grad(OpResetGrad op_reset_grad,
                                phi::funcs::GRUMetaValue<T> value,
                                phi::funcs::GRUMetaGrad<T> grad,
                                int frame_size,
                                int batch_size,
                                ActivationType active_gate) {
  for (int b = 0; b < batch_size; b++) {
    if (OpResetGrad::avx && !(frame_size & (8 - 1)) && (sizeof(T) == 4)) {
      hl_avx_gru_backward_reset_grad(op_reset_grad,
                                     value.gate_value,
                                     grad.gate_grad,
                                     value.prev_out_value,
                                     grad.prev_out_grad,
                                     grad.reset_output_grad,
                                     frame_size,
                                     active_gate);
    } else {
      hl_naive_gru_backward_reset_grad(op_reset_grad,
                                       value.gate_value,
                                       grad.gate_grad,
                                       value.prev_out_value,
                                       grad.prev_out_grad,
                                       grad.reset_output_grad,
                                       frame_size,
                                       active_gate);
    }

    value.gate_value += frame_size * 3;
    if (value.prev_out_value) {
      value.prev_out_value += frame_size;
    }

    grad.gate_grad += frame_size * 3;
    grad.reset_output_grad += frame_size;
    if (grad.prev_out_grad) {
      grad.prev_out_grad += frame_size;
    }
  }
}

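// Eigen-based backward pass for one frame of the new-version GRU kernel:
// applies the sigmoid/tanh gradient functors to obtain the gate gradients from
// the output gradient and accumulates into the previous-hidden-state gradient.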
template <typename T>
inline void gru_backward(const paddle::platform::CPUDeviceContext &context,
                         phi::funcs::GRUMetaValue<T> value,
                         phi::funcs::GRUMetaGrad<T> grad,
                         int frame_size) {
  auto &place = *context.eigen_device();

  auto value_reset_gate =
      typename EigenVector<T>::Type(value.gate_value, Array1(frame_size));
  auto grad_reset_gate =
      typename EigenVector<T>::Type(grad.gate_grad, Array1(frame_size));
  auto value_update_gate = typename EigenVector<T>::Type(
      value.gate_value + frame_size, Array1(frame_size));
  auto grad_update_gate = typename EigenVector<T>::Type(
      grad.gate_grad + frame_size, Array1(frame_size));
  auto value_frame_state = typename EigenVector<T>::Type(
      value.gate_value + frame_size * 2, Array1(frame_size));
  auto grad_frame_state = typename EigenVector<T>::Type(
      grad.gate_grad + frame_size * 2, Array1(frame_size));

  auto grad_output =
      typename EigenVector<T>::Type(grad.output_grad, Array1(frame_size));
  auto value_reset_output = typename EigenVector<T>::Type(
      value.reset_output_value, Array1(frame_size));
  auto grad_reset_output =
      typename EigenVector<T>::Type(grad.reset_output_grad, Array1(frame_size));

  if (value.prev_out_value) {
    auto value_prev_out = typename EigenVector<T>::ConstType(
        value.prev_out_value, Array1(frame_size));
    paddle::operators::SigmoidGradFunctor<T>()(
        place,
        1 /*useless*/,
        value_update_gate,
        (value_prev_out - value_frame_state) * grad_output,
        grad_update_gate);
  } else {
    paddle::operators::SigmoidGradFunctor<T>()(
        place,
        1 /*useless*/,
        value_update_gate,
        static_cast<T>(-1) * value_frame_state * grad_output,
        grad_update_gate);
  }
  if (grad.prev_out_grad) {
    auto grad_prev_out =
        typename EigenVector<T>::Type(grad.prev_out_grad, Array1(frame_size));
    grad_prev_out.device(place) =
        grad_prev_out + grad_output * value_update_gate;
  }
  paddle::operators::TanhGradFunctor<T>()(
      place,
      1 /*useless*/,
      value_frame_state,
      grad_output * (static_cast<T>(1.0) - value_update_gate),
      grad_frame_state);
  paddle::operators::SigmoidGradFunctor<T>()(
      place,
      1 /*useless*/,
      value_reset_gate,
      value_reset_output / value_reset_gate * grad_frame_state,
      grad_reset_gate);
  if (value.prev_out_value && grad.prev_out_grad) {
    grad_reset_output.device(place) = value_reset_gate * grad_frame_state;
  }
}

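// Runs gru_backward over the batch, advancing the value and grad pointers by
// one frame per step; op_gru_grad is accepted but not used here — the Eigen
// implementation above is what actually runs.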
template <class OpGruGrad, typename T>
inline void cpu_gru_backward(const paddle::platform::CPUDeviceContext &context,
                             OpGruGrad op_gru_grad,
                             phi::funcs::GRUMetaValue<T> value,
                             phi::funcs::GRUMetaGrad<T> grad,
                             int frame_size,
                             int batch_size,
                             ActivationType active_node,
                             ActivationType active_gate) {
  for (int b = 0; b < batch_size; ++b) {
    // eigen
    gru_backward(context, value, grad, frame_size);

    value.gate_value += frame_size * 3;
    value.reset_output_value += frame_size;
    if (value.prev_out_value) {
      value.prev_out_value += frame_size;
    }

    grad.gate_grad += frame_size * 3;
    grad.output_grad += frame_size;
    grad.reset_output_grad += frame_size;
    if (grad.prev_out_grad) {
      grad.prev_out_grad += frame_size;
    }
  }
}

#endif  // @} End Group for GRU CPU

}  // namespace detail
}  // namespace funcs
}  // namespace phi