/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/lstm_compute.h"
#include "paddle/fluid/operators/math/detail/lstm_cpu_kernel.h"
#include "paddle/fluid/operators/math/detail/lstm_kernel.h"

namespace paddle {
namespace operators {
namespace math {

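// CPU specialization of the LSTM unit forward functor: runs the per-frame
// CPU kernel on one batch sample at a time, then advances the packed buffers
// inside `value` to the next sample.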
template <class T>
struct LstmUnitFunctor<platform::CPUDeviceContext, T> {
  static void compute(const platform::CPUDeviceContext& context,
                      LstmMetaValue<T> value, int frame_size, int batch_size,
                      T cell_clip, const detail::ActivationType& gate_act,
                      const detail::ActivationType& cell_act,
                      const detail::ActivationType& cand_act) {
    for (int b = 0; b < batch_size; b++) {
      detail::cpu_lstm_forward(detail::forward::lstm<T>(), value, frame_size,
                               cell_clip, cand_act, gate_act, cell_act);
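      // Advance to the next batch sample: gate_value packs the four LSTM
      // gates per frame, hence the stride of frame_size * 4; the state and
      // output buffers advance by one frame. prev_state_value may be null
      // (e.g. when there is no previous cell state), so it is guarded below.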
      value.gate_value += frame_size * 4;
      value.state_value += frame_size;
      value.state_active_value += frame_size;
      value.output_value += frame_size;
      if (value.prev_state_value) {
        value.prev_state_value += frame_size;
      }
    }
  }
};
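// Illustrative sketch only (the real call site lives in the LSTM operator;
// the variable names below are hypothetical):
//   LstmUnitFunctor<platform::CPUDeviceContext, T>::compute(
//       dev_ctx, lstm_value, frame_size, cur_batch_size, cell_clip,
//       gate_act, cell_act, cand_act);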

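// CPU specialization of the LSTM unit backward functor: mirrors the forward
// pass, computing per-sample gradients and stepping both the value and the
// gradient pointers through the batch.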
template <class T>
struct LstmUnitGradFunctor<platform::CPUDeviceContext, T> {
  static void compute(const platform::CPUDeviceContext& context,
                      LstmMetaValue<T> value, LstmMetaGrad<T> grad,
                      int frame_size, int batch_size, T cell_clip,
                      const detail::ActivationType& gate_act,
                      const detail::ActivationType& cell_act,
                      const detail::ActivationType& cand_act) {
    for (int b = 0; b < batch_size; b++) {
      detail::cpu_lstm_backward(detail::backward::lstm<T>(), value, grad,
                                frame_size, cell_clip, cand_act, gate_act,
                                cell_act);

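      // Step the forward-pass value pointers to the next sample, exactly as
      // in the forward functor.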
      value.gate_value += frame_size * 4;
      value.state_value += frame_size;
      value.state_active_value += frame_size;
      value.output_value += frame_size;
      if (value.prev_state_value) {
        value.prev_state_value += frame_size;
      }

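      // Step the gradient pointers in lockstep with the values.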
      grad.gate_grad += frame_size * 4;
      grad.state_grad += frame_size;
      grad.state_active_grad += frame_size;
      grad.output_grad += frame_size;
      if (grad.prev_state_grad) {
        grad.prev_state_grad += frame_size;
      }
    }
  }
};

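// Explicit instantiations for float and double on the CPU device context.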
template class LstmUnitFunctor<platform::CPUDeviceContext, float>;
template class LstmUnitFunctor<platform::CPUDeviceContext, double>;
template class LstmUnitGradFunctor<platform::CPUDeviceContext, float>;
template class LstmUnitGradFunctor<platform::CPUDeviceContext, double>;

}  // namespace math
}  // namespace operators
}  // namespace paddle