/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/jit_kernel.h"
#include <string>
#include "paddle/fluid/operators/math/jit_kernel_macro.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/macros.h"

#ifdef __AVX__
#include <immintrin.h>
#endif

namespace paddle {
namespace operators {
namespace math {
#ifdef __AVX__
namespace detail {
// Vectorized exp() over 8 packed floats; implemented elsewhere in math/detail.
__m256 Exp(__m256 a);
}  // namespace detail
#endif

namespace jitkernel {
namespace jit = platform::jit;

#ifdef __AVX__
typedef enum { kSigmoid, kRelu, kTanh, kIdentity } act_type;

// Runtime-dispatched AVX activation: one virtual call per 8-float lane.
class AVXAct {
 public:
  virtual ~AVXAct() = default;
  virtual __m256 Compute(__m256 x) const = 0;
};

template <act_type type>
class AVXActImpl : public AVXAct {
 public:
  // Primary template is never a valid activation; only the explicit
  // specializations below may be instantiated.
  __m256 Compute(__m256 x) const override { PADDLE_THROW("Unknown type!"); }
};

// sigmoid(x) = 1 / (1 + exp(-x)), with x clipped to the numerically
// safe range [SIGMOID_THRESHOLD_MIN, SIGMOID_THRESHOLD_MAX].
template <>
__m256 AVXActImpl<kSigmoid>::Compute(__m256 x) const {
  __m256 ones = _mm256_set1_ps(1.0f);
  x = _mm256_max_ps(x, _mm256_set1_ps(SIGMOID_THRESHOLD_MIN));
  x = _mm256_min_ps(x, _mm256_set1_ps(SIGMOID_THRESHOLD_MAX));
  x = _mm256_sub_ps(_mm256_set1_ps(0.0f), x);
  x = detail::Exp(x);
  x = _mm256_add_ps(ones, x);
  return _mm256_div_ps(ones, x);
}

// tanh(x) = 2 / (1 + exp(-2x)) - 1, with -2x clipped to EXP_MAX_INPUT
// before the exp to avoid overflow.
template <>
__m256 AVXActImpl<kTanh>::Compute(__m256 x) const {
  __m256 ones = _mm256_set1_ps(1.0f);
  x = _mm256_mul_ps(_mm256_set1_ps(-2.0f), x);
  x = _mm256_min_ps(x, _mm256_set1_ps(EXP_MAX_INPUT));
  x = detail::Exp(x);
  x = _mm256_add_ps(ones, x);
  x = _mm256_div_ps(_mm256_set1_ps(2.0f), x);
  return _mm256_sub_ps(x, ones);
}

// relu(x) = max(x, 0)
template <>
__m256 AVXActImpl<kRelu>::Compute(__m256 x) const {
  return _mm256_max_ps(x, _mm256_setzero_ps());
}

// identity(x) = x
template <>
__m256 AVXActImpl<kIdentity>::Compute(__m256 x) const {
  return x;
}
#endif

/* LSTM JitKernel */
template <typename T, jit::cpu_isa_t isa, jit_block>
class LSTMKernelImpl : public LSTMKernel<T> {
 public:
  // d: hidden size. act_gate/act_cand/act_cell select the activation
  // applied to the gates, the candidate and the cell output respectively
  // ("sigmoid", "relu", "tanh", or "identity"/"" for pass-through).
  explicit LSTMKernelImpl(int d, const std::string& act_gate,
                          const std::string& act_cand,
                          const std::string& act_cell)
      : LSTMKernel<T>() {
    d_ = d;
    d2_ = d * 2;
    d3_ = d * 3;
    // Map an activation name to a cached element-wise kernel of width n.
    auto GetActKernel = [&](const std::string& type,
                            int n) -> std::shared_ptr<const VActKernel<T>> {
      if (type == "sigmoid") {
        return std::dynamic_pointer_cast<const VActKernel<T>>(
            KernelPool::Instance().template Get<VSigmoidKernel<T>>(n));
      } else if (type == "relu") {
        return std::dynamic_pointer_cast<const VActKernel<T>>(
            KernelPool::Instance().template Get<VReluKernel<T>>(n));
      } else if (type == "tanh") {
        return std::dynamic_pointer_cast<const VActKernel<T>>(
            KernelPool::Instance().template Get<VTanhKernel<T>>(n));
      } else if (type == "identity" || type == "") {
        return std::dynamic_pointer_cast<const VActKernel<T>>(
            KernelPool::Instance().template Get<VIdentityKernel<T>>(n));
      }
      PADDLE_THROW("Not support type: %s", type);
    };
    // The three gates (i, f, o) are activated in one 3*d-wide pass.
    act_gate_3d_ = GetActKernel(act_gate, d * 3);
    act_cand_d_ = GetActKernel(act_cand, d);
    act_cell_d_ = GetActKernel(act_cell, d);
    vmul_d_ = KernelPool::Instance().template Get<VMulKernel<T>>(d);
    vadd_d_ = KernelPool::Instance().template Get<VAddKernel<T>>(d);
#ifdef __AVX__
    // AVX activation objects used by the INTRI8_FLOAT specialization below.
    auto GetAVXAct = [&](const std::string& type) -> std::unique_ptr<AVXAct> {
      if (type == "sigmoid") {
        return std::unique_ptr<AVXAct>(new AVXActImpl<kSigmoid>());
      } else if (type == "relu") {
        return std::unique_ptr<AVXAct>(new AVXActImpl<kRelu>());
      } else if (type == "tanh") {
        return std::unique_ptr<AVXAct>(new AVXActImpl<kTanh>());
      } else if (type == "identity" || type == "") {
        return std::unique_ptr<AVXAct>(new AVXActImpl<kIdentity>());
      }
      PADDLE_THROW("Not support type: %s", type);
    };
    avx_act_gate_ = GetAVXAct(act_gate);
    avx_act_cand_ = GetAVXAct(act_cand);
    avx_act_cell_ = GetAVXAct(act_cell);
#endif
  }

  // One LSTM step. gates holds [cand | i | f | o], each of width d_, and is
  // used as scratch space; ct/ht receive the new cell and hidden state.
  void ComputeCtHt(T* gates, const T* ct_1, T* ct, T* ht) const override {
    // gates: W_ch, W_ih, W_fh, W_oh
    act_gate_3d_->Compute(gates + d_, gates + d_);

    /* C_t = C_t-1 * fgated + cand_gated * igated */
    act_cand_d_->Compute(gates, gates);
    vmul_d_->Compute(gates, gates + d_, gates + d_);
    vmul_d_->Compute(ct_1, gates + d2_, gates + d2_);
    vadd_d_->Compute(gates + d_, gates + d2_, ct);

    /* H_t = act_cell(C_t) * ogated */
    act_cell_d_->Compute(ct, gates + d2_);
    vmul_d_->Compute(gates + d2_, gates + d3_, ht);
  }

 private:
  int d_, d2_, d3_;
  std::shared_ptr<const VActKernel<T>> act_gate_3d_, act_cand_d_, act_cell_d_;
  std::shared_ptr<const VMulKernel<T>> vmul_d_;
  std::shared_ptr<const VAddKernel<T>> vadd_d_;
#ifdef __AVX__
  std::unique_ptr<const AVXAct> avx_act_gate_, avx_act_cand_, avx_act_cell_;
#endif
};

// Fully-vectorized ComputeCtHt for float with d == 8 (one __m256 per gate).
#define INTRI8_FLOAT(isa)                                                    \
  template <>                                                                \
  void LSTMKernelImpl<float, isa, kEQ8>::ComputeCtHt(                        \
      float* gates, const float* ct_1, float* ct, float* ht) const {         \
    /* gates: W_ch, W_ih, W_fh, W_oh */                                      \
    __m256 c, i, f, o;                                                       \
    c = _mm256_loadu_ps(gates);                                              \
    i = _mm256_loadu_ps(gates + 8);                                          \
    f = _mm256_loadu_ps(gates + 16);                                         \
    o = _mm256_loadu_ps(gates + 24);                                         \
    /* C_t = C_t-1 * fgated + cand_gated * igated*/                          \
    c = _mm256_mul_ps(avx_act_cand_->Compute(c), avx_act_gate_->Compute(i)); \
    i = _mm256_loadu_ps(ct_1);                                               \
    f = _mm256_mul_ps(i, avx_act_gate_->Compute(f));                         \
    f = _mm256_add_ps(c, f);                                                 \
    _mm256_storeu_ps(ct, f);                                                 \
    /* H_t = act_cell(C_t) * ogated */                                       \
    o = _mm256_mul_ps(avx_act_cell_->Compute(f), avx_act_gate_->Compute(o)); \
    _mm256_storeu_ps(ht, o);                                                 \
  }

// TODO(TJ): optimize keq16

#ifdef __AVX__
INTRI8_FLOAT(jit::avx);
#endif
#ifdef __AVX2__
INTRI8_FLOAT(jit::avx2);
#endif
#ifdef __AVX512F__
INTRI8_FLOAT(jit::avx512f);
#endif

#define JITKERNEL_DECLARE_LSTM(ker_class, ker_dtype)                   \
  template <>                                                          \
  std::shared_ptr<const LSTMKernel<ker_dtype>>                         \
  KernelPool::Get<LSTMKernel<ker_dtype>, int, const std::string&,      \
                  const std::string&, const std::string&>(             \
      int d, const std::string& act_gate, const std::string& act_cand, \
      const std::string& act_cell)

// Cache key: kernel name + dtype + width + the three activation names.
#define JITKERNEL_KEY_LSTM(ker_key, dtype_key) \
  #ker_key #dtype_key + std::to_string(d) + act_gate + act_cand + act_cell

#define JITKERNEL_NEW_LSTM_IMPL(ker, dtype, isa, k)                      \
  p = std::dynamic_pointer_cast<ker<dtype>>(                             \
      std::make_shared<ker##Impl<dtype, isa, k>>(d, act_gate, act_cand,  \
                                                 act_cell))

REGISTER_JITKERNEL_ARGS(lstm, LSTMKernel, JITKERNEL_DECLARE_LSTM,
                        JITKERNEL_KEY_LSTM, JITKERNEL_NEW_LSTM_IMPL);

#undef INTRI8_FLOAT
#undef JITKERNEL_DECLARE_LSTM
#undef JITKERNEL_KEY_LSTM
#undef JITKERNEL_NEW_LSTM_IMPL

}  // namespace jitkernel
}  // namespace math
}  // namespace operators
}  // namespace paddle