/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <math.h>

#include <stdexcept>
#include <string>

#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/hostdevice.h"

namespace paddle {
namespace operators {
namespace math {
namespace detail {

#define SIGMOID_THRESHOLD_MIN -40.0
#define SIGMOID_THRESHOLD_MAX 13.0
#define EXP_MAX_INPUT 40.0

enum ActivationType {
  kSigmoid,
  KSigmoidV2,
  kReLU,
  kTanh,
  kTanhV2,
  kIdentity,
};

inline ActivationType GetActivationType(const std::string &type) {
  if (type == "sigmoid") {
    return ActivationType::kSigmoid;
  } else if (type == "sigmoid_v2") {
    return ActivationType::KSigmoidV2;
  } else if (type == "relu") {
    return ActivationType::kReLU;
  } else if (type == "tanh") {
    return ActivationType::kTanh;
  } else if (type == "tanh_v2") {
    return ActivationType::kTanhV2;
  } else if (type == "identity" || type == "") {
    return ActivationType::kIdentity;
  }
  throw std::invalid_argument("The input type is not supported");
}

namespace forward {

template <typename T>
DEVICE T Identity(const T a) {
  return a;
}

template <typename T>
DEVICE T Relu(const T a) {
  return a > static_cast<T>(0.0) ? a : static_cast<T>(0.0);
}

// Clip the input to [SIGMOID_THRESHOLD_MIN, SIGMOID_THRESHOLD_MAX] before
// evaluating, so that exp(-tmp) cannot overflow.
template <typename T>
DEVICE T Sigmoid(const T a) {
  const T min = SIGMOID_THRESHOLD_MIN;
  const T max = SIGMOID_THRESHOLD_MAX;
  T tmp = (a < min) ? min : ((a > max) ? max : a);
  return static_cast<T>(1.0) / (static_cast<T>(1.0) + exp(-tmp));
}

/*
 * Don't limit the input to a threshold range.
 */
template <typename T>
DEVICE T SigmoidV2(const T a) {
  return static_cast<T>(1.0) / (static_cast<T>(1.0) + exp(-a));
}

// tanh(a) = 2 / (1 + exp(-2a)) - 1; the intermediate -2a is clipped at
// EXP_MAX_INPUT to keep exp() from overflowing.
template <typename T>
DEVICE T Tanh(const T a) {
  T tmp = -2.0 * a;
  tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp;
  return (2.0 / (1.0 + exp(tmp))) - 1.0;
}

/*
 * Don't limit the input to a threshold range.
 */
template <typename T>
DEVICE T TanhV2(const T a) {
  T tmp = -2.0 * a;
  return (2.0 / (1.0 + exp(tmp))) - 1.0;
}

}  // namespace forward

// Backward functors: `a` is the gradient flowing in from the next layer and
// `b` is the output of the corresponding forward activation; each returns the
// gradient with respect to the activation's input.
namespace backward {

template <typename T>
DEVICE T Identity(const T a, const T b) {
  return a;
}

template <typename T>
DEVICE T Relu(const T a, const T b) {
  return a * (b > 0.0 ? 1.0 : 0.0);
}

template <typename T>
DEVICE T Sigmoid(const T a, const T b) {
  // d(sigmoid)/dx = y * (1 - y), expressed in terms of the forward output b.
  return a * b * (1.0 - b);
}

template <typename T>
DEVICE T Tanh(const T a, const T b) {
  // d(tanh)/dx = 1 - y^2, expressed in terms of the forward output b.
  return a * (1.0 - b * b);
}

}  // namespace backward
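// Illustrative sketch (not part of the library API): the backward functors
// recompute gradients from the forward output alone, so no input needs to be
// saved. Checking the sigmoid pair numerically in a host-side CPU build:
//
//   float x  = 0.5f;
//   float y  = forward::Sigmoid(x);         // sigmoid(0.5) ~= 0.6225
//   float dx = backward::Sigmoid(1.0f, y);  // y * (1 - y)  ~= 0.2350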
template <typename T>
struct Active {
  typedef T (*Act)(T);
  typedef T (*ActGrad)(T, T);
};

#ifdef PADDLE_WITH_CUDA

static DEVICE Active<float>::Act kActFloat[] = {
    &forward::Sigmoid<float>, &forward::SigmoidV2<float>,
    &forward::Relu<float>,    &forward::Tanh<float>,
    &forward::TanhV2<float>,  &forward::Identity<float>};

static DEVICE Active<float>::ActGrad kActGradFloat[] = {
    &backward::Sigmoid<float>, &backward::Sigmoid<float>,
    &backward::Relu<float>,    &backward::Tanh<float>,
    &backward::Tanh<float>,    &backward::Identity<float>};

static DEVICE Active<double>::Act kActDouble[] = {
    &forward::Sigmoid<double>, &forward::SigmoidV2<double>,
    &forward::Relu<double>,    &forward::Tanh<double>,
    &forward::TanhV2<double>,  &forward::Identity<double>};

static DEVICE Active<double>::ActGrad kActGradDouble[] = {
    &backward::Sigmoid<double>, &backward::Sigmoid<double>,
    &backward::Relu<double>,    &backward::Tanh<double>,
    &backward::Tanh<double>,    &backward::Identity<double>};

namespace forward {
inline DEVICE float activation(float a, int index) {
  return kActFloat[index](a);
}

inline DEVICE double activation(double a, int index) {
  return kActDouble[index](a);
}
}  // namespace forward

namespace backward {
inline DEVICE float activation(float a, float b, int index) {
  return kActGradFloat[index](a, b);
}

inline DEVICE double activation(double a, double b, int index) {
  return kActGradDouble[index](a, b);
}
}  // namespace backward

#else  // PADDLE_WITH_CUDA

// Note(qili93): the function-pointer tables above do not work under HIP; they
// trigger a compile error when calling detail::forward::lstm<T>(), which uses
// ActivationType in lstm_kernel.h:
//   lstm_gpu_kernel.h:33:17: error: unsupported indirect call to function
//
// To-do(qili93): fix this after the HIP issue is fixed:
// https://github.com/ROCm-Developer-Tools/HIP/issues/2186

namespace forward {

inline DEVICE float activation(float a, int index) {
  switch (index) {
    case 0:
      return Sigmoid<float>(a);
    case 1:
      return SigmoidV2<float>(a);
    case 2:
      return Relu<float>(a);
    case 3:
      return Tanh<float>(a);
    case 4:
      return TanhV2<float>(a);
    case 5:
      return Identity<float>(a);
    default:
      return 0.0f;
  }
}

inline DEVICE double activation(double a, int index) {
  switch (index) {
    case 0:
      return Sigmoid<double>(a);
    case 1:
      return SigmoidV2<double>(a);
    case 2:
      return Relu<double>(a);
    case 3:
      return Tanh<double>(a);
    case 4:
      return TanhV2<double>(a);
    case 5:
      return Identity<double>(a);
    default:
      return 0.0;
  }
}

}  // namespace forward

namespace backward {

inline DEVICE float activation(float a, float b, int index) {
  switch (index) {
    case 0:
      return Sigmoid<float>(a, b);
    case 1:
      return Sigmoid<float>(a, b);
    case 2:
      return Relu<float>(a, b);
    case 3:
      return Tanh<float>(a, b);
    case 4:
      return Tanh<float>(a, b);
    case 5:
      return Identity<float>(a, b);
    default:
      return 0.0f;
  }
}

inline DEVICE double activation(double a, double b, int index) {
  switch (index) {
    case 0:
      return Sigmoid<double>(a, b);
    case 1:
      return Sigmoid<double>(a, b);
    case 2:
      return Relu<double>(a, b);
    case 3:
      return Tanh<double>(a, b);
    case 4:
      return Tanh<double>(a, b);
    case 5:
      return Identity<double>(a, b);
    default:
      return 0.0;
  }
}

}  // namespace backward

#endif  // PADDLE_WITH_CUDA
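// Illustrative sketch (not part of the library API): ActivationType values
// double as indices into the dispatch tables / switch above, so a caller can
// resolve an activation by name and apply it, e.g. in a host-side CPU build:
//
//   auto type = GetActivationType("tanh");            // kTanh == 3
//   float y   = forward::activation(0.5f, type);      // tanh(0.5)
//   float dx  = backward::activation(1.0f, y, type);  // 1 - y * y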
#ifdef __AVX__

namespace forward {
namespace avx {
__m256 Relu(const __m256 a);
__m256 Sigmoid(const __m256 a);
__m256 SigmoidV2(const __m256 a);
__m256 Tanh(const __m256 a);
__m256 TanhV2(const __m256 a);
__m256 Identity(const __m256 a);
}  // namespace avx
}  // namespace forward

namespace backward {
namespace avx {
__m256 Relu(const __m256 a, const __m256 b);
__m256 Sigmoid(const __m256 a, const __m256 b);
__m256 Tanh(const __m256 a, const __m256 b);
__m256 Identity(const __m256 a, const __m256 b);
}  // namespace avx
}  // namespace backward

static Active<__m256>::Act kActAvx[] = {
    &forward::avx::Sigmoid, &forward::avx::SigmoidV2,
    &forward::avx::Relu,    &forward::avx::Tanh,
    &forward::avx::TanhV2,  &forward::avx::Identity};

static Active<__m256>::ActGrad kActGradAvx[] = {
    &backward::avx::Sigmoid, &backward::avx::Sigmoid,
    &backward::avx::Relu,    &backward::avx::Tanh,
    &backward::avx::Tanh,    &backward::avx::Identity};

namespace forward {
inline __m256 activation(__m256 a, int index) { return kActAvx[index](a); }
}  // namespace forward

namespace backward {
inline __m256 activation(__m256 a, __m256 b, int index) {
  return kActGradAvx[index](a, b);
}
}  // namespace backward

#endif  // __AVX__

}  // namespace detail
}  // namespace math
}  // namespace operators
}  // namespace paddle
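// Illustrative AVX sketch (assumes an __AVX__ build; not part of the library
// API): the __m256 overload of activation() applies the same activation to
// eight packed floats at once through the kActAvx table:
//
//   __m256 x = _mm256_set1_ps(0.5f);
//   __m256 y = paddle::operators::math::detail::forward::activation(
//       x, paddle::operators::math::detail::kTanh);  // 8 lanes of tanh(0.5)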