Commit 25976fe7 authored by: tensor-tang

Optimize the sigmoid and tanh implementations.

Parent: 2eb46c2b
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include <cmath>
#include <string> #include <string>
#include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/cpu_info.h"
#ifdef __AVX__ #ifdef __AVX__
...@@ -31,15 +32,24 @@ namespace math { ...@@ -31,15 +32,24 @@ namespace math {
#define SIGMOID_THRESHOLD_MAX 13.0 #define SIGMOID_THRESHOLD_MAX 13.0
// Element-wise exponential: y[i] = exp(x[i]) for i in [0, n).
// The generic version is a plain scalar loop; when MKL is available the
// specializations below dispatch to the vectorized MKL VML routines instead.
template <typename T>
inline void vec_exp(const int n, const T* x, T* y) {
  for (int i = 0; i < n; ++i) {
    y[i] = std::exp(x[i]);
  }
}

#ifdef PADDLE_WITH_MKLML
// MKL-accelerated specializations (vsExp = float, vdExp = double).
// NOTE(review): callers invoke these in place (x == y); MKL VML documents
// in-place operation as supported — confirm for the deployed MKL version.
template <>
inline void vec_exp<float>(const int n, const float* x, float* y) {
  platform::dynload::vsExp(n, x, y);
}

template <>
inline void vec_exp<double>(const int n, const double* x, double* y) {
  platform::dynload::vdExp(n, x, y);
}
#endif
template <typename T, platform::jit::cpu_isa_t isa = platform::jit::isa_any> template <typename T, platform::jit::cpu_isa_t isa = platform::jit::isa_any>
inline void vec_identity(const int n, const T* x, T* y) { inline void vec_identity(const int n, const T* x, T* y) {
// do nothing // do nothing
...@@ -51,15 +61,23 @@ inline void vec_sigmoid(const int n, const T* x, T* y) { ...@@ -51,15 +61,23 @@ inline void vec_sigmoid(const int n, const T* x, T* y) {
const T min = SIGMOID_THRESHOLD_MIN; const T min = SIGMOID_THRESHOLD_MIN;
const T max = SIGMOID_THRESHOLD_MAX; const T max = SIGMOID_THRESHOLD_MAX;
for (int i = 0; i < n; ++i) { for (int i = 0; i < n; ++i) {
T tmp = (x[i] < min) ? min : ((x[i] > max) ? max : x[i]); y[i] = (x[i] < min) ? min : ((x[i] > max) ? max : x[i]);
y[i] = sigmoid<T>(tmp); y[i] = static_cast<T>(0) - y[i];
}
vec_exp<T>(n, y, y);
for (int i = 0; i < n; ++i) {
y[i] = static_cast<T>(1) / (static_cast<T>(1) + y[i]);
} }
} }
template <typename T, platform::jit::cpu_isa_t isa = platform::jit::isa_any> template <typename T, platform::jit::cpu_isa_t isa = platform::jit::isa_any>
inline void vec_tanh(const int n, const T* x, T* y) { inline void vec_tanh(const int n, const T* x, T* y) {
for (int i = 0; i < n; ++i) { for (int i = 0; i < n; ++i) {
y[i] = tanh<T>(x[i]); y[i] = static_cast<T>(2) * x[i];
}
vec_exp<T>(n, y, y);
for (int i = 0; i < n; ++i) {
y[i] = static_cast<T>(2) * y[i] - static_cast<T>(1);
} }
} }
......
...@@ -33,12 +33,13 @@ inline T _sigmoid(T x) { ...@@ -33,12 +33,13 @@ inline T _sigmoid(T x) {
const T min = SIGMOID_THRESHOLD_MIN; const T min = SIGMOID_THRESHOLD_MIN;
const T max = SIGMOID_THRESHOLD_MAX; const T max = SIGMOID_THRESHOLD_MAX;
T tmp = (x < min) ? min : ((x > max) ? max : x); T tmp = (x < min) ? min : ((x > max) ? max : x);
return 1. / (1. + std::exp(-tmp)); return static_cast<T>(1) / (static_cast<T>(1) + std::exp(-tmp));
} }
template <typename T> template <typename T>
inline T _tanh(T x) { inline T _tanh(T x) {
return 2. * _sigmoid<T>(2. * x) - 1.; return static_cast<T>(2) * _sigmoid<T>(static_cast<T>(2) * x) -
static_cast<T>(1);
} }
template <typename T> template <typename T>
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册