diff --git a/paddle/math/MathFunctions.cpp b/paddle/math/MathFunctions.cpp
index 190c73142f4d97ff48ab74668795813d3a4415ef..f48119aa511578b21602a225277f01b4c6a9e9a8 100644
--- a/paddle/math/MathFunctions.cpp
+++ b/paddle/math/MathFunctions.cpp
@@ -307,6 +307,29 @@ void vAdd(const int n, const T* a, const T* b, T* r) {
       n);
 }
 
+DEFINE_MATRIX_BINARY_OP(vInvSqrt, b = 1.0f / std::sqrt(a));
+template <class T>
+void vInvSqrt(const int n, const T* a, T* r) {
+  hl_cpu_apply_binary_op<T, binary::vInvSqrt<T>, 0, 0>(
+      binary::vInvSqrt<T>(), const_cast<T*>(a), r, 1, n, n, n);
+}
+
+DEFINE_MATRIX_BINARY_OP(vLog1p, b = std::log(1.0f + a));
+template <class T>
+void vLog1p(const int n, const T* a, T* r) {
+  hl_cpu_apply_binary_op<T, binary::vLog1p<T>, 0, 0>(
+      binary::vLog1p<T>(), const_cast<T*>(a), r, 1, n, n, n);
+}
+
+DEFINE_MATRIX_BINARY_OP(vTanh, T tmp = -2.0 * a;
+                        tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp;
+                        b = 2.0 / (1.0 + std::exp(tmp)) - 1.0);
+template <class T>
+void vTanh(const int n, const T* a, T* r) {
+  hl_cpu_apply_binary_op<T, binary::vTanh<T>, 0, 0>(
+      binary::vTanh<T>(), const_cast<T*>(a), r, 1, n, n, n);
+}
+
 template void vExp(const int n, const float* a, float* r);
 template void vExp(const int n, const double* a, double* r);
 template void vLog(const int n, const float* a, float* r);
@@ -315,6 +338,11 @@ template void vPow(const int n, const float* a, const float b, float* r);
 template void vPow(const int n, const double* a, const double b, double* r);
 template void vAdd(const int n, const float* a, const float* b, float* r);
 template void vAdd(const int n, const double* a, const double* b, double* r);
-
+template void vInvSqrt(const int n, const double* a, double* r);
+template void vInvSqrt(const int n, const float* a, float* r);
+template void vLog1p(const int n, const float* a, float* r);
+template void vLog1p(const int n, const double* a, double* r);
+template void vTanh(const int n, const float* a, float* r);
+template void vTanh(const int n, const double* a, double* r);
 #endif
 } // namespace paddle