/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "MathFunctions.h"
#include "hl_matrix_apply.cuh"
#include "hl_matrix_ops.cuh"
#include "paddle/utils/DynamicLoad.h"

namespace dynload {

std::once_flag lapack_dso_flag;
void* lapack_dso_handle = nullptr;

/**
 * The following macro definition generates a struct (one per wrapped
 * function) that dynamically loads the corresponding LAPACK routine
 * via operator overloading.
 *
 * note: the routines are resolved from the default dynamically linked libs.
 */
#define DYNAMIC_LOAD_LAPACK_WRAP(__name)                                       \
  struct DynLoad__##__name {                                                   \
    template <typename... Args>                                                \
    auto operator()(Args... args) -> decltype(__name(args...)) {               \
      using lapack_func = decltype(__name(args...)) (*)(Args...);              \
      std::call_once(lapack_dso_flag, GetLapackDsoHandle, &lapack_dso_handle); \
      void* p_##__name = dlsym(lapack_dso_handle, #__name);                    \
      return reinterpret_cast<lapack_func>(p_##__name)(args...);               \
    }                                                                          \
  } __name;  // struct DynLoad__##__name

// clang-format off
#ifdef PADDLE_USE_LAPACK
#ifdef PADDLE_USE_ATLAS
  #define LAPACK_ROUTINE_EACH(__macro) \
    __macro(clapack_sgetrf) \
    __macro(clapack_dgetrf) \
    __macro(clapack_sgetri) \
    __macro(clapack_dgetri)
#else
  #define LAPACK_ROUTINE_EACH(__macro) \
    __macro(LAPACKE_sgetrf) \
    __macro(LAPACKE_dgetrf) \
    __macro(LAPACKE_sgetri) \
    __macro(LAPACKE_dgetri)
#endif

// Only expand the wrappers when LAPACK support is enabled.
LAPACK_ROUTINE_EACH(DYNAMIC_LOAD_LAPACK_WRAP)
#endif
// clang-format on
}  // namespace dynload
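// Illustrative sketch (comment only, not part of the build): after the macro
// expansion above, each wrapped routine becomes a callable object in
// namespace dynload, so a call such as
//
//   int info = dynload::LAPACKE_sgetrf(order, M, N, A, lda, ipiv);
//
// loads the LAPACK shared library once via std::call_once, resolves the
// symbol with dlsym, and forwards the arguments to it. The variable names
// mirror the call sites below and are assumed here only for illustration.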

namespace paddle {

template <>
void gemm<float>(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB,
                 const int M, const int N, const int K, const float alpha,
                 const float* A, const int lda, const float* B, const int ldb,
                 const float beta, float* C, const int ldc) {
  cblas_sgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
              beta, C, ldc);
}

template <>
void gemm<double>(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB,
                  const int M, const int N, const int K, const double alpha,
                  const double* A, const int lda, const double* B,
                  const int ldb, const double beta, double* C, const int ldc) {
  cblas_dgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
              beta, C, ldc);
}

template <>
int getrf<float>(const CBLAS_ORDER order, const int M, const int N, float* A,
                 const int lda, int* ipiv) {
#ifdef PADDLE_USE_LAPACK
#ifdef PADDLE_USE_ATLAS
  return dynload::clapack_sgetrf(order, M, N, A, lda, ipiv);
#else
  return dynload::LAPACKE_sgetrf(order, M, N, A, lda, ipiv);
#endif
#else
  LOG(FATAL) << "Not implemented";
#endif
  return 0;
}

template <>
int getrf<double>(const CBLAS_ORDER order, const int M, const int N, double* A,
                  const int lda, int* ipiv) {
#ifdef PADDLE_USE_LAPACK
#ifdef PADDLE_USE_ATLAS
  return dynload::clapack_dgetrf(order, M, N, A, lda, ipiv);
#else
  return dynload::LAPACKE_dgetrf(order, M, N, A, lda, ipiv);
#endif
#else
  LOG(FATAL) << "Not implemented";
#endif
  return 0;
}

template <>
int getri<float>(const CBLAS_ORDER order, const int N, float* A, const int lda,
                 const int* ipiv) {
#ifdef PADDLE_USE_LAPACK
#ifdef PADDLE_USE_ATLAS
  return dynload::clapack_sgetri(order, N, A, lda, ipiv);
#else
  return dynload::LAPACKE_sgetri(order, N, A, lda, ipiv);
#endif
#else
  LOG(FATAL) << "Not implemented";
#endif
  return 0;
}

template <>
int getri<double>(const CBLAS_ORDER order, const int N, double* A,
                  const int lda, const int* ipiv) {
#ifdef PADDLE_USE_LAPACK
#ifdef PADDLE_USE_ATLAS
  return dynload::clapack_dgetri(order, N, A, lda, ipiv);
#else
  return dynload::LAPACKE_dgetri(order, N, A, lda, ipiv);
#endif
#else
  LOG(FATAL) << "Not implemented";
#endif
  return 0;
}

template <>
void axpy<float>(const int n, const float alpha, const float* x, float* y) {
  cblas_saxpy(n, alpha, x, 1, y, 1);
}

template <>
void axpy<double>(const int n, const double alpha, const double* x,
                  double* y) {
  cblas_daxpy(n, alpha, x, 1, y, 1);
}

template <>
float dotProduct<float>(const int n, const float* x, const float* y) {
  return cblas_sdot(n, x, 1, y, 1);
}

template <>
double dotProduct<double>(const int n, const double* x, const double* y) {
  return cblas_ddot(n, x, 1, y, 1);
}
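
// The element-wise vector kernels below have two compile-time variants: MKL
// VML routines (vsExp/vdExp, vsLn/vdLn, ...) when PADDLE_USE_MKL is defined,
// and portable fallbacks built on hl_cpu_apply_binary_op /
// hl_cpu_apply_ternary_op otherwise. The call site is the same either way;
// for example (a sketch only, buffer names are assumed for illustration):
//
//   paddle::vExp<float>(n, src, dst);  // dst[i] = exp(src[i]) for i in [0, n)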

#ifdef PADDLE_USE_MKL

template <>
void vExp<float>(const int n, const float* a, float* r) {
  vsExp(n, a, r);
}

template <>
void vExp<double>(const int n, const double* a, double* r) {
  vdExp(n, a, r);
}

template <>
void vPow<float>(const int n, const float* a, const float b, float* r) {
  vsPowx(n, a, b, r);
}

template <>
void vPow<double>(const int n, const double* a, const double b, double* r) {
  vdPowx(n, a, b, r);
}

template <>
void vLog<float>(const int n, const float* a, float* r) {
  vsLn(n, a, r);
}

template <>
void vLog<double>(const int n, const double* a, double* r) {
  vdLn(n, a, r);
}

template <>
void vAdd<float>(const int n, const float* a, const float* b, float* r) {
  vsAdd(n, a, b, r);
}

template <>
void vAdd<double>(const int n, const double* a, const double* b, double* r) {
  vdAdd(n, a, b, r);
}

template <>
void vInvSqrt<float>(const int n, const float* a, float* r) {
  vsInvSqrt(n, a, r);
}

template <>
void vInvSqrt<double>(const int n, const double* a, double* r) {
  vdInvSqrt(n, a, r);
}

template <>
void vLog1p<float>(const int n, const float* a, float* r) {
  vsLog1p(n, a, r);
}

template <>
void vLog1p<double>(const int n, const double* a, double* r) {
  vdLog1p(n, a, r);
}

template <>
void vTanh<float>(const int n, const float* a, float* r) {
  vsTanh(n, a, r);
}

template <>
void vTanh<double>(const int n, const double* a, double* r) {
  vdTanh(n, a, r);
}

#else
// Portable fallbacks: each kernel is expressed as an element-wise op and
// applied with the hl_cpu_apply_* helpers.

DEFINE_MATRIX_BINARY_OP(vExp, b = std::exp(a));
template <class T>
void vExp(const int n, const T* a, T* r) {
  hl_cpu_apply_binary_op<T, binary::vExp<T>, 0, 0>(
      binary::vExp<T>(), const_cast<T*>(a), r, 1, n, n, n);
}

DEFINE_MATRIX_BINARY_OP(vLog, b = std::log(a));
template <class T>
void vLog(const int n, const T* a, T* r) {
  hl_cpu_apply_binary_op<T, binary::vLog<T>, 0, 0>(
      binary::vLog<T>(), const_cast<T*>(a), r, 1, n, n, n);
}

DEFINE_MATRIX_BINARY_OP(vInvSqrt, b = 1.0f / std::sqrt(a));
template <class T>
void vInvSqrt(const int n, const T* a, T* r) {
  hl_cpu_apply_binary_op<T, binary::vInvSqrt<T>, 0, 0>(
      binary::vInvSqrt<T>(), const_cast<T*>(a), r, 1, n, n, n);
}

DEFINE_MATRIX_BINARY_OP(vLog1p, b = std::log(1.0f + a));
template <class T>
void vLog1p(const int n, const T* a, T* r) {
  hl_cpu_apply_binary_op<T, binary::vLog1p<T>, 0, 0>(
      binary::vLog1p<T>(), const_cast<T*>(a), r, 1, n, n, n);
}

DEFINE_MATRIX_BINARY_OP(vTanh,
                        T tmp = -2.0 * a;
                        tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp;
                        b = 2.0 / (1.0 + std::exp(tmp)) - 1.0);
template <class T>
void vTanh(const int n, const T* a, T* r) {
  hl_cpu_apply_binary_op<T, binary::vTanh<T>, 0, 0>(
      binary::vTanh<T>(), const_cast<T*>(a), r, 1, n, n, n);
}

DEFINE_MATRIX_BINARY_PARAMETER_OP(vPow, ONE_PARAMETER, b = std::pow(a, p));
template <class T>
void vPow(const int n, const T* a, const T b, T* r) {
  hl_cpu_apply_binary_op<T, binary::vPow<T>, 0, 0>(
      binary::vPow<T>(b), const_cast<T*>(a), r, 1, n, n, n);
}

DEFINE_MATRIX_TERNARY_OP(vAdd, c = a + b);
template <class T>
void vAdd(const int n, const T* a, const T* b, T* r) {
  hl_cpu_apply_ternary_op<T, ternary::vAdd<T>, 0, 0>(
      ternary::vAdd<T>(), const_cast<T*>(a), const_cast<T*>(b), r,
      1, n, n, n, n);
}

template void vExp(const int n, const float* a, float* r);
template void vExp(const int n, const double* a, double* r);
template void vLog(const int n, const float* a, float* r);
template void vLog(const int n, const double* a, double* r);
template void vInvSqrt(const int n, const double* a, double* r);
template void vInvSqrt(const int n, const float* a, float* r);
template void vLog1p(const int n, const float* a, float* r);
template void vLog1p(const int n, const double* a, double* r);
template void vTanh(const int n, const float* a, float* r);
template void vTanh(const int n, const double* a, double* r);
template void vPow(const int n, const float* a, const float b, float* r);
template void vPow(const int n, const double* a, const double b, double* r);
template void vAdd(const int n, const float* a, const float* b, float* r);
template void vAdd(const int n, const double* a, const double* b, double* r);

#endif

}  // namespace paddle