/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "MathFunctions.h"

#include <mutex>  // std::once_flag / std::call_once for lazy LAPACK loading

#include "hl_matrix_apply.cuh"
#include "hl_matrix_ops.cuh"
#include "paddle/utils/DynamicLoader.h"

namespace dynload {

std::once_flag lapack_dso_flag;
void* lapack_dso_handle = nullptr;

/**
 * The following macro definition generates a struct (one per function) that
 * dynamically loads the corresponding LAPACK routine and invokes it through
 * operator overloading.
 *
 * Note: the routine is resolved from the default dynamically linked library.
 */

// The argument for stringizing operator is not macro-expanded first.
// We have to use two levels of macro to do the expansion.
// See https://gcc.gnu.org/onlinedocs/cpp/Stringizing.html
#define STR(x) #x

// clang-format off
#ifndef LAPACK_FOUND
#define DYNAMIC_LOAD_LAPACK_WRAP(__name)                                       \
  struct DynLoad__##__name {                                                   \
    template <typename... Args>                                                \
    auto operator()(Args... args) -> decltype(__name(args...)) {               \
      using lapack_func = decltype(__name(args...)) (*)(Args...);              \
      std::call_once(lapack_dso_flag, GetLapackDsoHandle, &lapack_dso_handle); \
      void* p_##__name = dlsym(lapack_dso_handle, STR(__name));                \
      CHECK(p_##__name) << "Cannot find symbol " << STR(__name)                \
                        << " in liblapack.so";                                 \
      return reinterpret_cast<lapack_func>(p_##__name)(args...);               \
    }                                                                          \
  } __name;  // struct DynLoad__##__name
#else
#define DYNAMIC_LOAD_LAPACK_WRAP(__name)                                       \
  struct DynLoad__##__name {                                                   \
    template <typename... Args>                                                \
    auto operator()(Args... args) -> decltype(__name(args...)) {               \
      return __name(args...);                                                  \
    }                                                                          \
  } __name;  // struct DynLoad__##__name
#endif

#ifdef PADDLE_USE_ATLAS
  #define  PADDLE_SGETRF  clapack_sgetrf
  #define  PADDLE_DGETRF  clapack_dgetrf
  #define  PADDLE_SGETRI  clapack_sgetri
  #define  PADDLE_DGETRI  clapack_dgetri
#else
  #define  PADDLE_SGETRF  LAPACKE_sgetrf
  #define  PADDLE_DGETRF  LAPACKE_dgetrf
  #define  PADDLE_SGETRI  LAPACKE_sgetri
  #define  PADDLE_DGETRI  LAPACKE_dgetri
#endif

#define LAPACK_ROUTINE_EACH(__macro)       \
  __macro(PADDLE_SGETRF)                   \
  __macro(PADDLE_DGETRF)                   \
  __macro(PADDLE_SGETRI)                   \
  __macro(PADDLE_DGETRI)
// clang-format on

LAPACK_ROUTINE_EACH(DYNAMIC_LOAD_LAPACK_WRAP)
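
// Usage sketch: each expansion above defines a callable object named after the
// routine, so a wrapped call keeps the syntax of a direct LAPACK call, e.g.
// (mirroring getrf<float> in the paddle namespace below):
//
//   int info = dynload::PADDLE_SGETRF(CblasRowMajor, M, N, A, lda, ipiv);
//
// When LAPACK_FOUND is not defined, the symbol is resolved from the LAPACK
// shared library with dlsym on first use; otherwise the call forwards to the
// routine directly.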

}  // namespace dynload

namespace paddle {

template <>
void gemm<float>(const CBLAS_TRANSPOSE transA,
                 const CBLAS_TRANSPOSE transB,
                 const int M,
                 const int N,
                 const int K,
                 const float alpha,
                 const float* A,
                 const int lda,
                 const float* B,
                 const int ldb,
                 const float beta,
                 float* C,
                 const int ldc) {
  cblas_sgemm(CblasRowMajor,
              transA,
              transB,
              M,
              N,
              K,
              alpha,
              A,
              lda,
              B,
              ldb,
              beta,
              C,
              ldc);
}

template <>
void gemm<double>(const CBLAS_TRANSPOSE transA,
                  const CBLAS_TRANSPOSE transB,
                  const int M,
                  const int N,
                  const int K,
                  const double alpha,
                  const double* A,
                  const int lda,
                  const double* B,
                  const int ldb,
                  const double beta,
                  double* C,
                  const int ldc) {
  cblas_dgemm(CblasRowMajor,
              transA,
              transB,
              M,
              N,
              K,
              alpha,
              A,
              lda,
              B,
              ldb,
              beta,
              C,
              ldc);
}
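
// Usage sketch (illustrative): the gemm specializations above hard-code
// CblasRowMajor, so for C(MxN) = alpha * A(MxK) * B(KxN) + beta * C with no
// transposes the leading dimensions are the column counts of each matrix,
// e.g. with M = 2, N = 2, K = 3:
//
//   gemm<float>(CblasNoTrans, CblasNoTrans, 2, 2, 3,
//               1.0f, A, /*lda=*/3, B, /*ldb=*/2, 0.0f, C, /*ldc=*/2);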

template <>
int getrf<float>(const CBLAS_ORDER order,
                 const int M,
                 const int N,
                 float* A,
                 const int lda,
                 int* ipiv) {
  return dynload::PADDLE_SGETRF(order, M, N, A, lda, ipiv);
}

template <>
int getrf<double>(const CBLAS_ORDER order,
                  const int M,
                  const int N,
                  double* A,
                  const int lda,
                  int* ipiv) {
  return dynload::PADDLE_DGETRF(order, M, N, A, lda, ipiv);
}

template <>
int getri<float>(const CBLAS_ORDER order,
                 const int N,
                 float* A,
                 const int lda,
                 const int* ipiv) {
  return dynload::PADDLE_SGETRI(order, N, A, lda, ipiv);
}

template <>
int getri<double>(const CBLAS_ORDER order,
                  const int N,
                  double* A,
                  const int lda,
                  const int* ipiv) {
  return dynload::PADDLE_DGETRI(order, N, A, lda, ipiv);
}
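
// Typical pairing (illustrative): getrf computes an in-place LU factorization
// with partial pivoting, and getri then uses that factorization and the pivot
// indices to overwrite the matrix with its inverse; both return 0 on success.
//
//   std::vector<int> ipiv(N);
//   if (getrf<float>(CblasRowMajor, N, N, A, N, ipiv.data()) == 0) {
//     getri<float>(CblasRowMajor, N, A, N, ipiv.data());
//   }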

template <>
void axpy<float>(const int n, const float alpha, const float* x, float* y) {
  cblas_saxpy(n, alpha, x, 1, y, 1);
}

template <>
void axpy<double>(const int n, const double alpha, const double* x, double* y) {
  cblas_daxpy(n, alpha, x, 1, y, 1);
}

template <>
float dotProduct<float>(const int n, const float* x, const float* y) {
  return cblas_sdot(n, x, 1, y, 1);
}

template <>
double dotProduct<double>(const int n, const double* x, const double* y) {
  return cblas_ddot(n, x, 1, y, 1);
}

#ifdef PADDLE_USE_MKL
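
// MKL build: the elementwise vector routines below delegate to MKL's vector
// math (VML) functions, e.g. vsExp/vdExp for single/double precision.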

template <>
void vExp<float>(const int n, const float* a, float* r) {
  vsExp(n, a, r);
}

template <>
void vExp<double>(const int n, const double* a, double* r) {
  vdExp(n, a, r);
}

template <>
void vPow<float>(const int n, const float* a, const float b, float* r) {
  vsPowx(n, a, b, r);
}

template <>
void vPow<double>(const int n, const double* a, const double b, double* r) {
  vdPowx(n, a, b, r);
}

template <>
void vLog<float>(const int n, const float* a, float* r) {
  vsLn(n, a, r);
}

template <>
void vLog<double>(const int n, const double* a, double* r) {
  vdLn(n, a, r);
}

template <>
void vAdd<float>(const int n, const float* a, const float* b, float* r) {
  vsAdd(n, a, b, r);
}

template <>
void vAdd<double>(const int n, const double* a, const double* b, double* r) {
  vdAdd(n, a, b, r);
}

template <>
void vInvSqrt<float>(const int n, const float* a, float* r) {
  vsInvSqrt(n, a, r);
}

template <>
void vInvSqrt<double>(const int n, const double* a, double* r) {
  vdInvSqrt(n, a, r);
}

template <>
void vLog1p<float>(const int n, const float* a, float* r) {
  vsLog1p(n, a, r);
}

template <>
void vLog1p<double>(const int n, const double* a, double* r) {
  vdLog1p(n, a, r);
}

template <>
void vTanh<float>(const int n, const float* a, float* r) {
  vsTanh(n, a, r);
}

template <>
void vTanh<double>(const int n, const double* a, double* r) {
  vdTanh(n, a, r);
}
#else
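
// Non-MKL fallback: each elementwise routine is expressed through the matrix
// operator macros and dispatched via the hl_cpu_apply_*_op kernels, treating
// the n-element input as a single row (1 x n) with leading dimension n.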

DEFINE_MATRIX_BINARY_OP(vExp, b = std::exp(a));
template <class T>
void vExp(const int n, const T* a, T* r) {
  hl_cpu_apply_binary_op<T, binary::vExp<T>, 0, 0>(
      binary::vExp<T>(), const_cast<T*>(a), r, 1, n, n, n);
}

DEFINE_MATRIX_BINARY_OP(vLog, b = std::log(a));
template <class T>
void vLog(const int n, const T* a, T* r) {
  hl_cpu_apply_binary_op<T, binary::vLog<T>, 0, 0>(
      binary::vLog<T>(), const_cast<T*>(a), r, 1, n, n, n);
}

DEFINE_MATRIX_BINARY_OP(vInvSqrt, b = 1.0f / std::sqrt(a));
template <class T>
void vInvSqrt(const int n, const T* a, T* r) {
  hl_cpu_apply_binary_op<T, binary::vInvSqrt<T>, 0, 0>(
      binary::vInvSqrt<T>(), const_cast<T*>(a), r, 1, n, n, n);
}

DEFINE_MATRIX_BINARY_OP(vLog1p, b = std::log(1.0f + a));
template <class T>
void vLog1p(const int n, const T* a, T* r) {
  hl_cpu_apply_binary_op<T, binary::vLog1p<T>, 0, 0>(
      binary::vLog1p<T>(), const_cast<T*>(a), r, 1, n, n, n);
}

DEFINE_MATRIX_BINARY_OP(vTanh, T tmp = -2.0 * a;
                        tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp;
                        b = 2.0 / (1.0 + std::exp(tmp)) - 1.0);
template <class T>
void vTanh(const int n, const T* a, T* r) {
  hl_cpu_apply_binary_op<T, binary::vTanh<T>, 0, 0>(
      binary::vTanh<T>(), const_cast<T*>(a), r, 1, n, n, n);
}

DEFINE_MATRIX_BINARY_PARAMETER_OP(vPow, ONE_PARAMETER, b = std::pow(a, p));
template <class T>
void vPow(const int n, const T* a, const T b, T* r) {
  hl_cpu_apply_binary_op<T, binary::vPow<T>, 0, 0>(
      binary::vPow<T>(b), const_cast<T*>(a), r, 1, n, n, n);
}

DEFINE_MATRIX_TERNARY_OP(vAdd, c = a + b);
template <class T>
void vAdd(const int n, const T* a, const T* b, T* r) {
  hl_cpu_apply_ternary_op<T, ternary::vAdd<T>, 0, 0>(ternary::vAdd<T>(),
                                                     const_cast<T*>(a),
                                                     const_cast<T*>(b),
                                                     r,
                                                     1,
                                                     n,
                                                     n,
                                                     n,
                                                     n);
}

template void vExp(const int n, const float* a, float* r);
template void vExp(const int n, const double* a, double* r);
template void vLog(const int n, const float* a, float* r);
template void vLog(const int n, const double* a, double* r);
template void vInvSqrt(const int n, const float* a, float* r);
template void vInvSqrt(const int n, const double* a, double* r);
template void vLog1p(const int n, const float* a, float* r);
template void vLog1p(const int n, const double* a, double* r);
template void vTanh(const int n, const float* a, float* r);
template void vTanh(const int n, const double* a, double* r);
template void vPow(const int n, const float* a, const float b, float* r);
template void vPow(const int n, const double* a, const double b, double* r);
template void vAdd(const int n, const float* a, const float* b, float* r);
template void vAdd(const int n, const double* a, const double* b, double* r);

#endif

}  // namespace paddle