/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifndef MATHFUNCTIONS_H_
#define MATHFUNCTIONS_H_

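// Pull in the CBLAS/LAPACK headers for whichever BLAS backend was selected at build time.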
#ifdef PADDLE_USE_MKLML
#include <mkl_cblas.h>
#include <mkl_lapacke.h>
#include <mkl_vml_functions.h>
#endif

#ifdef PADDLE_USE_MKL
#include <mkl.h>
#include <mkl_lapacke.h>
#endif

#ifdef PADDLE_USE_ATLAS
extern "C" {
#include <cblas.h>
#include <clapack.h>
}
#endif

#ifdef PADDLE_USE_OPENBLAS
#include <cblas.h>
#include <lapacke.h>
#endif

#ifndef LAPACK_FOUND
extern "C" {
#ifndef PADDLE_USE_EIGEN_FOR_BLAS
#include <cblas.h>
#else
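// Minimal CBLAS_ORDER definition for builds that use Eigen for BLAS and do not include <cblas.h>.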
typedef enum CBLAS_ORDER {
  CblasRowMajor = 101,
  CblasColMajor = 102
} CBLAS_ORDER;
#endif
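// Prototypes for the LAPACKE routines used by the getrf/getri wrappers, declared
// here for builds where no LAPACK header was found.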
int LAPACKE_sgetrf(
    int matrix_layout, int m, int n, float* a, int lda, int* ipiv);
int LAPACKE_dgetrf(
    int matrix_layout, int m, int n, double* a, int lda, int* ipiv);
int LAPACKE_sgetri(
    int matrix_layout, int n, float* a, int lda, const int* ipiv);
int LAPACKE_dgetri(
    int matrix_layout, int n, double* a, int lda, const int* ipiv);
}
#endif

#include <cmath>

namespace paddle {

#ifndef PADDLE_USE_EIGEN_FOR_BLAS
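/// General matrix-matrix multiply: C = alpha * op(A) * op(B) + beta * C
/// (CBLAS gemm semantics, using the backend selected above).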
template <class T>
void gemm(const CBLAS_TRANSPOSE transA,
          const CBLAS_TRANSPOSE transB,
          const int M,
          const int N,
          const int K,
          const T alpha,
          const T* A,
          const int lda,
          const T* B,
          const int ldb,
          const T beta,
          T* C,
          const int ldc);
#endif

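/// LU factorization of an M-by-N matrix A (LAPACK getrf); pivot indices are written to ipiv.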
template <class T>
int getrf(const CBLAS_ORDER Order,
          const int M,
          const int N,
          T* A,
          const int lda,
          int* ipiv);

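/// In-place inversion of an N-by-N matrix A from its LU factorization (LAPACK getri).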
template <class T>
int getri(
    const CBLAS_ORDER Order, const int N, T* A, const int lda, const int* ipiv);

template <class T>
void axpy(const int n, const T alpha, const T* x, T* y) {
  /// y = y + alpha * x
  for (int i = 0; i < n; i++) {
    y[i] = y[i] + alpha * x[i];
  }
}

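/// Returns the dot product sum_i x[i] * y[i] of two length-n vectors.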
template <class T>
T dotProduct(const int n, const T* x, const T* y) {
  T result = static_cast<T>(0);
  for (int i = 0; i < n; i++) {
    result += x[i] * y[i];
  }
  return result;
}

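/// Element-wise exponential: r[i] = exp(a[i]) for i in [0, n).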
template <class T>
void vExp(const int n, const T* a, T* r);

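/// Element-wise power with a scalar exponent: r[i] = pow(a[i], b).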
template <class T>
void vPow(const int n, const T* a, const T b, T* r);

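/// Element-wise natural logarithm: r[i] = log(a[i]).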
template <class T>
void vLog(const int n, const T* a, T* r);

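/// Element-wise addition: r[i] = a[i] + b[i].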
template <class T>
void vAdd(const int n, const T* a, const T* b, T* r);

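/// Element-wise inverse square root: r[i] = 1 / sqrt(a[i]).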
template <class T>
void vInvSqrt(const int n, const T* a, T* r);

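/// Element-wise log(1 + x): r[i] = log1p(a[i]).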
template <class T>
void vLog1p(const int n, const T* a, T* r);

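/// Element-wise hyperbolic tangent: r[i] = tanh(a[i]).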
template <class T>
void vTanh(const int n, const T* a, T* r);

}  // namespace paddle

#endif  // MATHFUNCTIONS_H_