/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#ifdef PADDLE_WITH_MKLML
#include <mkl_cblas.h>
#include <mkl_lapacke.h>
#include <mkl_vml_functions.h>
#endif

#ifdef PADDLE_USE_ATLAS
extern "C" {
#include <cblas.h>
#include <clapack.h>
}
#endif

#ifdef PADDLE_USE_OPENBLAS
#include <cblas.h>
#include <lapacke.h>
#endif

#ifndef LAPACK_FOUND
extern "C" {
#include <cblas.h>
int LAPACKE_sgetrf(int matrix_layout, int m, int n, float* a, int lda,
                   int* ipiv);
int LAPACKE_dgetrf(int matrix_layout, int m, int n, double* a, int lda,
                   int* ipiv);
int LAPACKE_sgetri(int matrix_layout, int n, float* a, int lda,
                   const int* ipiv);
int LAPACKE_dgetri(int matrix_layout, int n, double* a, int lda,
                   const int* ipiv);
}
#endif

#include <cmath>

#include "paddle/framework/eigen.h"
#include "paddle/framework/tensor.h"
#include "paddle/framework/tensor_util.h"
#include "paddle/platform/device_context.h"
#include "paddle/platform/enforce.h"

namespace paddle {
namespace operators {
namespace math {

// Supports contiguous memory only for now.
// If transA = N and transB = N,
// then matrixA is M * K, matrixB is K * N, and matrixC is M * N.
// For more detailed info, please refer to
// http://www.netlib.org/lapack/explore-html/d4/de2/sgemm_8f.html
template <typename Place, typename T>
void gemm(const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA,
          const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
          const T alpha, const T* A, const T* B, const T beta, T* C);
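// Example use of the gemm above (illustrative sketch only; `ctx`, `a`, `b`
// and `c` are hypothetical caller-owned buffers, assuming row-major float
// data on a CPU place): compute C = 1.0 * A * B + 0.0 * C with A of shape
// M x K, B of shape K x N and C of shape M x N:
//
//   gemm<platform::CPUPlace, float>(ctx, CblasNoTrans, CblasNoTrans, M, N, K,
//                                   1.0f, a, b, 0.0f, c);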

// gemm wrapper with leading-dimension (stride) arguments for matrices that
// are not contiguous in memory.
template <typename Place, typename T>
void gemm(const platform::DeviceContext& context, const bool transA,
          const bool transB, const int M, const int N, const int K,
          const T alpha, const T* A, const int lda, const T* B, const int ldb,
          const T beta, T* C, const int ldc);
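// In the strided overload above, lda, ldb and ldc are the leading dimensions
// (row strides, in elements) of A, B and C, which allows operating on
// sub-matrices of larger buffers. A hypothetical sketch, assuming row-major
// float data and caller-owned buffers `a`, `b`, `c`:
//
//   gemm<platform::CPUPlace, float>(ctx, false, false, M, N, K, 1.0f, a, lda,
//                                   b, ldb, 0.0f, c, ldc);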

// Matrix multiplication for tensors stored contiguously in memory.
template <typename Place, typename T>
void matmul(const platform::DeviceContext& context,
            const framework::Tensor& matrix_a, bool trans_a,
            const framework::Tensor& matrix_b, bool trans_b, T alpha,
            framework::Tensor* matrix_out, T beta);
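// Example use of matmul above (illustrative sketch; `ctx`, `a_tensor`,
// `b_tensor` and `out_tensor` are hypothetical framework::Tensor objects with
// compatible 2-D shapes): compute out = 1.0 * a * b + 0.0 * out:
//
//   matmul<platform::CPUPlace, float>(ctx, a_tensor, false, b_tensor, false,
//                                     1.0f, &out_tensor, 0.0f);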

// Batched gemm
template <typename Place, typename T>
void batched_gemm(const platform::DeviceContext& context,
                  const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB,
                  const int M, const int N, const int K, const T alpha,
                  const T* A, const T* B, const T beta, T* C,
                  const int batchCount, const int strideA, const int strideB);
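// In the batched variant above, strideA and strideB are presumably the element
// offsets between consecutive matrices of the batch (e.g. M * K for A and
// K * N for B when the batch is densely packed), so the i-th multiplication
// reads from A + i * strideA and B + i * strideB.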

template <typename Place, typename T>
void gemv(const platform::DeviceContext& context, const bool trans_a,
          const int M, const int N, const T alpha, const T* A, const T* B,
          const T beta, T* C);
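// gemv above presumably follows the BLAS GEMV convention:
// C = alpha * op(A) * B + beta * C, where A is an M x N matrix, op(A) is A or
// its transpose depending on trans_a, and B and C are vectors of the matching
// lengths (N and M respectively when trans_a is false).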

template <typename Place, typename T>
void axpy(const platform::DeviceContext& context, const int n, const T alpha,
          const T* x, T* y);
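// axpy above is the standard BLAS AXPY update over n elements:
// y = alpha * x + y. A hypothetical call accumulating one float buffer into
// another (`ctx`, `x`, `y` and `n` are caller-provided):
//
//   axpy<platform::CPUPlace, float>(ctx, n, 1.0f, x, y);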

template <typename Place, typename T, int Rank>
struct Transpose {
  void operator()(const platform::DeviceContext& context,
                  const framework::Tensor& in, framework::Tensor* out,
                  const std::vector<int>& axis);
};
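// Transpose permutes the dimensions of `in` into `out` according to `axis`,
// where output dimension i comes from input dimension axis[i]. A hypothetical
// NCHW -> NHWC permutation of a rank-4 tensor (`ctx`, `in_tensor` and
// `out_tensor` are assumed names):
//
//   Transpose<platform::CPUPlace, float, 4> trans;
//   trans(ctx, in_tensor, &out_tensor, {0, 2, 3, 1});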

template <typename Place, typename T>
struct SetConstant {
  void operator()(const platform::DeviceContext& context,
                  framework::Tensor* tensor, T num);
};
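// SetConstant fills every element of `tensor` with the value `num`, e.g. to
// zero-initialize an output (illustrative sketch; `ctx` and `out` are
// hypothetical):
//
//   SetConstant<platform::CPUPlace, float> set_zero;
//   set_zero(ctx, &out, 0.0f);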

template <typename Place>
void set_constant_with_place(const platform::DeviceContext& context,
                             framework::Tensor* tensor, float value);

void set_constant(const platform::DeviceContext& context,
                  framework::Tensor* tensor, float value);
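// The non-templated helpers above likewise fill a tensor with a single value;
// set_constant presumably dispatches to the matching device implementation
// based on the place of `context`, while set_constant_with_place takes the
// Place as an explicit template argument:
//
//   set_constant(ctx, &out, 1.0f);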

template <typename Place, typename T>
struct RowwiseAdd {
  void operator()(const platform::DeviceContext& context,
                  const framework::Tensor& input, const framework::Tensor& vec,
                  framework::Tensor* output);
};
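// RowwiseAdd presumably broadcasts `vec` over the rows of `input`:
// output(i, j) = input(i, j) + vec(j), with `input` of shape M x N and `vec`
// of length N.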

template <typename Place, typename T>
struct ColwiseSum {
  void operator()(const platform::DeviceContext& context,
                  const framework::Tensor& input, framework::Tensor* vec);
};
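// ColwiseSum presumably reduces `input` over its rows, producing in `vec` the
// per-column sums: vec(j) = sum_i input(i, j).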

}  // namespace math
}  // namespace operators
}  // namespace paddle