math_function.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/math/math_function.h"

namespace paddle {
namespace operators {
namespace math {

template <>
void gemm<platform::GPUPlace, float>(const CBLAS_TRANSPOSE transA,
                                     const CBLAS_TRANSPOSE transB, const int M,
                                     const int N, const int K,
                                     const float alpha, const float* A,
                                     const float* B, const float beta, float* C,
                                     platform::DeviceContext* context) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention.
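  // cuBLAS expects column-major storage while these routines take row-major
  // data. A row-major M x N matrix occupies the same memory as its
  // column-major N x M transpose, so we compute C^T = B^T * A^T by swapping
  // the A/B operands and the M/N dimensions; writing the result with a
  // leading dimension of N yields the row-major C = alpha * A * B + beta * C.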
  int lda = (transA == CblasNoTrans) ? K : M;
  int ldb = (transB == CblasNoTrans) ? N : K;
  cublasOperation_t cuTransA =
      (transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  cublasOperation_t cuTransB =
      (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;

  PADDLE_ENFORCE(platform::dynload::cublasSgemm(
      reinterpret_cast<platform::CUDADeviceContext*>(context)->cublas_handle(),
      cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}

template <>
void gemm<platform::GPUPlace, double>(const CBLAS_TRANSPOSE transA,
                                      const CBLAS_TRANSPOSE transB, const int M,
                                      const int N, const int K,
                                      const double alpha, const double* A,
                                      const double* B, const double beta,
                                      double* C,
                                      platform::DeviceContext* context) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention.
  int lda = (transA == CblasNoTrans) ? K : M;
  int ldb = (transB == CblasNoTrans) ? N : K;
  cublasOperation_t cuTransA =
      (transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  cublasOperation_t cuTransB =
      (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  PADDLE_ENFORCE(platform::dynload::cublasDgemm(
      reinterpret_cast<platform::CUDADeviceContext*>(context)->cublas_handle(),
      cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}

template <>
void matmul<platform::GPUPlace, float>(const framework::Tensor& matrix_a,
                                       bool trans_a,
                                       const framework::Tensor& matrix_b,
                                       bool trans_b, float alpha,
                                       framework::Tensor* matrix_out,
                                       float beta,
                                       platform::DeviceContext* context) {
  auto dim_a = matrix_a.dims();
  auto dim_b = matrix_b.dims();
  auto dim_out = matrix_out->dims();
  PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
                 "The input and output of matmul be matrix");

  PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
                     platform::is_gpu_place(matrix_b.place()) &&
                     platform::is_gpu_place(matrix_out->place()),
                 "Matrix must all be in GPUPlace");

  int M = dim_out[0];
  int N = dim_out[1];
  int K = (trans_a == false) ? dim_a[1] : dim_a[0];
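  // M and N come from the output shape; K is the shared (contraction)
  // dimension: the columns of A when A is not transposed, otherwise its rows.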

  CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
  CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;

  gemm<platform::GPUPlace, float>(
      transA, transB, M, N, K, alpha, matrix_a.data<float>(),
      matrix_b.data<float>(), beta, matrix_out->data<float>(), context);
}

template <>
void matmul<platform::GPUPlace, double>(const framework::Tensor& matrix_a,
                                        bool trans_a,
                                        const framework::Tensor& matrix_b,
                                        bool trans_b, double alpha,
                                        framework::Tensor* matrix_out,
                                        double beta,
                                        platform::DeviceContext* context) {
  auto dim_a = matrix_a.dims();
  auto dim_b = matrix_b.dims();
  auto dim_out = matrix_out->dims();
  PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
                 "The input and output of matmul be matrix");

  PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
                     platform::is_gpu_place(matrix_b.place()) &&
                     platform::is_gpu_place(matrix_out->place()),
                 "Matrix must all be in GPUPlace");

  int M = dim_out[0];
  int N = dim_out[1];
  int K = (trans_a == false) ? dim_a[1] : dim_a[0];

  CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
  CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;

  gemm<platform::GPUPlace, double>(
      transA, transB, M, N, K, alpha, matrix_a.data<double>(),
      matrix_b.data<double>(), beta, matrix_out->data<double>(), context);
}

}  // namespace math
}  // namespace operators
}  // namespace paddle
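
// A minimal usage sketch (illustrative only, not part of this file). It
// assumes the Tensor, make_ddim, and CUDADeviceContext APIs from the same
// Paddle source tree, so the exact setup calls may differ:
//
//   paddle::platform::GPUPlace place;
//   paddle::platform::CUDADeviceContext context(place);
//
//   paddle::framework::Tensor a, b, out;
//   a.mutable_data<float>(paddle::framework::make_ddim({2, 3}), place);
//   b.mutable_data<float>(paddle::framework::make_ddim({3, 4}), place);
//   out.mutable_data<float>(paddle::framework::make_ddim({2, 4}), place);
//
//   // out = 1.0f * a * b + 0.0f * out, computed on the GPU via cuBLAS.
//   paddle::operators::math::matmul<paddle::platform::GPUPlace, float>(
//       a, false, b, false, 1.0f, &out, 0.0f, &context);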