//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cmath>
#include <limits>
#include <vector>
#include "paddle/fluid/operators/math/math_function.h"

namespace paddle {
namespace operators {
namespace math {

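// CBlas<T> is a thin, type-dispatched wrapper over the CBLAS routines used
// below: each static method simply forwards its arguments to the matching
// single- or double-precision call. Under PADDLE_WITH_MKLML the calls go
// through platform::dynload, so the MKL symbols are resolved from the
// dynamically loaded library.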
template <typename T>
struct CBlas;

#ifdef PADDLE_WITH_MKLML
template <>
struct CBlas<float> {
  template <typename... ARGS>
  static void GEMM(ARGS... args) {
    platform::dynload::cblas_sgemm(args...);
  }

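  // GEMM_ALLOC / GEMM_PACK / GEMM_COMPUTE / GEMM_FREE wrap MKL's packed GEMM
  // API (cblas_sgemm_alloc and friends), which packs one operand once so that
  // it can be reused across many multiplications.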
  template <typename... ARGS>
  static float *GEMM_ALLOC(ARGS... args) {
    return platform::dynload::cblas_sgemm_alloc(args...);
  }

  template <typename... ARGS>
  static void GEMM_PACK(ARGS... args) {
    platform::dynload::cblas_sgemm_pack(args...);
  }

  template <typename... ARGS>
  static void GEMM_COMPUTE(ARGS... args) {
    platform::dynload::cblas_sgemm_compute(args...);
  }

  template <typename... ARGS>
  static void GEMM_FREE(ARGS... args) {
    platform::dynload::cblas_sgemm_free(args...);
  }

#ifdef PADDLE_WITH_LIBXSMM
  template <typename... ARGS>
  static void SMM_GEMM(ARGS... args) {
    libxsmm_sgemm(args...);
  }
#endif

  template <typename... ARGS>
  static void AXPY(ARGS... args) {
    platform::dynload::cblas_saxpy(args...);
  }

  template <typename... ARGS>
  static void VCOPY(ARGS... args) {
    platform::dynload::cblas_scopy(args...);
  }

  template <typename... ARGS>
  static void GEMV(ARGS... args) {
    platform::dynload::cblas_sgemv(args...);
  }

  template <typename... ARGS>
  static void GEMM_BATCH(ARGS... args) {
    platform::dynload::cblas_sgemm_batch(args...);
  }

  template <typename... ARGS>
  static void VADD(ARGS... args) {
    platform::dynload::vsAdd(args...);
  }
};

template <>
struct CBlas<double> {
  template <typename... ARGS>
  static void GEMM(ARGS... args) {
    platform::dynload::cblas_dgemm(args...);
  }

  template <typename... ARGS>
  static double *GEMM_ALLOC(ARGS... args) {
    return platform::dynload::cblas_dgemm_alloc(args...);
  }

  template <typename... ARGS>
  static void GEMM_PACK(ARGS... args) {
    platform::dynload::cblas_dgemm_pack(args...);
  }

  template <typename... ARGS>
  static void GEMM_COMPUTE(ARGS... args) {
    platform::dynload::cblas_dgemm_compute(args...);
  }

  template <typename... ARGS>
  static void GEMM_FREE(ARGS... args) {
    platform::dynload::cblas_dgemm_free(args...);
  }

#ifdef PADDLE_WITH_LIBXSMM
  template <typename... ARGS>
  static void SMM_GEMM(ARGS... args) {
    libxsmm_dgemm(args...);
  }
#endif

  template <typename... ARGS>
  static void AXPY(ARGS... args) {
    platform::dynload::cblas_daxpy(args...);
  }

  template <typename... ARGS>
  static void VCOPY(ARGS... args) {
    platform::dynload::cblas_dcopy(args...);
  }

  template <typename... ARGS>
  static void GEMV(ARGS... args) {
    platform::dynload::cblas_dgemv(args...);
  }

  template <typename... ARGS>
  static void GEMM_BATCH(ARGS... args) {
    platform::dynload::cblas_dgemm_batch(args...);
  }

  template <typename... ARGS>
  static void VADD(ARGS... args) {
    platform::dynload::vdAdd(args...);
  }
};

#else

template <>
struct CBlas<float> {
  template <typename... ARGS>
  static void GEMM(ARGS... args) {
    cblas_sgemm(args...);
  }

  template <typename... ARGS>
  static void AXPY(ARGS... args) {
    cblas_saxpy(args...);
  }

  template <typename... ARGS>
  static void VCOPY(ARGS... args) {
    cblas_scopy(args...);
  }

  template <typename... ARGS>
  static void GEMV(ARGS... args) {
    cblas_sgemv(args...);
  }
};

template <>
struct CBlas<double> {
  template <typename... ARGS>
  static void GEMM(ARGS... args) {
    cblas_dgemm(args...);
  }

  template <typename... ARGS>
  static void AXPY(ARGS... args) {
    cblas_daxpy(args...);
  }

  template <typename... ARGS>
  static void VCOPY(ARGS... args) {
    cblas_dcopy(args...);
  }

  template <typename... ARGS>
  static void GEMV(ARGS... args) {
    cblas_dgemv(args...);
  }
};
#endif

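// CBLAS provides no float16 routines, so this specialization only reports an
// error if any of its methods is ever reached on the CPU path.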
template <>
struct CBlas<platform::float16> {
  static void GEMM(...) { PADDLE_THROW("float16 GEMM not supported on CPU"); }
  static void SMM_GEMM(...) {
    PADDLE_THROW("float16 SMM_GEMM not supported on CPU");
  }
#ifdef PADDLE_WITH_MKLML
  static void GEMM_BATCH(...) {
    PADDLE_THROW("float16 GEMM_BATCH not supported on CPU");
  }
#endif
};

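// UseXSMM decides whether a GEMM should be routed to libxsmm: only small
// problems (m * n * k below the custom threshold) with no transposes,
// alpha == 1 and beta == 0 qualify, i.e. the regime where libxsmm's
// small-matrix kernels are expected to pay off.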
template <typename T>
inline bool UseXSMM(const int &m, const int &n, const int &k, bool transa,
                    bool transb, const T &alpha, const T &beta) {
#ifdef PADDLE_WITH_LIBXSMM
  // Refer to https://github.com/hfp/libxsmm/blob/master/README.md;
  // the threshold used here is a custom value.
  constexpr int LIBXSMM_THRESHOLD = 20 * 20 * 20;
  if (m * n * k > LIBXSMM_THRESHOLD || transa || transb ||
      std::abs(alpha - static_cast<T>(1)) >
          std::numeric_limits<T>::epsilon() ||
      std::abs(beta) > std::numeric_limits<T>::epsilon()) {
    return false;
  } else {
    return true;
  }
#endif
  return false;
}

template <>
inline bool UseXSMM<platform::float16>(const int &m, const int &n, const int &k,
                                       bool transa, bool transb,
                                       const platform::float16 &alpha,
                                       const platform::float16 &beta) {
  return false;
}

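// GEMM_WARP is the common entry point for the CPU GEMM methods below. It
// routes qualifying small multiplications to libxsmm, optionally splits a
// large row-major GEMM into blocks of rows (PADDLE_MKL_SPLIT_GEMM), and
// otherwise falls through to a single CBlas<T>::GEMM call.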
template <typename T>
inline void GEMM_WARP(CBLAS_ORDER order, CBLAS_TRANSPOSE transA,
                      CBLAS_TRANSPOSE transB, int M, int N, int K, T alpha,
                      const T *A, int lda, const T *B, int ldb, T beta, T *C,
                      int ldc) {
#ifdef PADDLE_WITH_LIBXSMM
  if (UseXSMM<T>(M, N, K, transA != CblasNoTrans, transB != CblasNoTrans, alpha,
                 beta)) {
    // Note: libxsmm uses column-major storage, so compute C^T = B^T * A^T by
    // swapping the operands and the M/N dimensions.
    const char transa = 'N';
    const char transb = 'N';
    CBlas<T>::SMM_GEMM(&transa, &transb, &N, &M, &K, &alpha, B, &ldb, A, &lda,
                       &beta, C, &ldc);
    return;
  }
#endif

#ifdef PADDLE_MKL_SPLIT_GEMM
  constexpr int bs = 2;
  if (M % bs == 0 && transA == CblasNoTrans && transB == CblasNoTrans) {
    for (int off = 0; off < M; off += bs) {
      CBlas<T>::GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans, bs, N, K, alpha,
                     A + off * lda, lda, B, ldb, beta, C + off * ldc, ldc);
    }
    return;
  }
#endif
  CBlas<T>::GEMM(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
                 beta, C, ldc);
}

#ifdef PADDLE_WITH_MKLML
template <>
template <typename T>
T *Blas<platform::CPUDeviceContext>::GEMM_ALLOC(const CBLAS_IDENTIFIER id,
                                                const int M, const int N,
                                                const int K) const {
  return CBlas<T>::GEMM_ALLOC(id, M, N, K);
}

template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::GEMM_PACK(const CBLAS_IDENTIFIER id,
                                                 const CBLAS_TRANSPOSE trans,
                                                 int M, int N, int K,
                                                 const T alpha, const T *src,
                                                 const int ld, T *dst) const {
  CBlas<T>::GEMM_PACK(CblasRowMajor, id, trans, M, N, K, alpha, src, ld, dst);
}

template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::GEMM_COMPUTE(
    int transA, int transB, int M, int N, int K, const T *A, const int lda,
    const T *B, const int ldb, T beta, T *C, const int ldc) const {
  CBlas<T>::GEMM_COMPUTE(CblasRowMajor, transA, transB, M, N, K, A, lda, B, ldb,
                         beta, C, ldc);
}

template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::GEMM_FREE(T *data) const {
  CBlas<T>::GEMM_FREE(data);
}
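
// Illustrative call sequence for the packed-GEMM methods above (a sketch only:
// `blas`, `A`, `B`, `C` and the MKL identifiers CblasBMatrix / CblasPacked are
// assumptions, not definitions from this file):
//   float *packed_b = blas.GEMM_ALLOC<float>(CblasBMatrix, M, N, K);
//   blas.GEMM_PACK(CblasBMatrix, CblasNoTrans, M, N, K, 1.0f, B, N, packed_b);
//   blas.GEMM_COMPUTE(CblasNoTrans, CblasPacked, M, N, K, A, K, packed_b, N,
//                     0.0f, C, N);
//   blas.GEMM_FREE(packed_b);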
#endif

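// The first CPU GEMM overload below assumes contiguous row-major operands and
// derives the leading dimensions itself (lda = K or M depending on transA,
// ldb = N or K depending on transB, ldc = N); the second overload takes the
// leading dimensions explicitly. Both forward to GEMM_WARP.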
template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::GEMM(CBLAS_TRANSPOSE transA,
                                            CBLAS_TRANSPOSE transB, int M,
                                            int N, int K, T alpha, const T *A,
                                            const T *B, T beta, T *C) const {
  int lda = (transA == CblasNoTrans) ? K : M;
  int ldb = (transB == CblasNoTrans) ? N : K;
  int ldc = N;
  GEMM_WARP<T>(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
               beta, C, ldc);
}

template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::GEMM(bool transA, bool transB, int M,
                                            int N, int K, T alpha, const T *A,
                                            int lda, const T *B, int ldb,
                                            T beta, T *C, int ldc) const {
  GEMM_WARP<T>(CblasRowMajor, transA == false ? CblasNoTrans : CblasTrans,
               transB == false ? CblasNoTrans : CblasTrans, M, N, K, alpha, A,
               lda, B, ldb, beta, C, ldc);
}

template <typename DeviceContext>
template <typename T>
void Blas<DeviceContext>::MatMul(const framework::Tensor &mat_a, bool trans_a,
                                 const framework::Tensor &mat_b, bool trans_b,
                                 T alpha, framework::Tensor *mat_out,
                                 T beta) const {
  auto dim_a = mat_a.dims();
  auto dim_b = mat_b.dims();
  auto dim_out = mat_out->dims();
  PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
                 "The inputs and output of matmul must be matrices");
  PADDLE_ENFORCE(
      mat_a.place() == mat_b.place() && mat_a.place() == mat_out->place(),
      "The places of the matrices must be the same");

  int M = dim_out[0];
  int N = dim_out[1];
  int K = !trans_a ? dim_a[1] : dim_a[0];

  CBLAS_TRANSPOSE transA = !trans_a ? CblasNoTrans : CblasTrans;
  CBLAS_TRANSPOSE transB = !trans_b ? CblasNoTrans : CblasTrans;

  this->GEMM(transA, transB, M, N, K, alpha, mat_a.data<T>(), mat_b.data<T>(),
             beta, mat_out->data<T>());
}
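
// Minimal usage sketch for the MatMul overload above (illustrative only:
// `dev_ctx`, `x`, `y` and `out` are hypothetical, and GetBlas is assumed to be
// the helper declared in blas.h):
//   auto blas = math::GetBlas<platform::CPUDeviceContext, float>(dev_ctx);
//   blas.MatMul(x, /*trans_a=*/false, y, /*trans_b=*/false, 1.0f, &out, 0.0f);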

template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::AXPY(int n, T alpha, const T *x,
                                            T *y) const {
  CBlas<T>::AXPY(n, alpha, x, 1, y, 1);
}

template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::VCOPY(int n, const T *x, T *y) const {
  CBlas<T>::VCOPY(n, x, 1, y, 1);
}

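// Without MKL's vsAdd/vdAdd, VADD falls back to z = y (VCOPY) followed by
// z += 1 * x (AXPY).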
template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::VADD(int n, const T *x, const T *y,
                                            T *z) const {
#ifdef PADDLE_WITH_MKLML
  CBlas<T>::VADD(n, x, y, z);
#else
  this->template VCOPY<T>(n, y, z);
  this->template AXPY<T>(n, 1., x, z);
#endif
}

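// GEMV computes C = alpha * op(A) * B + beta * C, where A is a row-major
// M x N matrix and B, C are vectors with unit increments; op is a transpose
// when trans_a is true.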
template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::GEMV(bool trans_a, int M, int N, T alpha,
                                            const T *A, const T *B, T beta,
                                            T *C) const {
  CBLAS_TRANSPOSE transA = !trans_a ? CblasNoTrans : CblasTrans;
  CBlas<T>::GEMV(CblasRowMajor, transA, M, N, alpha, A, N, B, 1, beta, C, 1);
}

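// BatchedGEMM multiplies batchCount matrix pairs laid out at fixed strides:
// problem k reads A + k * strideA and B + k * strideB and writes C + k * M * N.
// With MKL the whole batch is issued as a single cblas_?gemm_batch call with
// one group; otherwise it degrades to a loop of ordinary GEMMs.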
template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::BatchedGEMM(
    CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int M, int N, int K,
    T alpha, const T *A, const T *B, T beta, T *C, int batchCount,
    int64_t strideA, int64_t strideB) const {
#ifdef PADDLE_WITH_MKLML
  int lda = (transA == CblasNoTrans) ? K : M;
  int ldb = (transB == CblasNoTrans) ? N : K;
  int ldc = N;
  auto a_array = std::vector<const T *>(batchCount);
  auto b_array = std::vector<const T *>(batchCount);
  auto c_array = std::vector<T *>(batchCount);
  for (int k = 0; k < batchCount; ++k) {
    a_array[k] = &A[k * strideA];
    b_array[k] = &B[k * strideB];
    c_array[k] = &C[k * M * N];
  }

  CBlas<T>::GEMM_BATCH(CblasRowMajor, &transA, &transB, &M, &N, &K, &alpha,
                       a_array.data(), &lda, b_array.data(), &ldb, &beta,
                       c_array.data(), &ldc, 1 /* group_count */, &batchCount);
#else
  for (int k = 0; k < batchCount; ++k) {
    auto *Ak = &A[k * strideA];
    auto *Bk = &B[k * strideB];
    auto *Ck = &C[k * M * N];
    this->template GEMM<T>(transA, transB, M, N, K, alpha, Ak, Bk, beta, Ck);
  }
#endif
}

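// This MatMul overload works from MatDescriptor metadata: when neither side is
// batched it issues a single GEMM, otherwise a strided BatchedGEMM. A
// batch_size_ of 0 marks an un-batched operand that is reused for every
// element of the batch.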
template <typename DeviceContext>
template <typename T>
void Blas<DeviceContext>::MatMul(const framework::Tensor &mat_a,
                                 const MatDescriptor &dim_a,
                                 const framework::Tensor &mat_b,
                                 const MatDescriptor &dim_b, T alpha,
                                 framework::Tensor *mat_out, T beta) const {
  PADDLE_ENFORCE_EQ(dim_a.width_, dim_b.height_);
  CBLAS_TRANSPOSE transA = !dim_a.trans_ ? CblasNoTrans : CblasTrans;
  CBLAS_TRANSPOSE transB = !dim_b.trans_ ? CblasNoTrans : CblasTrans;
  if (dim_a.batch_size_ == 0 && dim_b.batch_size_ == 0) {
    this->template GEMM<T>(transA, transB, dim_a.height_, dim_b.width_,
                           dim_a.width_, alpha, mat_a.data<T>(),
                           mat_b.data<T>(), beta, mat_out->data<T>());
  } else {
    PADDLE_ENFORCE(dim_a.batch_size_ == dim_b.batch_size_ ||
                   dim_a.batch_size_ == 0 || dim_b.batch_size_ == 0);
    this->template BatchedGEMM<T>(
        transA, transB, dim_a.height_, dim_b.width_, dim_a.width_, alpha,
        mat_a.data<T>(), mat_b.data<T>(), beta, mat_out->data<T>(),
        dim_a.batch_size_ == 0 ? dim_b.batch_size_ : dim_a.batch_size_,
        dim_a.stride_, dim_b.stride_);
  }
}

}  // namespace math
}  // namespace operators
}  // namespace paddle