/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <mkl.h>
#include <mutex>  // NOLINT
#include "paddle/fluid/platform/dynload/dynamic_loader.h"
#include "paddle/fluid/platform/port.h"

namespace paddle {
namespace platform {
namespace dynload {

// One-time-init guard and the dlopen'd MKLML shared-library handle used by
// every wrapper below; the definitions live in the corresponding .cc file.
extern std::once_flag mklml_dso_flag;
extern void* mklml_dso_handle;

/**
 * The following macro definition can generate structs
 * (for each function) to dynamic load mklml routine
 * via operator overloading.
 */
// Generates a functor struct DynLoad__<name> whose operator() lazily loads
// the MKLML shared library (once, via std::call_once), resolves <name> with
// dlsym, and forwards the call with the routine's real signature
// (DECLARE_TYPE deduces it from ::<name> declared in mkl.h).
//
// NOTE: the dlsym result cache was previously spelled `p_##_name`, which
// pasted the literal token `_name` (not the macro parameter `__name`). That
// gave every wrapper the same reserved identifier `p__name` (identifiers
// containing "__" are reserved in C++). Behavior was unaffected — the static
// is local to each distinct operator() — but `p_##__name` yields a unique,
// non-reserved name such as `p_cblas_sgemm`.
#define DYNAMIC_LOAD_MKLML_WRAP(__name)                                    \
  struct DynLoad__##__name {                                               \
    template <typename... Args>                                            \
    auto operator()(Args... args) -> DECLARE_TYPE(__name, args...) {       \
      using mklmlFunc = decltype(&::__name);                               \
      std::call_once(mklml_dso_flag, []() {                                \
        mklml_dso_handle = paddle::platform::dynload::GetMKLMLDsoHandle(); \
      });                                                                  \
      static void* p_##__name = dlsym(mklml_dso_handle, #__name);          \
      return reinterpret_cast<mklmlFunc>(p_##__name)(args...);             \
    }                                                                      \
  };                                                                       \
  extern DynLoad__##__name __name

// Alias kept so MKLML_ROUTINE_EACH reads as a "declare" operation; it simply
// forwards to DYNAMIC_LOAD_MKLML_WRAP.
#define DECLARE_DYNAMIC_LOAD_MKLML_WRAP(__name) DYNAMIC_LOAD_MKLML_WRAP(__name)

// Enumerates every MKLML routine that gets a dynamic-load wrapper, applying
// __macro to each. Covers CBLAS level-1/2/3 (gemm/gemv/axpy/copy/dot/asum/
// iamax/scal and the gemm pack/compute/free/batch variants), the VML
// element-wise vector-math routines (vs*/vd*/vms*/vmd*), and the MKL thread
// control knob. (The blame-view artifacts that were interleaved with these
// continuation lines made the macro invalid C++; they have been removed.)
#define MKLML_ROUTINE_EACH(__macro) \
  __macro(cblas_sgemm);             \
  __macro(cblas_dgemm);             \
  __macro(cblas_saxpy);             \
  __macro(cblas_daxpy);             \
  __macro(cblas_scopy);             \
  __macro(cblas_dcopy);             \
  __macro(cblas_sgemv);             \
  __macro(cblas_dgemv);             \
  __macro(cblas_sgemm_alloc);       \
  __macro(cblas_dgemm_alloc);       \
  __macro(cblas_sgemm_pack);        \
  __macro(cblas_dgemm_pack);        \
  __macro(cblas_sgemm_compute);     \
  __macro(cblas_dgemm_compute);     \
  __macro(cblas_sgemm_free);        \
  __macro(cblas_dgemm_free);        \
  __macro(cblas_sgemm_batch);       \
  __macro(cblas_dgemm_batch);       \
  __macro(cblas_sdot);              \
  __macro(cblas_ddot);              \
  __macro(cblas_sasum);             \
  __macro(cblas_dasum);             \
  __macro(cblas_isamax);            \
  __macro(cblas_idamax);            \
  __macro(cblas_sscal);             \
  __macro(cblas_dscal);             \
  __macro(vsAdd);                   \
  __macro(vdAdd);                   \
  __macro(vsSub);                   \
  __macro(vdSub);                   \
  __macro(vsMul);                   \
  __macro(vdMul);                   \
  __macro(vsDiv);                   \
  __macro(vdDiv);                   \
  __macro(vsExp);                   \
  __macro(vdExp);                   \
  __macro(vsSqr);                   \
  __macro(vdSqr);                   \
  __macro(vsPowx);                  \
  __macro(vdPowx);                  \
  __macro(vsInv);                   \
  __macro(vdInv);                   \
  __macro(vmsErf);                  \
  __macro(vmdErf);                  \
  __macro(MKL_Set_Num_Threads)

// Emit a DynLoad__<name> struct plus an extern functor instance for every
// routine listed in MKLML_ROUTINE_EACH.
MKLML_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_MKLML_WRAP);

// Sparse CSR matrix-matrix multiply wrappers, guarded off on Windows.
// NOTE(review): presumably these symbols are absent from the Windows MKLML
// distribution — the guard itself is the only evidence here; confirm before
// changing.
#if !defined(_WIN32)
DYNAMIC_LOAD_MKLML_WRAP(mkl_scsrmm);
DYNAMIC_LOAD_MKLML_WRAP(mkl_dcsrmm);
#endif

// Keep the generator macro from leaking out of this header.
#undef DYNAMIC_LOAD_MKLML_WRAP

}  // namespace dynload
}  // namespace platform
}  // namespace paddle