Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleDetection
提交
2dae8a46
P
PaddleDetection
项目概览
PaddlePaddle
/
PaddleDetection
大约 1 年 前同步成功
通知
695
Star
11112
Fork
2696
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
184
列表
看板
标记
里程碑
合并请求
40
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleDetection
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
184
Issue
184
列表
看板
标记
里程碑
合并请求
40
合并请求
40
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
2dae8a46
编写于
6月 26, 2018
作者:
T
Tao Luo
提交者:
GitHub
6月 26, 2018
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #11596 from tensor-tang/refine/mklml/dyload
enable dynamic load mklml lib on fluid
上级
b20fa022
28a0ef95
变更
11
隐藏空白更改
内联
并排
Showing
11 changed files
with
207 additions
and
36 deletions
+207
-36
cmake/external/openblas.cmake
cmake/external/openblas.cmake
+6
-1
cmake/generic.cmake
cmake/generic.cmake
+9
-0
paddle/fluid/inference/tests/book/test_inference_nlp.cc
paddle/fluid/inference/tests/book/test_inference_nlp.cc
+2
-2
paddle/fluid/operators/math/blas.h
paddle/fluid/operators/math/blas.h
+2
-5
paddle/fluid/operators/math/blas_impl.h
paddle/fluid/operators/math/blas_impl.h
+66
-25
paddle/fluid/operators/math/math_function.h
paddle/fluid/operators/math/math_function.h
+1
-3
paddle/fluid/platform/dynload/CMakeLists.txt
paddle/fluid/platform/dynload/CMakeLists.txt
+4
-0
paddle/fluid/platform/dynload/dynamic_loader.cc
paddle/fluid/platform/dynload/dynamic_loader.cc
+15
-0
paddle/fluid/platform/dynload/dynamic_loader.h
paddle/fluid/platform/dynload/dynamic_loader.h
+1
-0
paddle/fluid/platform/dynload/mklml.cc
paddle/fluid/platform/dynload/mklml.cc
+30
-0
paddle/fluid/platform/dynload/mklml.h
paddle/fluid/platform/dynload/mklml.h
+71
-0
未找到文件。
cmake/external/openblas.cmake
浏览文件 @
2dae8a46
...
...
@@ -114,7 +114,12 @@ INCLUDE_DIRECTORIES(${CBLAS_INC_DIR})
SET
(
dummyfile
${
CMAKE_CURRENT_BINARY_DIR
}
/cblas_dummy.c
)
FILE
(
WRITE
${
dummyfile
}
"const char *dummy_cblas =
\"
${
dummyfile
}
\"
;"
)
ADD_LIBRARY
(
cblas STATIC
${
dummyfile
}
)
TARGET_LINK_LIBRARIES
(
cblas
${
CBLAS_LIBRARIES
}
)
IF
(
"
${
CBLAS_PROVIDER
}
"
STREQUAL
"MKLML"
)
TARGET_LINK_LIBRARIES
(
cblas dynload_mklml
)
ELSE
()
TARGET_LINK_LIBRARIES
(
cblas
${
CBLAS_LIBRARIES
}
)
ENDIF
(
"
${
CBLAS_PROVIDER
}
"
STREQUAL
"MKLML"
)
IF
(
NOT
${
CBLAS_FOUND
}
)
ADD_DEPENDENCIES
(
cblas extern_openblas
)
...
...
cmake/generic.cmake
浏览文件 @
2dae8a46
...
...
@@ -195,6 +195,15 @@ function(cc_library TARGET_NAME)
list
(
REMOVE_ITEM cc_library_DEPS warpctc
)
add_dependencies
(
${
TARGET_NAME
}
warpctc
)
endif
()
# Only deps libmklml.so, not link
if
(
"
${
cc_library_DEPS
}
;"
MATCHES
"mklml;"
)
list
(
REMOVE_ITEM cc_library_DEPS mklml
)
if
(
NOT
"
${
TARGET_NAME
}
"
MATCHES
"dynload_mklml"
)
list
(
APPEND cc_library_DEPS dynload_mklml
)
endif
()
add_dependencies
(
${
TARGET_NAME
}
mklml
)
target_link_libraries
(
${
TARGET_NAME
}
"-L
${
MKLML_LIB_DIR
}
-liomp5 -Wl,--as-needed"
)
endif
()
target_link_libraries
(
${
TARGET_NAME
}
${
cc_library_DEPS
}
)
add_dependencies
(
${
TARGET_NAME
}
${
cc_library_DEPS
}
)
endif
()
...
...
paddle/fluid/inference/tests/book/test_inference_nlp.cc
浏览文件 @
2dae8a46
...
...
@@ -19,8 +19,8 @@ limitations under the License. */
#include "gflags/gflags.h"
#include "gtest/gtest.h"
#include "paddle/fluid/inference/tests/test_helper.h"
#include "paddle/fluid/operators/math/blas.h"
#ifdef PADDLE_WITH_MKLML
#include <mkl_service.h>
#include <omp.h>
#endif
...
...
@@ -164,7 +164,7 @@ TEST(inference, nlp) {
// only use 1 thread number per std::thread
omp_set_dynamic
(
0
);
omp_set_num_threads
(
1
);
mkl_set_num_t
hreads
(
1
);
paddle
::
operators
::
math
::
SetNumT
hreads
(
1
);
#endif
double
start_ms
=
0
,
stop_ms
=
0
;
...
...
paddle/fluid/operators/math/blas.h
浏览文件 @
2dae8a46
...
...
@@ -18,10 +18,7 @@
#include "paddle/fluid/framework/tensor.h"
#ifdef PADDLE_WITH_MKLML
#include <mkl_cblas.h>
#include <mkl_lapacke.h>
#include <mkl_service.h>
#include <mkl_vml_functions.h>
#include "paddle/fluid/platform/dynload/mklml.h"
#endif
#ifdef PADDLE_USE_OPENBLAS
...
...
@@ -55,7 +52,7 @@ static void SetNumThreads(int num_threads) {
openblas_set_num_threads
(
real_num_threads
);
#elif defined(PADDLE_WITH_MKLML)
int
real_num_threads
=
num_threads
>
1
?
num_threads
:
1
;
mkl_set_num_t
hreads
(
real_num_threads
);
platform
::
dynload
::
MKL_Set_Num_T
hreads
(
real_num_threads
);
#else
PADDLE_ENFORCE
(
false
,
"To be implemented."
);
#endif
...
...
paddle/fluid/operators/math/blas_impl.h
浏览文件 @
2dae8a46
...
...
@@ -22,61 +22,109 @@ namespace math {
template
<
typename
T
>
struct
CBlas
;
#ifdef PADDLE_WITH_MKLML
template
<
>
struct
CBlas
<
float
>
{
template
<
typename
...
ARGS
>
static
void
GEMM
(
ARGS
...
args
)
{
cblas_sgemm
(
args
...);
platform
::
dynload
::
cblas_sgemm
(
args
...);
}
template
<
typename
...
ARGS
>
static
void
AXPY
(
ARGS
...
args
)
{
cblas_saxpy
(
args
...);
platform
::
dynload
::
cblas_saxpy
(
args
...);
}
template
<
typename
...
ARGS
>
static
void
VCOPY
(
ARGS
...
args
)
{
platform
::
dynload
::
cblas_scopy
(
args
...);
}
template
<
typename
...
ARGS
>
static
void
GEMV
(
ARGS
...
args
)
{
platform
::
dynload
::
cblas_sgemv
(
args
...);
}
template
<
typename
...
ARGS
>
static
void
GEMM_BATCH
(
ARGS
...
args
)
{
platform
::
dynload
::
cblas_sgemm_batch
(
args
...);
}
#ifdef PADDLE_WITH_MKLML
template
<
typename
...
ARGS
>
static
void
VADD
(
ARGS
...
args
)
{
vsAdd
(
args
...);
platform
::
dynload
::
vsAdd
(
args
...);
}
};
template
<
>
struct
CBlas
<
double
>
{
template
<
typename
...
ARGS
>
static
void
GEMM
(
ARGS
...
args
)
{
platform
::
dynload
::
cblas_dgemm
(
args
...);
}
template
<
typename
...
ARGS
>
static
void
AXPY
(
ARGS
...
args
)
{
platform
::
dynload
::
cblas_daxpy
(
args
...);
}
#endif
template
<
typename
...
ARGS
>
static
void
VCOPY
(
ARGS
...
args
)
{
cblas_s
copy
(
args
...);
platform
::
dynload
::
cblas_d
copy
(
args
...);
}
template
<
typename
...
ARGS
>
static
void
GEMV
(
ARGS
...
args
)
{
cblas_s
gemv
(
args
...);
platform
::
dynload
::
cblas_d
gemv
(
args
...);
}
#ifdef PADDLE_WITH_MKLML
template
<
typename
...
ARGS
>
static
void
GEMM_BATCH
(
ARGS
...
args
)
{
cblas_sgemm_batch
(
args
...);
platform
::
dynload
::
cblas_dgemm_batch
(
args
...);
}
template
<
typename
...
ARGS
>
static
void
VADD
(
ARGS
...
args
)
{
platform
::
dynload
::
vdAdd
(
args
...);
}
#endif
};
#else
template
<
>
struct
CBlas
<
double
>
{
struct
CBlas
<
float
>
{
template
<
typename
...
ARGS
>
static
void
GEMM
(
ARGS
...
args
)
{
cblas_
d
gemm
(
args
...);
cblas_
s
gemm
(
args
...);
}
template
<
typename
...
ARGS
>
static
void
AXPY
(
ARGS
...
args
)
{
cblas_
d
axpy
(
args
...);
cblas_
s
axpy
(
args
...);
}
#ifdef PADDLE_WITH_MKLML
template
<
typename
...
ARGS
>
static
void
VADD
(
ARGS
...
args
)
{
vdAdd
(
args
...);
static
void
VCOPY
(
ARGS
...
args
)
{
cblas_scopy
(
args
...);
}
template
<
typename
...
ARGS
>
static
void
GEMV
(
ARGS
...
args
)
{
cblas_sgemv
(
args
...);
}
};
template
<
>
struct
CBlas
<
double
>
{
template
<
typename
...
ARGS
>
static
void
GEMM
(
ARGS
...
args
)
{
cblas_dgemm
(
args
...);
}
template
<
typename
...
ARGS
>
static
void
AXPY
(
ARGS
...
args
)
{
cblas_daxpy
(
args
...);
}
#endif
template
<
typename
...
ARGS
>
static
void
VCOPY
(
ARGS
...
args
)
{
...
...
@@ -87,15 +135,8 @@ struct CBlas<double> {
static
void
GEMV
(
ARGS
...
args
)
{
cblas_dgemv
(
args
...);
}
#ifdef PADDLE_WITH_MKLML
template
<
typename
...
ARGS
>
static
void
GEMM_BATCH
(
ARGS
...
args
)
{
cblas_dgemm_batch
(
args
...);
}
#endif
};
#endif
template
<
>
struct
CBlas
<
platform
::
float16
>
{
static
void
GEMM
(...)
{
PADDLE_THROW
(
"float16 GEMM not supported on CPU"
);
}
...
...
paddle/fluid/operators/math/math_function.h
浏览文件 @
2dae8a46
...
...
@@ -14,9 +14,7 @@ limitations under the License. */
#pragma once
#ifdef PADDLE_WITH_MKLML
#include <mkl_cblas.h>
#include <mkl_lapacke.h>
#include <mkl_vml_functions.h>
#include "paddle/fluid/platform/dynload/mklml.h"
#endif
#ifdef PADDLE_USE_OPENBLAS
...
...
paddle/fluid/platform/dynload/CMakeLists.txt
浏览文件 @
2dae8a46
...
...
@@ -17,3 +17,7 @@ if (CUPTI_FOUND)
endif
(
CUPTI_FOUND
)
nv_library
(
dynload_cuda SRCS
${
CUDA_SRCS
}
DEPS dynamic_loader
)
cc_library
(
dynload_warpctc SRCS warpctc.cc DEPS dynamic_loader warpctc
)
if
(
WITH_MKLML
)
cc_library
(
dynload_mklml SRCS mklml.cc DEPS dynamic_loader mklml
)
endif
()
# TODO(TJ): add iomp, mkldnn?
paddle/fluid/platform/dynload/dynamic_loader.cc
浏览文件 @
2dae8a46
...
...
@@ -49,6 +49,8 @@ DEFINE_string(
tensorrt_dir
,
""
,
"Specify path for loading tensorrt library, such as libnvinfer.so."
);
DEFINE_string
(
mklml_dir
,
""
,
"Specify path for loading libmklml_intel.so."
);
namespace
paddle
{
namespace
platform
{
namespace
dynload
{
...
...
@@ -76,6 +78,7 @@ static inline void* GetDsoHandleFromDefaultPath(const std::string& dso_path,
VLOG
(
3
)
<<
"Try to find library: "
<<
dso_path
<<
" from default system path."
;
// default search from LD_LIBRARY_PATH/DYLD_LIBRARY_PATH
// and /usr/local/lib path
void
*
dso_handle
=
dlopen
(
dso_path
.
c_str
(),
dynload_flags
);
// DYLD_LIBRARY_PATH is disabled after Mac OS 10.11 to
...
...
@@ -97,6 +100,10 @@ static inline void* GetDsoHandleFromDefaultPath(const std::string& dso_path,
}
#endif
if
(
nullptr
==
dso_handle
)
{
LOG
(
WARNING
)
<<
"Can not find library: "
<<
dso_path
<<
". Please try to add the lib path to LD_LIBRARY_PATH."
;
}
return
dso_handle
;
}
...
...
@@ -206,6 +213,14 @@ void* GetTensorRtDsoHandle() {
#endif
}
void
*
GetMKLMLDsoHandle
()
{
#if defined(__APPLE__) || defined(__OSX__)
return
GetDsoHandleFromSearchPath
(
FLAGS_mklml_dir
,
"libmklml_intel.dylib"
);
#else
return
GetDsoHandleFromSearchPath
(
FLAGS_mklml_dir
,
"libmklml_intel.so"
);
#endif
}
}
// namespace dynload
}
// namespace platform
}
// namespace paddle
paddle/fluid/platform/dynload/dynamic_loader.h
浏览文件 @
2dae8a46
...
...
@@ -26,6 +26,7 @@ void* GetWarpCTCDsoHandle();
void
*
GetLapackDsoHandle
();
void
*
GetNCCLDsoHandle
();
void
*
GetTensorRtDsoHandle
();
void
*
GetMKLMLDsoHandle
();
}
// namespace dynload
}
// namespace platform
...
...
paddle/fluid/platform/dynload/mklml.cc
0 → 100644
浏览文件 @
2dae8a46
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/platform/dynload/mklml.h"

namespace paddle {
namespace platform {
namespace dynload {

// Shared, process-wide state used by the DynLoad__* wrappers declared in
// mklml.h: the once-flag guards the single dlopen() of the MKLML shared
// library, and the handle stores its result for subsequent dlsym() lookups.
std::once_flag mklml_dso_flag;
void* mklml_dso_handle = nullptr;

// Emit the single definition of each extern `DynLoad__<name> <name>` wrapper
// object declared in mklml.h, one per MKLML routine in MKLML_ROUTINE_EACH.
#define DEFINE_WRAP(__name) DynLoad__##__name __name

MKLML_ROUTINE_EACH(DEFINE_WRAP);

}  // namespace dynload
}  // namespace platform
}  // namespace paddle
paddle/fluid/platform/dynload/mklml.h
0 → 100644
浏览文件 @
2dae8a46
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <dlfcn.h>
#include <mkl.h>
#include <mutex>  // NOLINT
#include "paddle/fluid/platform/dynload/dynamic_loader.h"

namespace paddle {
namespace platform {
namespace dynload {

// Guards the one-time dlopen() of the MKLML shared library; the resulting
// handle is cached here for all dlsym() lookups. Defined in mklml.cc.
extern std::once_flag mklml_dso_flag;
extern void* mklml_dso_handle;

/**
 * The following macro definition can generate structs
 * (for each function) to dynamic load mklml routine
 * via operator overloading.
 *
 * Each wrapper lazily opens the MKLML library once, resolves the routine's
 * symbol once (cached in a function-local static), then forwards the call
 * with the routine's exact signature (taken from the <mkl.h> declaration).
 */
#define DYNAMIC_LOAD_MKLML_WRAP(__name)                                    \
  struct DynLoad__##__name {                                               \
    template <typename... Args>                                            \
    auto operator()(Args... args) -> decltype(__name(args...)) {           \
      using mklmlFunc = decltype(&::__name);                               \
      std::call_once(mklml_dso_flag, []() {                                \
        mklml_dso_handle = paddle::platform::dynload::GetMKLMLDsoHandle(); \
      });                                                                  \
      /* NOTE: paste the macro argument __name (was `p_##_name`, which   */ \
      /* pasted the literal token `_name` and named every cached pointer */ \
      /* `p__name`; harmless only because each static lives in its own   */ \
      /* struct). Using __name keeps symbol-cache names distinct.        */ \
      static void* p_##__name = dlsym(mklml_dso_handle, #__name);          \
      return reinterpret_cast<mklmlFunc>(p_##__name)(args...);             \
    }                                                                      \
  };                                                                       \
  extern DynLoad__##__name __name

#define DECLARE_DYNAMIC_LOAD_MKLML_WRAP(__name) DYNAMIC_LOAD_MKLML_WRAP(__name)

// Every MKLML routine fluid loads dynamically: single/double precision
// BLAS (gemm, axpy, copy, gemv, batched gemm), VML element-wise adds, and
// the MKL thread-count setter.
#define MKLML_ROUTINE_EACH(__macro) \
  __macro(cblas_sgemm);             \
  __macro(cblas_saxpy);             \
  __macro(cblas_scopy);             \
  __macro(cblas_sgemv);             \
  __macro(cblas_sgemm_batch);       \
  __macro(cblas_dgemm);             \
  __macro(cblas_daxpy);             \
  __macro(cblas_dcopy);             \
  __macro(cblas_dgemv);             \
  __macro(cblas_dgemm_batch);       \
  __macro(vsAdd);                   \
  __macro(vdAdd);                   \
  __macro(MKL_Set_Num_Threads)

MKLML_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_MKLML_WRAP);

#undef DYNAMIC_LOAD_MKLML_WRAP

}  // namespace dynload
}  // namespace platform
}  // namespace paddle
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录