From 363f690d79aebc5b09bdeb5794ee70c968963e49 Mon Sep 17 00:00:00 2001
From: tensor-tang
Date: Thu, 16 Nov 2017 23:14:37 +0800
Subject: [PATCH] expose only one WITH_MKL to user, covering WITH_MKLDNN and
 WITH_MKLML

---
 CMakeLists.txt                        | 20 ++++++++++++++------
 doc/design/mkldnn/README.MD           |  8 ++++----
 doc/howto/dev/write_docs_cn.rst       |  2 +-
 paddle/gserver/layers/MKLDNNLayer.cpp |  2 +-
 paddle/scripts/docker/README.md       |  3 +--
 paddle/scripts/docker/build.sh        |  6 ++----
 paddle/scripts/submit_local.sh.in     | 10 +++++-----
 paddle/scripts/travis/build_doc.sh    |  2 +-
 8 files changed, 29 insertions(+), 24 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5209c40e0f5..9e30dff70fe 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -36,8 +36,7 @@ include(simd)
 ################################ Configurations #######################################
 option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_FOUND})
 option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND})
-option(WITH_MKLDNN "Compile PaddlePaddle with mkl-dnn support." ${AVX_FOUND})
-option(WITH_MKLML "Compile PaddlePaddle with mklml package." ${AVX_FOUND})
+option(WITH_MKL "Compile PaddlePaddle with MKL support." ${AVX_FOUND})
 option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON)
 option(WITH_TESTING "Compile PaddlePaddle with unit testing" ON)
 option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON)
@@ -82,10 +81,8 @@ if(ANDROID OR IOS)
         "Disable PYTHON when cross-compiling for Android and iOS" FORCE)
     set(WITH_RDMA OFF CACHE STRING
         "Disable RDMA when cross-compiling for Android and iOS" FORCE)
-    set(WITH_MKLDNN OFF CACHE STRING
-        "Disable MKLDNN when cross-compiling for Android and iOS" FORCE)
-    set(WITH_MKLML OFF CACHE STRING
-        "Disable MKLML package when cross-compiling for Android and iOS" FORCE)
+    set(WITH_MKL OFF CACHE STRING
+        "Disable MKL when cross-compiling for Android and iOS" FORCE)
 
     # Compile PaddlePaddle mobile inference library
     if (NOT WITH_C_API)
@@ -111,6 +108,17 @@ else()
     set(THIRD_PARTY_BUILD_TYPE Release)
 endif()
 
+if(WITH_MKL)
+    set(WITH_MKLML ON)
+    set(WITH_MKLDNN ${AVX2_FOUND})
+    if(NOT WITH_MKLDNN)
+        message(WARNING "AVX2 intrinsics are not available; disabling MKL-DNN")
+    endif()
+else()
+    set(WITH_MKLML OFF)
+    set(WITH_MKLDNN OFF)
+endif()
+
 ########################################################################################
 
 include(external/mklml)     # download mklml package
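(Note: for reference, a minimal sketch of how the consolidated switch above is driven from the command line. Only the `-DWITH_MKL` flag itself comes from this patch; the out-of-source build-directory layout is the usual convention, not something this patch adds.)

```bash
# Sketch: configuring PaddlePaddle with the new master switch.
mkdir -p build && cd build

# WITH_MKL=ON implies WITH_MKLML=ON, plus WITH_MKLDNN=ON when AVX2 is found.
cmake .. -DWITH_MKL=ON

# WITH_MKL=OFF forces both WITH_MKLML and WITH_MKLDNN to OFF.
cmake .. -DWITH_MKL=OFF
```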
diff --git a/doc/design/mkldnn/README.MD b/doc/design/mkldnn/README.MD
index 16236763a73..ec6d4681836 100644
--- a/doc/design/mkldnn/README.MD
+++ b/doc/design/mkldnn/README.MD
@@ -36,13 +36,13 @@ Figure 1. PaddlePaddle on IA.
 We roughly divide the integration plan into the following aspects.
 
 ### CMake
-We will add a `WITH_MKLDNN` option in `CMakeLists.txt`; setting it to `ON` enables compiling MKL-DNN support, and OpenMP is turned on automatically to improve MKL-DNN performance.
+We will add a single user-facing `WITH_MKL` switch in `CMakeLists.txt`; it is the master switch that controls both `WITH_MKLML` and `WITH_MKLDNN`.
 
-We will also introduce a `WITH_MKLML` option for choosing whether to use the MKLML package shipped with MKL-DNN. The package can be used independently of MKL-DNN, but we recommend turning MKLML on together with MKL-DNN to get the best performance.
+When `WITH_MKL` is turned on, MKLML is enabled as PaddlePaddle's CBLAS and LAPACK library, and Intel OpenMP is enabled to improve MKLML performance. If the system supports the AVX2 instruction set or above, MKL-DNN is enabled as well.
 
-Therefore, we will create `mkldnn.cmake` and `mklml.cmake` files under the `cmake/external` directory; they download the corresponding packages while PaddlePaddle is being compiled and place them in PaddlePaddle's third party directory.
+When `WITH_MKL` is turned off, both MKLML and MKL-DNN are disabled.
 
-**Note**: when `WITH_MKLML=ON`, this package takes precedence as PaddlePaddle's CBLAS and LAPACK library, so the logic in `cmake/cblas.cmake` will be adjusted slightly.
+Therefore, we will create `mkldnn.cmake` and `mklml.cmake` files under the `cmake/external` directory; they download the corresponding packages while PaddlePaddle is being compiled and place them in PaddlePaddle's third party directory.
 
 ### Layers
 All MKL-DNN related C++ layers will be placed, following PaddlePaddle's directory structure, in
diff --git a/doc/howto/dev/write_docs_cn.rst b/doc/howto/dev/write_docs_cn.rst
index 731a63f945c..61f3a223547 100644
--- a/doc/howto/dev/write_docs_cn.rst
+++ b/doc/howto/dev/write_docs_cn.rst
@@ -34,7 +34,7 @@ There are two ways to build PaddlePaddle's documentation.
 
    cd TO_YOUR_PADDLE_CLONE_PATH
    mkdir -p build
    cd build
-   cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKLDNN=OFF -DWITH_MKLML=OFF -DWITH_DOC=ON
+   cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON
    make gen_proto_py
    make paddle_docs paddle_docs_cn
diff --git a/paddle/gserver/layers/MKLDNNLayer.cpp b/paddle/gserver/layers/MKLDNNLayer.cpp
index e75ac5ba464..2125155c6cb 100644
--- a/paddle/gserver/layers/MKLDNNLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNLayer.cpp
@@ -22,7 +22,7 @@ namespace paddle {
 bool MKLDNNLayer::init(const LayerMap& layerMap,
                        const ParameterMap& parameterMap) {
   CHECK(FLAGS_use_mkldnn) << "MkldnnLayers only support use_mkldnn."
-                          << "Please set WITH_MKLDNN=ON "
+                          << "Please set WITH_MKL=ON "
                           << "and set use_mkldnn=True";
   CHECK(!useGpu_) << "Do not support GPU yet";
 
diff --git a/paddle/scripts/docker/README.md b/paddle/scripts/docker/README.md
index b5fd68839dd..f3a6f1dba75 100644
--- a/paddle/scripts/docker/README.md
+++ b/paddle/scripts/docker/README.md
@@ -57,8 +57,7 @@ Users can specify the following Docker build arguments with either "ON" or "OFF"
 | `WITH_GPU` | OFF | Generates NVIDIA CUDA GPU code and relies on CUDA libraries. |
 | `WITH_AVX` | OFF | Set to "ON" to enable AVX support. |
 | `WITH_TESTING` | ON | Build unit tests binaries. |
-| `WITH_MKLDNN` | ON | Build with [Intel® MKL DNN](https://github.com/01org/mkl-dnn) support. |
-| `WITH_MKLML` | ON | Build with [Intel® MKL](https://software.intel.com/en-us/mkl) support. |
+| `WITH_MKL` | ON | Build with [Intel® MKL](https://software.intel.com/en-us/mkl) and [Intel® MKL-DNN](https://github.com/01org/mkl-dnn) support. |
 | `WITH_GOLANG` | ON | Build fault-tolerant parameter server written in go. |
 | `WITH_SWIG_PY` | ON | Build with SWIG python API support. |
 | `WITH_C_API` | OFF | Build capi libraries for inference. |
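(Note: a usage sketch for the documented build argument above; the image tags and build context are hypothetical placeholders, not taken from this patch.)

```bash
# Sketch: passing the consolidated switch as a normal Docker build argument.
docker build --build-arg WITH_MKL=ON -t paddle:build-mkl .

# CPU-only image without MKLML/MKL-DNN support.
docker build --build-arg WITH_MKL=OFF -t paddle:build-nomkl .
```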
diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh
index e9c89eee1af..595d25fd483 100644
--- a/paddle/scripts/docker/build.sh
+++ b/paddle/scripts/docker/build.sh
@@ -34,8 +34,7 @@ function cmake_gen() {
         ${PYTHON_FLAGS}
         -DWITH_DOC=OFF
         -DWITH_GPU=${WITH_GPU:-OFF}
-        -DWITH_MKLDNN=${WITH_MKLDNN:-ON}
-        -DWITH_MKLML=${WITH_MKLML:-ON}
+        -DWITH_MKL=${WITH_MKL:-ON}
         -DWITH_AVX=${WITH_AVX:-OFF}
         -DWITH_GOLANG=${WITH_GOLANG:-ON}
         -DWITH_SWIG_PY=ON
@@ -56,8 +55,7 @@ EOF
         ${PYTHON_FLAGS} \
         -DWITH_DOC=OFF \
         -DWITH_GPU=${WITH_GPU:-OFF} \
-        -DWITH_MKLDNN=${WITH_MKLDNN:-ON} \
-        -DWITH_MKLML=${WITH_MKLML:-ON} \
+        -DWITH_MKL=${WITH_MKL:-ON} \
         -DWITH_AVX=${WITH_AVX:-OFF} \
         -DWITH_GOLANG=${WITH_GOLANG:-ON} \
         -DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} \
diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in
index b9a49526a7e..d71cb84df37 100755
--- a/paddle/scripts/submit_local.sh.in
+++ b/paddle/scripts/submit_local.sh.in
@@ -18,8 +18,8 @@ function version(){
     echo "PaddlePaddle @PADDLE_VERSION@, compiled with"
     echo "    with_avx: @WITH_AVX@"
     echo "    with_gpu: @WITH_GPU@"
+    echo "    with_mkl: @WITH_MKL@"
     echo "    with_mkldnn: @WITH_MKLDNN@"
-    echo "    with_mklml: @WITH_MKLML@"
     echo "    with_double: @WITH_DOUBLE@"
     echo "    with_python: @WITH_PYTHON@"
     echo "    with_rdma: @WITH_RDMA@"
@@ -45,8 +45,8 @@ function ver2num() {
 
 function cpu_config() {
     # auto set KMP_AFFINITY and OMP_DYNAMIC from Hyper Threading Status
-    # only when MKLDNN or MKLML enabled
-    if [ "@WITH_MKLDNN@" == "OFF" ] && [ "@WITH_MKLML@" == "OFF"]; then
+    # only when MKL enabled
+    if [ "@WITH_MKL@" == "OFF" ]; then
         return 0
     fi
     ht=`lscpu |grep "per core"|awk -F':' '{print $2}'|xargs`
@@ -70,8 +70,8 @@ function cpu_config() {
 function threads_config() {
     # auto set OMP_NUM_THREADS and MKL_NUM_THREADS
     # according to trainer_count and total processors
-    # only when MKLDNN or MKLML enabled
-    if [ "@WITH_MKLDNN@" == "OFF" ] && [ "@WITH_MKLML@" == "OFF"]; then
+    # only when MKL enabled
+    if [ "@WITH_MKL@" == "OFF" ]; then
         return 0
     fi
     processors=`grep "processor" /proc/cpuinfo|sort -u|wc -l`
diff --git a/paddle/scripts/travis/build_doc.sh b/paddle/scripts/travis/build_doc.sh
index 973b2736e5c..28d82343ed3 100755
--- a/paddle/scripts/travis/build_doc.sh
+++ b/paddle/scripts/travis/build_doc.sh
@@ -6,7 +6,7 @@ mkdir -p $TRAVIS_BUILD_DIR/build
 cd $TRAVIS_BUILD_DIR/build
 
 # Compile Documentation only.
-cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKLDNN=OFF -DWITH_MKLML=OFF -DWITH_DOC=ON
+cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON
 make -j `nproc` gen_proto_py
 make -j `nproc` paddle_docs paddle_docs_cn
 
-- 
GitLab
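(Note: to make the patched guards in `submit_local.sh.in` concrete, here is a minimal sketch of how a tuning helper behaves after this change. The helper name and exported values are illustrative only; the real script derives its settings from `lscpu`, `/proc/cpuinfo`, and `trainer_count`.)

```bash
# Sketch: an MKL-guarded tuning helper in the spirit of threads_config().
# @WITH_MKL@ is substituted by CMake when the .sh.in template is configured.
function threads_config_sketch() {
    # Skip all OpenMP/MKL tuning in non-MKL builds, mirroring the new guard.
    if [ "@WITH_MKL@" == "OFF" ]; then
        return 0
    fi
    # Illustrative fixed values; the real script computes these per machine.
    export OMP_NUM_THREADS=4
    export MKL_NUM_THREADS=4
}
```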