Unverified · Commit 38de512d authored by tensor-tang, committed by GitHub

Merge pull request #6459 from tensor-tang/develop

unify MKL macro definition
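
The diff renames the build-time guards PADDLE_USE_MKLML and PADDLE_USE_MKLDNN to PADDLE_WITH_MKLML and PADDLE_WITH_MKLDNN, so every optional dependency is gated by a macro of the same PADDLE_WITH_* shape. A minimal sketch of the guard pattern after the rename (the <cblas.h> fallback branch is illustrative, not part of this diff):

    // PADDLE_WITH_MKLML is injected by CMake via
    // add_definitions(-DPADDLE_WITH_MKLML) when MKLML is found.
    #ifdef PADDLE_WITH_MKLML
    #include <mkl_cblas.h>  // MKL's CBLAS interface
    #else
    #include <cblas.h>      // generic CBLAS fallback (illustrative)
    #endif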
@@ -17,7 +17,7 @@ if(WITH_MKLML AND MKLML_INC_DIR AND MKLML_LIB)
   set(CBLAS_INC_DIR ${MKLML_INC_DIR})
   set(CBLAS_LIBRARIES ${MKLML_LIB})
-  add_definitions(-DPADDLE_USE_MKLML)
+  add_definitions(-DPADDLE_WITH_MKLML)
   add_definitions(-DLAPACK_FOUND)
   message(STATUS "Found cblas and lapack in MKLML "
...
@@ -67,5 +67,5 @@ ADD_LIBRARY(mkldnn SHARED IMPORTED GLOBAL)
 SET_PROPERTY(TARGET mkldnn PROPERTY IMPORTED_LOCATION ${MKLDNN_LIB})
 ADD_DEPENDENCIES(mkldnn ${MKLDNN_PROJECT})
 MESSAGE(STATUS "MKLDNN library: ${MKLDNN_LIB}")
-add_definitions(-DPADDLE_USE_MKLDNN)
+add_definitions(-DPADDLE_WITH_MKLDNN)
 LIST(APPEND external_project_dependencies mkldnn)
@@ -24,7 +24,7 @@ limitations under the License. */
 #include "paddle/utils/ClassRegistrar.h"
 #include "paddle/utils/Logging.h"
-#ifdef PADDLE_USE_MKLDNN
+#ifdef PADDLE_WITH_MKLDNN
 #include "MKLDNNActivation.h"
 #endif
@@ -490,7 +490,7 @@ Error __must_check backward(Argument& act) {
 END_DEFINE_ACTIVATION(log)
 ActivationFunction* ActivationFunction::create(const std::string& type) {
-#ifdef PADDLE_USE_MKLDNN
+#ifdef PADDLE_WITH_MKLDNN
   if (!type.empty() && type.compare(0, 7, "mkldnn_") == 0) {
     return MKLDNNActivation::create(type);
   }
...
@@ -20,7 +20,7 @@ limitations under the License. */
 #include "paddle/utils/Logging.h"
 #include "paddle/utils/Stat.h"
-#ifdef PADDLE_USE_MKLDNN
+#ifdef PADDLE_WITH_MKLDNN
 #include "paddle/gserver/layers/MKLDNNLayer.h"
 #endif
@@ -307,7 +307,7 @@ void NeuralNetwork::backward(const UpdateCallback& callback) {
 }
 void NeuralNetwork::finish() {
-#ifdef PADDLE_USE_MKLDNN
+#ifdef PADDLE_WITH_MKLDNN
   FOR_EACH_R(layer, layers_) {
     MKLDNNLayerPtr dnnLayer = std::dynamic_pointer_cast<MKLDNNLayer>(*layer);
     if (dnnLayer) {
...
@@ -48,7 +48,7 @@ public:
   */
  virtual void* alloc(size_t size) {
    void* ptr;
-#ifdef PADDLE_USE_MKLDNN
+#ifdef PADDLE_WITH_MKLDNN
    // refer to https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp
    // memory alignment
    CHECK_EQ(posix_memalign(&ptr, 4096ul, size), 0);
...
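
Note: this allocator hunk and the CPUAllocator one further down both pin CPU buffers to 4096-byte boundaries when MKL-DNN is enabled, since MKL-DNN primitives expect aligned memory. A self-contained sketch of the same POSIX call outside Paddle's allocators (the function name is illustrative):

    #include <cstdlib>  // posix_memalign, free

    // Returns `size` bytes aligned to a 4096-byte boundary, or nullptr on
    // failure; posix_memalign reports success as 0, and the buffer is later
    // released with free().
    void* AlignedAlloc4k(size_t size) {
      void* ptr = nullptr;
      if (posix_memalign(&ptr, 4096ul, size) != 0) return nullptr;
      return ptr;
    }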
@@ -206,7 +206,7 @@ double dotProduct<double>(const int n, const double* x, const double* y) {
 }
 #endif
-#if defined(PADDLE_USE_MKLML)
+#if defined(PADDLE_WITH_MKLML)
 template <>
 void vExp<float>(const int n, const float* a, float* r) {
...
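
The vExp<float> specialization guarded here forwards to MKL's vector math library. A hedged sketch of the shape of such a specialization (the fallback loop is illustrative; vsExp is the VML entry point declared in mkl_vml_functions.h):

    #include <cmath>
    #ifdef PADDLE_WITH_MKLML
    #include <mkl_vml_functions.h>
    #endif

    // Element-wise exponential: r[i] = exp(a[i]) for i in [0, n).
    void VExpSketch(const int n, const float* a, float* r) {
    #ifdef PADDLE_WITH_MKLML
      vsExp(n, a, r);  // vectorized exp from MKL VML
    #else
      for (int i = 0; i < n; ++i) r[i] = std::exp(a[i]);
    #endif
    }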
@@ -15,7 +15,7 @@ limitations under the License. */
 #ifndef MATHFUNCTIONS_H_
 #define MATHFUNCTIONS_H_
-#ifdef PADDLE_USE_MKLML
+#ifdef PADDLE_WITH_MKLML
 #include <mkl_cblas.h>
 #include <mkl_lapacke.h>
 #include <mkl_vml_functions.h>
...
@@ -43,7 +43,7 @@ void* CPUAllocator::Alloc(size_t& index, size_t size) {
   void* p;
-#ifdef PADDLE_USE_MKLDNN
+#ifdef PADDLE_WITH_MKLDNN
   // refer to https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp
   // memory alignment
   PADDLE_ENFORCE_EQ(posix_memalign(&p, 4096ul, size), 0);
...
@@ -132,7 +132,7 @@ void matmul<platform::CPUPlace, double>(
                matrix_b.data<double>(), beta, matrix_out->data<double>());
 }
-#ifdef PADDLE_USE_MKLML
+#ifdef PADDLE_WITH_MKLML
 // Use cblas_{s,d}gemm_batched if available: Run with 1 group of size batchSize.
 template <>
 void batched_gemm<platform::CPUPlace, float>(
...
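
The comment in this hunk refers to MKL's grouped batch interface: a single cblas_sgemm_batch call with one group whose size is the whole batch, instead of batchSize separate cblas_sgemm calls. A sketch of that call for row-major float matrices (names here are illustrative):

    #include <mkl.h>

    // C[i] = alpha * A[i] * B[i] + beta * C[i] for i in [0, batchSize),
    // issued as one cblas_sgemm_batch call with a single group.
    void BatchedGemmSketch(const float** A, const float** B, float** C,
                           MKL_INT M, MKL_INT N, MKL_INT K, MKL_INT batchSize) {
      CBLAS_TRANSPOSE transA = CblasNoTrans;
      CBLAS_TRANSPOSE transB = CblasNoTrans;
      MKL_INT lda = K, ldb = N, ldc = N;
      float alpha = 1.0f, beta = 0.0f;
      MKL_INT groupSize = batchSize;  // one group covering the whole batch
      cblas_sgemm_batch(CblasRowMajor, &transA, &transB, &M, &N, &K, &alpha,
                        A, &lda, B, &ldb, &beta, C, &ldc,
                        /*group_count=*/1, &groupSize);
    }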
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
-#ifdef PADDLE_USE_MKLML
+#ifdef PADDLE_WITH_MKLML
 #include <mkl_cblas.h>
 #include <mkl_lapacke.h>
 #include <mkl_vml_functions.h>
...
@@ -38,7 +38,7 @@ public:
   real torch_learningRate = optConfig_.learning_method() == "torch_momentum"
                                 ? 1.0 - paraConfig.momentum()
                                 : 1.0;
-#ifdef PADDLE_USE_MKLDNN
+#ifdef PADDLE_WITH_MKLDNN
   sgdUpdate(learningRate_ * paraConfig.learning_rate() *
                 (firstTime_ ? 1.0 : torch_learningRate),
             paraConfig.momentum(),
...
@@ -30,7 +30,7 @@ void sgdUpdateCpu(real learningRate,
                   const real* grad,
                   real* momentumVec) {
   decayRate *= learningRate;
-#ifdef PADDLE_USE_MKLML
+#ifdef PADDLE_WITH_MKLML
 #pragma omp parallel for
 #endif
   for (size_t i = 0; i < size; ++i) {
...
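
The pragma in this hunk is only compiled in under MKLML because the MKLML package bundles Intel's OpenMP runtime (libiomp5); without it the loop simply runs serially. The same guard in isolation, on an illustrative AXPY-style loop:

    #include <cstddef>

    // y[i] += a * x[i]; parallelized only when MKLML (and thus an OpenMP
    // runtime) is linked, otherwise the pragma disappears and the loop
    // stays serial.
    void AxpySketch(size_t n, float a, const float* x, float* y) {
    #ifdef PADDLE_WITH_MKLML
    #pragma omp parallel for
    #endif
      for (size_t i = 0; i < n; ++i) {
        y[i] += a * x[i];
      }
    }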
@@ -20,7 +20,7 @@ DEFINE_bool(use_gpu, false, "Only support CPU training");
 DEFINE_bool(use_gpu, true, "Whether to use GPU for training");
 #endif
-#ifdef PADDLE_USE_MKLDNN
+#ifdef PADDLE_WITH_MKLDNN
 // TODO(TJ): change to true when MKLDNN layers support multi-inputs
 DEFINE_bool(use_mkldnn, false, "Default still keep use CPU training");
 #else
...