From c05a8ec45ab19384230464f2a50359bb9af739bb Mon Sep 17 00:00:00 2001
From: MRXLT
Date: Tue, 21 Jul 2020 11:45:15 +0800
Subject: [PATCH] fix cmake

---
 CMakeLists.txt                                     |  1 +
 core/general-server/CMakeLists.txt                 |  8 +++-
 core/predictor/CMakeLists.txt                      |  3 +-
 .../inferencer-fluid-gpu/CMakeLists.txt            |  3 +-
 .../include/fluid_gpu_engine.h                     | 39 +++++++++++++++++++
 .../src/fluid_gpu_engine.cpp                       |  5 +++
 6 files changed, 56 insertions(+), 3 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7c497e3e..0620b64c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -49,6 +49,7 @@ set(THIRD_PARTY_BUILD_TYPE Release)
 option(WITH_AVX "Compile Paddle Serving with AVX intrinsics" OFF)
 option(WITH_MKL "Compile Paddle Serving with MKL support." OFF)
 option(WITH_GPU "Compile Paddle Serving with NVIDIA GPU" OFF)
+option(WITH_XPU "Compile Paddle Serving with Kunlun XPU" OFF)
 option(CLIENT "Compile Paddle Serving Client" OFF)
 option(SERVER "Compile Paddle Serving Server" OFF)
 option(APP "Compile Paddle Serving App package" OFF)
diff --git a/core/general-server/CMakeLists.txt b/core/general-server/CMakeLists.txt
index 9056e229..560d387c 100644
--- a/core/general-server/CMakeLists.txt
+++ b/core/general-server/CMakeLists.txt
@@ -28,8 +28,14 @@ if(WITH_GPU)
     target_link_libraries(serving ${CUDA_LIBRARIES})
 endif()
 
+#link_libraries(${PROJECT_BINARY_DIR}/third_party/install/Paddle/third_party/install/lite/cxx/lib/libpaddle_full_api_shared.so)
+
 if(WITH_MKL OR WITH_GPU)
-    target_link_libraries(serving -liomp5 -lmklml_intel -lmkldnn -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
+    target_link_libraries(serving -liomp5 -lmklml_intel -lmkldnn -lpthread
+        -lcrypto -lm -lrt -lssl -ldl -lz -lbz2 )
+    target_link_libraries(serving
+        ${PROJECT_BINARY_DIR}/third_party/install/Paddle/third_party/install/lite/cxx/lib/libpaddle_full_api_shared.so)
+
 else()
     target_link_libraries(serving openblas -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
 endif()
diff --git a/core/predictor/CMakeLists.txt b/core/predictor/CMakeLists.txt
index 1b9dc7b2..e953738e 100644
--- a/core/predictor/CMakeLists.txt
+++ b/core/predictor/CMakeLists.txt
@@ -15,7 +15,8 @@ set_source_files_properties(
 add_dependencies(pdserving protobuf boost brpc leveldb pdcodegen configure)
 
 target_link_libraries(pdserving
-    brpc protobuf boost leveldb configure -lpthread -lcrypto -lm -lrt -lssl -ldl -lz)
+    brpc protobuf boost leveldb configure -lpthread -lcrypto -lm -lrt
+    -lssl -ldl -lz)
 
 # install
 install(TARGETS pdserving
diff --git a/paddle_inference/inferencer-fluid-gpu/CMakeLists.txt b/paddle_inference/inferencer-fluid-gpu/CMakeLists.txt
index 725da85b..97ef1f57 100644
--- a/paddle_inference/inferencer-fluid-gpu/CMakeLists.txt
+++ b/paddle_inference/inferencer-fluid-gpu/CMakeLists.txt
@@ -3,7 +3,8 @@ add_library(fluid_gpu_engine ${fluid_gpu_engine_srcs})
 target_include_directories(fluid_gpu_engine PUBLIC
     ${CMAKE_BINARY_DIR}/Paddle/fluid_install_dir/)
 add_dependencies(fluid_gpu_engine pdserving extern_paddle configure)
-target_link_libraries(fluid_gpu_engine pdserving paddle_fluid iomp5 mklml_intel -lpthread -lcrypto -lm -lrt -lssl -ldl -lz)
+target_link_libraries(fluid_gpu_engine pdserving paddle_fluid iomp5
+                      mklml_intel -lpthread -lcrypto -lm -lrt -lssl -ldl -lz)
 
 install(TARGETS fluid_gpu_engine
         ARCHIVE DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/lib
diff --git a/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h b/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h
index 2fc6ae58..8480c962 100644
--- a/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h
+++ b/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h
@@ -535,6 +535,45 @@ class FluidGpuAnalysisDirWithSigmoidCore : public FluidGpuWithSigmoidCore {
   }
 };
 
+class FluidXpuAnalysisDirCore : public FluidFamilyCore {
+ public:
+  int create(const predictor::InferEngineCreationParams& params) {
+    std::string data_path = params.get_path();
+    if (access(data_path.c_str(), F_OK) == -1) {
+      LOG(ERROR) << "create paddle predictor failed, path not exits: "
+                 << data_path;
+      return -1;
+    }
+
+    paddle::AnalysisConfig analysis_config;
+    analysis_config.SetModel(data_path);
+    analysis_config.EnableXpu();
+    analysis_config.SwitchSpecifyInputNames(true);
+    analysis_config.SetCpuMathLibraryNumThreads(1);
+
+    if (params.enable_memory_optimization()) {
+      analysis_config.EnableMemoryOptim();
+    }
+
+    if (params.enable_ir_optimization()) {
+      analysis_config.SwitchIrOptim(true);
+    } else {
+      analysis_config.SwitchIrOptim(false);
+    }
+
+    AutoLock lock(GlobalPaddleCreateMutex::instance());
+    _core =
+        paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(analysis_config);
+    if (NULL == _core.get()) {
+      LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
+      return -1;
+    }
+
+    VLOG(2) << "create paddle predictor sucess, path: " << data_path;
+    return 0;
+  }
+};
+
 } // namespace fluid_gpu
 } // namespace paddle_serving
 } // namespace baidu
diff --git a/paddle_inference/inferencer-fluid-gpu/src/fluid_gpu_engine.cpp b/paddle_inference/inferencer-fluid-gpu/src/fluid_gpu_engine.cpp
index 7447a417..b31c9d16 100644
--- a/paddle_inference/inferencer-fluid-gpu/src/fluid_gpu_engine.cpp
+++ b/paddle_inference/inferencer-fluid-gpu/src/fluid_gpu_engine.cpp
@@ -54,6 +54,11 @@ REGIST_FACTORY_OBJECT_IMPL_WITH_NAME(
     ::baidu::paddle_serving::predictor::InferEngine,
     "FLUID_GPU_NATIVE_DIR_SIGMOID");
 
+REGIST_FACTORY_OBJECT_IMPL_WITH_NAME(
+    ::baidu::paddle_serving::predictor::FluidInferEngine<
+        FluidXpuAnalysisDirCore>,
+    ::baidu::paddle_serving::predictor::InferEngine,
+    "FLUID_XPU_ANALYSIS_DIR");
 } // namespace fluid_gpu
 } // namespace paddle_serving
 } // namespace baidu
--
GitLab
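The snippet below is a minimal, standalone sketch of the predictor-creation flow that the new FluidXpuAnalysisDirCore performs, with the serving-framework plumbing (InferEngineCreationParams, AutoLock, factory registration) stripped out. The AnalysisConfig calls are taken directly from the patch; the include header, the main() wrapper, and the model directory path are illustrative assumptions rather than part of the change.

// Condensed sketch of what the patched FluidXpuAnalysisDirCore::create() does,
// outside the Paddle Serving framework. Assumes the public Paddle inference API.
#include <iostream>
#include <memory>
#include <string>

#include "paddle_inference_api.h"  // assumed public inference header

int main() {
  const std::string model_dir = "./my_model";  // placeholder model directory

  paddle::AnalysisConfig config;
  config.SetModel(model_dir);             // directory with the saved inference model
  config.EnableXpu();                     // target a Kunlun XPU device
  config.SwitchSpecifyInputNames(true);
  config.SetCpuMathLibraryNumThreads(1);
  config.EnableMemoryOptim();             // mirrors params.enable_memory_optimization()
  config.SwitchIrOptim(true);             // mirrors params.enable_ir_optimization()

  auto predictor =
      paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(config);
  if (predictor == nullptr) {
    std::cerr << "create paddle predictor failed, path: " << model_dir << std::endl;
    return -1;
  }
  std::cout << "create paddle predictor success, path: " << model_dir << std::endl;
  return 0;
}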