diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7c497e3e048c4dd8d5c1291286de2ab9d218b914..0620b64cfa6e6350adb14b8d2e40f98aaaa7e42d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -49,6 +49,7 @@ set(THIRD_PARTY_BUILD_TYPE Release)
 option(WITH_AVX "Compile Paddle Serving with AVX intrinsics" OFF)
 option(WITH_MKL "Compile Paddle Serving with MKL support." OFF)
 option(WITH_GPU "Compile Paddle Serving with NVIDIA GPU" OFF)
+option(WITH_XPU "Compile Paddle Serving with Kunlun XPU" OFF)
 option(CLIENT "Compile Paddle Serving Client" OFF)
 option(SERVER "Compile Paddle Serving Server" OFF)
 option(APP "Compile Paddle Serving App package" OFF)
diff --git a/core/general-server/CMakeLists.txt b/core/general-server/CMakeLists.txt
index 9056e229a51f56463dc2eec5629f219d00dc6a38..560d387c2db5c32ec575a48b20ff60a9ed2353b4 100644
--- a/core/general-server/CMakeLists.txt
+++ b/core/general-server/CMakeLists.txt
@@ -28,8 +28,14 @@ if(WITH_GPU)
     target_link_libraries(serving ${CUDA_LIBRARIES})
 endif()
 
+#link_libraries(${PROJECT_BINARY_DIR}/third_party/install/Paddle/third_party/install/lite/cxx/lib/libpaddle_full_api_shared.so)
+
 if(WITH_MKL OR WITH_GPU)
-  target_link_libraries(serving -liomp5 -lmklml_intel -lmkldnn -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
+  target_link_libraries(serving -liomp5 -lmklml_intel -lmkldnn -lpthread
+                        -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
+  target_link_libraries(serving
+    ${PROJECT_BINARY_DIR}/third_party/install/Paddle/third_party/install/lite/cxx/lib/libpaddle_full_api_shared.so)
+
 else()
     target_link_libraries(serving openblas -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
 endif()
diff --git a/core/predictor/CMakeLists.txt b/core/predictor/CMakeLists.txt
index 1b9dc7b29845a2b8c7f958c1d8e836cb57e91d41..e953738e4cfbcac8b971c0d82cb68010e7f0c592 100644
--- a/core/predictor/CMakeLists.txt
+++ b/core/predictor/CMakeLists.txt
@@ -15,7 +15,8 @@ set_source_files_properties(
 add_dependencies(pdserving protobuf boost brpc leveldb pdcodegen
                  configure)
 target_link_libraries(pdserving
-    brpc protobuf boost leveldb configure -lpthread -lcrypto -lm -lrt -lssl -ldl -lz)
+    brpc protobuf boost leveldb configure -lpthread -lcrypto -lm -lrt
+    -lssl -ldl -lz)
 
 # install
 install(TARGETS pdserving
diff --git a/paddle_inference/inferencer-fluid-gpu/CMakeLists.txt b/paddle_inference/inferencer-fluid-gpu/CMakeLists.txt
index 725da85b45ca1070badf5343f340e49dce6b936f..97ef1f577ce89e0361b3fd5277d1516068d7b620 100644
--- a/paddle_inference/inferencer-fluid-gpu/CMakeLists.txt
+++ b/paddle_inference/inferencer-fluid-gpu/CMakeLists.txt
@@ -3,7 +3,8 @@ add_library(fluid_gpu_engine ${fluid_gpu_engine_srcs})
 target_include_directories(fluid_gpu_engine PUBLIC
     ${CMAKE_BINARY_DIR}/Paddle/fluid_install_dir/)
 add_dependencies(fluid_gpu_engine pdserving extern_paddle configure)
-target_link_libraries(fluid_gpu_engine pdserving paddle_fluid iomp5 mklml_intel -lpthread -lcrypto -lm -lrt -lssl -ldl -lz)
+target_link_libraries(fluid_gpu_engine pdserving paddle_fluid iomp5
+                      mklml_intel -lpthread -lcrypto -lm -lrt -lssl -ldl -lz)
 
 install(TARGETS fluid_gpu_engine
     ARCHIVE DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/lib
diff --git a/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h b/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h
index 2fc6ae587ff26f5f05ff9332f08067ab49d06254..8480c962cd99460f758997166a8ded6852f0c77a 100644
--- a/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h
+++ b/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h
@@ -535,6 +535,51 @@ class FluidGpuAnalysisDirWithSigmoidCore : public FluidGpuWithSigmoidCore {
   }
 };
 
+// Engine core that loads a model directory via the Analysis API and
+// targets a Kunlun XPU device (built when WITH_XPU is enabled).
+class FluidXpuAnalysisDirCore : public FluidFamilyCore {
+ public:
+  // Builds an XPU-enabled AnalysisConfig from |params| and creates the
+  // predictor into _core. Returns 0 on success, -1 on failure.
+  int create(const predictor::InferEngineCreationParams& params) {
+    std::string data_path = params.get_path();
+    if (access(data_path.c_str(), F_OK) == -1) {
+      LOG(ERROR) << "create paddle predictor failed, path not exists: "
+                 << data_path;
+      return -1;
+    }
+
+    paddle::AnalysisConfig analysis_config;
+    analysis_config.SetModel(data_path);
+    // Run inference on the Kunlun XPU instead of CPU/GPU.
+    analysis_config.EnableXpu();
+    analysis_config.SwitchSpecifyInputNames(true);
+    analysis_config.SetCpuMathLibraryNumThreads(1);
+
+    if (params.enable_memory_optimization()) {
+      analysis_config.EnableMemoryOptim();
+    }
+
+    if (params.enable_ir_optimization()) {
+      analysis_config.SwitchIrOptim(true);
+    } else {
+      analysis_config.SwitchIrOptim(false);
+    }
+
+    // Create the predictor under the global creation mutex.
+    AutoLock lock(GlobalPaddleCreateMutex::instance());
+    _core =
+        paddle::CreatePaddlePredictor(analysis_config);
+    if (NULL == _core.get()) {
+      LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
+      return -1;
+    }
+
+    VLOG(2) << "create paddle predictor success, path: " << data_path;
+    return 0;
+  }
+};
+
 } // namespace fluid_gpu
 } // namespace paddle_serving
 } // namespace baidu
diff --git a/paddle_inference/inferencer-fluid-gpu/src/fluid_gpu_engine.cpp b/paddle_inference/inferencer-fluid-gpu/src/fluid_gpu_engine.cpp
index 7447a417338a37716eff025721126e4c817408a6..b31c9d161eae1e9988c0f698862a0be83a84150d 100644
--- a/paddle_inference/inferencer-fluid-gpu/src/fluid_gpu_engine.cpp
+++ b/paddle_inference/inferencer-fluid-gpu/src/fluid_gpu_engine.cpp
@@ -54,6 +54,11 @@ REGIST_FACTORY_OBJECT_IMPL_WITH_NAME(
     ::baidu::paddle_serving::predictor::InferEngine,
     "FLUID_GPU_NATIVE_DIR_SIGMOID");
 
+REGIST_FACTORY_OBJECT_IMPL_WITH_NAME(
+    ::baidu::paddle_serving::predictor::FluidInferEngine<
+        FluidXpuAnalysisDirCore>,
+    ::baidu::paddle_serving::predictor::InferEngine,
+    "FLUID_XPU_ANALYSIS_DIR");
 } // namespace fluid_gpu
 } // namespace paddle_serving
 } // namespace baidu