diff --git a/CMakeLists.txt b/CMakeLists.txt index 7c497e3e048c4dd8d5c1291286de2ab9d218b914..59d6fcb07d27e1f3ab259e69d36708b775c1852a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -54,6 +54,7 @@ option(SERVER "Compile Paddle Serving Server" OFF) option(APP "Compile Paddle Serving App package" OFF) option(WITH_ELASTIC_CTR "Compile ELASITC-CTR solution" OFF) option(PACK "Compile for whl" OFF) +option(WITH_TRT "Compile Paddle Serving with TRT" OFF) set(WITH_MKLML ${WITH_MKL}) if (NOT DEFINED WITH_MKLDNN) diff --git a/README.md b/README.md index 44cee7bac8087a60e08754f007ad33bdebad98e3..fb537b65db83d013f570c8208f21c219ca5084a3 100644 --- a/README.md +++ b/README.md @@ -128,6 +128,7 @@ python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --po | `mem_optim_off` | - | - | Disable memory / graphic memory optimization | | `ir_optim` | - | - | Enable analysis and optimization of calculation graph | | `use_mkl` (Only for cpu version) | - | - | Run inference with MKL | +| `use_trt` (Only for trt version) | - | - | Run inference with TensorRT | Here, we use `curl` to send a HTTP POST request to the service we just started. Users can use any python library to send HTTP POST as well, e.g, [requests](https://requests.readthedocs.io/en/master/). diff --git a/README_CN.md b/README_CN.md index 8bdc2702a68ed88437495fe8b4ced3817742d13a..2c37a26681d4291adcf7e8e70d3392772fabbe6b 100644 --- a/README_CN.md +++ b/README_CN.md @@ -124,6 +124,7 @@ python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --po | `mem_optim_off` | - | - | Disable memory optimization | | `ir_optim` | - | - | Enable analysis and optimization of calculation graph | | `use_mkl` (Only for cpu version) | - | - | Run inference with MKL | +| `use_trt` (Only for trt version) | - | - | Run inference with TensorRT | 我们使用 `curl` 命令来发送HTTP POST请求给刚刚启动的服务。用户也可以调用python库来发送HTTP POST请求,请参考英文文档 [requests](https://requests.readthedocs.io/en/master/)。 diff --git a/cmake/paddlepaddle.cmake b/cmake/paddlepaddle.cmake index 5a164e93437e59e9b93ad6472755adffea8421ae..4b7d3ed1f620bfcd2e1e214c49c57ee3848129e7 100644 --- a/cmake/paddlepaddle.cmake +++ b/cmake/paddlepaddle.cmake @@ -34,7 +34,11 @@ message( "WITH_GPU = ${WITH_GPU}") SET(PADDLE_VERSION "1.8.4") if (WITH_GPU) - SET(PADDLE_LIB_VERSION "${PADDLE_VERSION}-gpu-cuda${CUDA_VERSION_MAJOR}-cudnn7-avx-mkl") + if (WITH_TRT) + SET(PADDLE_LIB_VERSION "${PADDLE_VERSION}-gpu-cuda10.1-cudnn7.6-avx-mkl-trt6") + else() + SET(PADDLE_LIB_VERSION "${PADDLE_VERSION}-gpu-cuda10-cudnn7-avx-mkl") + endif() else() if (WITH_AVX) if (WITH_MKLML) @@ -50,21 +54,38 @@ endif() SET(PADDLE_LIB_PATH "http://paddle-inference-lib.bj.bcebos.com/${PADDLE_LIB_VERSION}/fluid_inference.tgz") MESSAGE(STATUS "PADDLE_LIB_PATH=${PADDLE_LIB_PATH}") if (WITH_GPU OR WITH_MKLML) -ExternalProject_Add( - "extern_paddle" - ${EXTERNAL_PROJECT_LOG_ARGS} - URL "${PADDLE_LIB_PATH}" - PREFIX "${PADDLE_SOURCES_DIR}" - DOWNLOAD_DIR "${PADDLE_DOWNLOAD_DIR}" - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - UPDATE_COMMAND "" - INSTALL_COMMAND - ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/include ${PADDLE_INSTALL_DIR}/include && - ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/lib ${PADDLE_INSTALL_DIR}/lib && - ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/third_party ${PADDLE_INSTALL_DIR}/third_party && - ${CMAKE_COMMAND} -E copy ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so.0 ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so -) + if 
(WITH_TRT) + ExternalProject_Add( + "extern_paddle" + ${EXTERNAL_PROJECT_LOG_ARGS} + URL "${PADDLE_LIB_PATH}" + PREFIX "${PADDLE_SOURCES_DIR}" + DOWNLOAD_DIR "${PADDLE_DOWNLOAD_DIR}" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + UPDATE_COMMAND "" + INSTALL_COMMAND + ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/include ${PADDLE_INSTALL_DIR}/include && + ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/lib ${PADDLE_INSTALL_DIR}/lib && + ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/third_party ${PADDLE_INSTALL_DIR}/third_party + ) + else() + ExternalProject_Add( + "extern_paddle" + ${EXTERNAL_PROJECT_LOG_ARGS} + URL "${PADDLE_LIB_PATH}" + PREFIX "${PADDLE_SOURCES_DIR}" + DOWNLOAD_DIR "${PADDLE_DOWNLOAD_DIR}" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + UPDATE_COMMAND "" + INSTALL_COMMAND + ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/include ${PADDLE_INSTALL_DIR}/include && + ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/lib ${PADDLE_INSTALL_DIR}/lib && + ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/third_party ${PADDLE_INSTALL_DIR}/third_party && + ${CMAKE_COMMAND} -E copy ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so.0 ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so + ) + endif() else() ExternalProject_Add( "extern_paddle" @@ -92,8 +113,16 @@ LINK_DIRECTORIES(${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib) ADD_LIBRARY(openblas STATIC IMPORTED GLOBAL) SET_PROPERTY(TARGET openblas PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/third_party/install/openblas/lib/libopenblas.a) -ADD_LIBRARY(paddle_fluid STATIC IMPORTED GLOBAL) -SET_PROPERTY(TARGET paddle_fluid PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/lib/libpaddle_fluid.a) +ADD_LIBRARY(paddle_fluid SHARED IMPORTED GLOBAL) +SET_PROPERTY(TARGET paddle_fluid PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/lib/libpaddle_fluid.so) + +if (WITH_TRT) +ADD_LIBRARY(nvinfer SHARED IMPORTED GLOBAL) +SET_PROPERTY(TARGET nvinfer PROPERTY IMPORTED_LOCATION ${TENSORRT_ROOT}/lib/libnvinfer.so) + +ADD_LIBRARY(nvinfer_plugin SHARED IMPORTED GLOBAL) +SET_PROPERTY(TARGET nvinfer_plugin PROPERTY IMPORTED_LOCATION ${TENSORRT_ROOT}/lib/libnvinfer_plugin.so) +endif() ADD_LIBRARY(xxhash STATIC IMPORTED GLOBAL) SET_PROPERTY(TARGET xxhash PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/third_party/install/xxhash/lib/libxxhash.a) @@ -101,4 +130,9 @@ SET_PROPERTY(TARGET xxhash PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/thir LIST(APPEND external_project_dependencies paddle) LIST(APPEND paddle_depend_libs - xxhash) + xxhash) + +if(WITH_TRT) +LIST(APPEND paddle_depend_libs + nvinfer nvinfer_plugin) +endif() diff --git a/core/configure/proto/server_configure.proto b/core/configure/proto/server_configure.proto index de32637b2a523df1a8d8cd2e28dcf29e79ff96dc..c008ee857bb7c69672e399ce44b2420d5db7fb3c 100644 --- a/core/configure/proto/server_configure.proto +++ b/core/configure/proto/server_configure.proto @@ -44,6 +44,7 @@ message EngineDesc { optional bool static_optimization = 14; optional bool force_update_static_cache = 15; optional bool enable_ir_optimization = 16; + optional bool use_trt = 17; }; // model_toolkit conf diff --git a/core/cube/CMakeLists.txt b/core/cube/CMakeLists.txt index 07cf04977b618a515a2459f646c2dba298a5d58b..f9dc4d2c2508720f450b4aee3aba5dfdd7ccd43b 100644 --- a/core/cube/CMakeLists.txt +++ b/core/cube/CMakeLists.txt @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions 
and # limitations under the License +#execute_process(COMMAND go env -w GO111MODULE=off) add_subdirectory(cube-server) add_subdirectory(cube-api) add_subdirectory(cube-builder) -add_subdirectory(cube-transfer) -add_subdirectory(cube-agent) +#add_subdirectory(cube-transfer) +#add_subdirectory(cube-agent) diff --git a/core/general-client/include/general_model.h b/core/general-client/include/general_model.h index a81a0005473f3eb4039dd77aa430957e52eda687..3ee960069fd1eb8575d39fe4797038f9d4ef9f3b 100644 --- a/core/general-client/include/general_model.h +++ b/core/general-client/include/general_model.h @@ -218,25 +218,15 @@ class PredictorClient { int destroy_predictor(); - int batch_predict( - const std::vector>>& float_feed_batch, - const std::vector& float_feed_name, - const std::vector>& float_shape, - const std::vector>>& int_feed_batch, - const std::vector& int_feed_name, - const std::vector>& int_shape, - const std::vector& fetch_name, - PredictorRes& predict_res_batch, // NOLINT - const int& pid, - const uint64_t log_id); - int numpy_predict( const std::vector>>& float_feed_batch, const std::vector& float_feed_name, const std::vector>& float_shape, + const std::vector>& float_lod_slot_batch, const std::vector>>& int_feed_batch, const std::vector& int_feed_name, const std::vector>& int_shape, + const std::vector>& int_lod_slot_batch, const std::vector& fetch_name, PredictorRes& predict_res_batch, // NOLINT const int& pid, diff --git a/core/general-client/src/general_model.cpp b/core/general-client/src/general_model.cpp index a3160830a71c1244af209671da3f96d559c47f02..c2db765a082bf2e18aa7fe88c614a6bc8bb457c8 100644 --- a/core/general-client/src/general_model.cpp +++ b/core/general-client/src/general_model.cpp @@ -137,227 +137,15 @@ int PredictorClient::create_predictor() { return 0; } -int PredictorClient::batch_predict( - const std::vector>> &float_feed_batch, - const std::vector &float_feed_name, - const std::vector> &float_shape, - const std::vector>> &int_feed_batch, - const std::vector &int_feed_name, - const std::vector> &int_shape, - const std::vector &fetch_name, - PredictorRes &predict_res_batch, - const int &pid, - const uint64_t log_id) { - int batch_size = std::max(float_feed_batch.size(), int_feed_batch.size()); - - predict_res_batch.clear(); - Timer timeline; - int64_t preprocess_start = timeline.TimeStampUS(); - - int fetch_name_num = fetch_name.size(); - - _api.thrd_initialize(); - std::string variant_tag; - _predictor = _api.fetch_predictor("general_model", &variant_tag); - predict_res_batch.set_variant_tag(variant_tag); - VLOG(2) << "fetch general model predictor done."; - VLOG(2) << "float feed name size: " << float_feed_name.size(); - VLOG(2) << "int feed name size: " << int_feed_name.size(); - VLOG(2) << "max body size : " << brpc::fLU64::FLAGS_max_body_size; - Request req; - req.set_log_id(log_id); - for (auto &name : fetch_name) { - req.add_fetch_var_names(name); - } - - for (int bi = 0; bi < batch_size; bi++) { - VLOG(2) << "prepare batch " << bi; - std::vector tensor_vec; - FeedInst *inst = req.add_insts(); - std::vector> float_feed = float_feed_batch[bi]; - std::vector> int_feed = int_feed_batch[bi]; - for (auto &name : float_feed_name) { - tensor_vec.push_back(inst->add_tensor_array()); - } - - for (auto &name : int_feed_name) { - tensor_vec.push_back(inst->add_tensor_array()); - } - - VLOG(2) << "batch [" << bi << "] int_feed_name and float_feed_name " - << "prepared"; - int vec_idx = 0; - VLOG(2) << "tensor_vec size " << tensor_vec.size() << " float shape " - << 
float_shape.size(); - for (auto &name : float_feed_name) { - int idx = _feed_name_to_idx[name]; - Tensor *tensor = tensor_vec[idx]; - VLOG(2) << "prepare float feed " << name << " shape size " - << float_shape[vec_idx].size(); - for (uint32_t j = 0; j < float_shape[vec_idx].size(); ++j) { - tensor->add_shape(float_shape[vec_idx][j]); - } - tensor->set_elem_type(1); - for (uint32_t j = 0; j < float_feed[vec_idx].size(); ++j) { - tensor->add_float_data(float_feed[vec_idx][j]); - } - vec_idx++; - } - - VLOG(2) << "batch [" << bi << "] " - << "float feed value prepared"; - - vec_idx = 0; - for (auto &name : int_feed_name) { - int idx = _feed_name_to_idx[name]; - Tensor *tensor = tensor_vec[idx]; - if (_type[idx] == 0) { - VLOG(2) << "prepare int64 feed " << name << " shape size " - << int_shape[vec_idx].size(); - VLOG(3) << "feed var name " << name << " index " << vec_idx - << "first data " << int_feed[vec_idx][0]; - for (uint32_t j = 0; j < int_feed[vec_idx].size(); ++j) { - tensor->add_int64_data(int_feed[vec_idx][j]); - } - } else if (_type[idx] == 2) { - VLOG(2) << "prepare int32 feed " << name << " shape size " - << int_shape[vec_idx].size(); - VLOG(3) << "feed var name " << name << " index " << vec_idx - << "first data " << int32_t(int_feed[vec_idx][0]); - for (uint32_t j = 0; j < int_feed[vec_idx].size(); ++j) { - tensor->add_int_data(int32_t(int_feed[vec_idx][j])); - } - } - - for (uint32_t j = 0; j < int_shape[vec_idx].size(); ++j) { - tensor->add_shape(int_shape[vec_idx][j]); - } - tensor->set_elem_type(_type[idx]); - vec_idx++; - } - - VLOG(2) << "batch [" << bi << "] " - << "int feed value prepared"; - } - - int64_t preprocess_end = timeline.TimeStampUS(); - - int64_t client_infer_start = timeline.TimeStampUS(); - - Response res; - - int64_t client_infer_end = 0; - int64_t postprocess_start = 0; - int64_t postprocess_end = 0; - - if (FLAGS_profile_client) { - if (FLAGS_profile_server) { - req.set_profile_server(true); - } - } - - res.Clear(); - if (_predictor->inference(&req, &res) != 0) { - LOG(ERROR) << "failed call predictor with req: " << req.ShortDebugString(); - _api.thrd_clear(); - return -1; - } else { - client_infer_end = timeline.TimeStampUS(); - postprocess_start = client_infer_end; - VLOG(2) << "get model output num"; - uint32_t model_num = res.outputs_size(); - VLOG(2) << "model num: " << model_num; - for (uint32_t m_idx = 0; m_idx < model_num; ++m_idx) { - VLOG(2) << "process model output index: " << m_idx; - auto output = res.outputs(m_idx); - ModelRes model; - model.set_engine_name(output.engine_name()); - - int idx = 0; - - for (auto &name : fetch_name) { - // int idx = _fetch_name_to_idx[name]; - int shape_size = output.insts(0).tensor_array(idx).shape_size(); - VLOG(2) << "fetch var " << name << " index " << idx << " shape size " - << shape_size; - model._shape_map[name].resize(shape_size); - for (int i = 0; i < shape_size; ++i) { - model._shape_map[name][i] = - output.insts(0).tensor_array(idx).shape(i); - } - int lod_size = output.insts(0).tensor_array(idx).lod_size(); - if (lod_size > 0) { - model._lod_map[name].resize(lod_size); - for (int i = 0; i < lod_size; ++i) { - model._lod_map[name][i] = output.insts(0).tensor_array(idx).lod(i); - } - } - idx += 1; - } - - idx = 0; - for (auto &name : fetch_name) { - // int idx = _fetch_name_to_idx[name]; - if (_fetch_name_to_type[name] == 0) { - VLOG(2) << "ferch var " << name << "type int64"; - int size = output.insts(0).tensor_array(idx).int64_data_size(); - model._int64_value_map[name] = std::vector( - 
output.insts(0).tensor_array(idx).int64_data().begin(), - output.insts(0).tensor_array(idx).int64_data().begin() + size); - } else if (_fetch_name_to_type[name] == 1) { - VLOG(2) << "fetch var " << name << "type float"; - int size = output.insts(0).tensor_array(idx).float_data_size(); - model._float_value_map[name] = std::vector( - output.insts(0).tensor_array(idx).float_data().begin(), - output.insts(0).tensor_array(idx).float_data().begin() + size); - } else if (_fetch_name_to_type[name] == 2) { - VLOG(2) << "fetch var " << name << "type int32"; - int size = output.insts(0).tensor_array(idx).int_data_size(); - model._int32_value_map[name] = std::vector( - output.insts(0).tensor_array(idx).int_data().begin(), - output.insts(0).tensor_array(idx).int_data().begin() + size); - } - - idx += 1; - } - predict_res_batch.add_model_res(std::move(model)); - } - postprocess_end = timeline.TimeStampUS(); - } - - if (FLAGS_profile_client) { - std::ostringstream oss; - oss << "PROFILE\t" - << "pid:" << pid << "\t" - << "prepro_0:" << preprocess_start << " " - << "prepro_1:" << preprocess_end << " " - << "client_infer_0:" << client_infer_start << " " - << "client_infer_1:" << client_infer_end << " "; - if (FLAGS_profile_server) { - int op_num = res.profile_time_size() / 2; - for (int i = 0; i < op_num; ++i) { - oss << "op" << i << "_0:" << res.profile_time(i * 2) << " "; - oss << "op" << i << "_1:" << res.profile_time(i * 2 + 1) << " "; - } - } - - oss << "postpro_0:" << postprocess_start << " "; - oss << "postpro_1:" << postprocess_end; - - fprintf(stderr, "%s\n", oss.str().c_str()); - } - - _api.thrd_clear(); - return 0; -} - int PredictorClient::numpy_predict( const std::vector>> &float_feed_batch, const std::vector &float_feed_name, const std::vector> &float_shape, + const std::vector> &float_lod_slot_batch, const std::vector>> &int_feed_batch, const std::vector &int_feed_name, const std::vector> &int_shape, + const std::vector> &int_lod_slot_batch, const std::vector &fetch_name, PredictorRes &predict_res_batch, const int &pid, @@ -412,6 +200,9 @@ int PredictorClient::numpy_predict( for (uint32_t j = 0; j < float_shape[vec_idx].size(); ++j) { tensor->add_shape(float_shape[vec_idx][j]); } + for (uint32_t j = 0; j < float_lod_slot_batch[vec_idx].size(); ++j) { + tensor->add_lod(float_lod_slot_batch[vec_idx][j]); + } tensor->set_elem_type(1); const int float_shape_size = float_shape[vec_idx].size(); switch (float_shape_size) { @@ -470,6 +261,9 @@ int PredictorClient::numpy_predict( for (uint32_t j = 0; j < int_shape[vec_idx].size(); ++j) { tensor->add_shape(int_shape[vec_idx][j]); } + for (uint32_t j = 0; j < int_lod_slot_batch[vec_idx].size(); ++j) { + tensor->add_lod(int_lod_slot_batch[vec_idx][j]); + } tensor->set_elem_type(_type[idx]); if (_type[idx] == 0) { diff --git a/core/general-client/src/pybind_general_model.cpp b/core/general-client/src/pybind_general_model.cpp index 1e79a8d2489a9ebc2024402bada32a4be2000146..a0ac6caf2e42d9c4eee475648a371681ad30b135 100644 --- a/core/general-client/src/pybind_general_model.cpp +++ b/core/general-client/src/pybind_general_model.cpp @@ -95,42 +95,18 @@ PYBIND11_MODULE(serving_client, m) { [](PredictorClient &self) { self.create_predictor(); }) .def("destroy_predictor", [](PredictorClient &self) { self.destroy_predictor(); }) - .def("batch_predict", - [](PredictorClient &self, - const std::vector>> - &float_feed_batch, - const std::vector &float_feed_name, - const std::vector> &float_shape, - const std::vector>> - &int_feed_batch, - const std::vector 
&int_feed_name, - const std::vector> &int_shape, - const std::vector &fetch_name, - PredictorRes &predict_res_batch, - const int &pid, - const uint64_t log_id) { - return self.batch_predict(float_feed_batch, - float_feed_name, - float_shape, - int_feed_batch, - int_feed_name, - int_shape, - fetch_name, - predict_res_batch, - pid, - log_id); - }, - py::call_guard()) .def("numpy_predict", [](PredictorClient &self, const std::vector>> &float_feed_batch, const std::vector &float_feed_name, const std::vector> &float_shape, + const std::vector> &float_lod_slot_batch, const std::vector>> &int_feed_batch, const std::vector &int_feed_name, const std::vector> &int_shape, + const std::vector> &int_lod_slot_batch, const std::vector &fetch_name, PredictorRes &predict_res_batch, const int &pid, @@ -138,9 +114,11 @@ PYBIND11_MODULE(serving_client, m) { return self.numpy_predict(float_feed_batch, float_feed_name, float_shape, + float_lod_slot_batch, int_feed_batch, int_feed_name, int_shape, + int_lod_slot_batch, fetch_name, predict_res_batch, pid, diff --git a/core/general-server/CMakeLists.txt b/core/general-server/CMakeLists.txt index 9056e229a51f56463dc2eec5629f219d00dc6a38..aa1b7badc9140301d84bdbd94b3324b52176e837 100644 --- a/core/general-server/CMakeLists.txt +++ b/core/general-server/CMakeLists.txt @@ -9,7 +9,7 @@ endif() target_include_directories(serving PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/../../core/predictor ) - + include_directories(${CUDNN_ROOT}/include/) if(WITH_GPU) target_link_libraries(serving -Wl,--whole-archive fluid_gpu_engine -Wl,--no-whole-archive) @@ -29,7 +29,11 @@ if(WITH_GPU) endif() if(WITH_MKL OR WITH_GPU) + if (WITH_TRT) + target_link_libraries(serving -liomp5 -lmklml_intel -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2) + else() target_link_libraries(serving -liomp5 -lmklml_intel -lmkldnn -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2) +endif() else() target_link_libraries(serving openblas -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2) endif() diff --git a/core/general-server/op/general_reader_op.cpp b/core/general-server/op/general_reader_op.cpp index 14fd617e058ccc392a673678d03145ec1f6fd6d2..0329fac6b9bb6eda59f3f6f1589cd00c3eec0fd9 100644 --- a/core/general-server/op/general_reader_op.cpp +++ b/core/general-server/op/general_reader_op.cpp @@ -73,8 +73,6 @@ int GeneralReaderOp::inference() { // reade request from client const Request *req = dynamic_cast(get_request_message()); uint64_t log_id = req->log_id(); - - int batch_size = req->insts_size(); int input_var_num = 0; std::vector elem_type; std::vector elem_size; @@ -83,7 +81,6 @@ int GeneralReaderOp::inference() { GeneralBlob *res = mutable_data(); TensorVector *out = &res->tensor_vector; - res->SetBatchSize(batch_size); res->SetLogId(log_id); if (!res) { @@ -98,11 +95,11 @@ int GeneralReaderOp::inference() { VLOG(2) << "(logid=" << log_id << ") start to call load general model_conf op"; + baidu::paddle_serving::predictor::Resource &resource = baidu::paddle_serving::predictor::Resource::instance(); VLOG(2) << "(logid=" << log_id << ") get resource pointer done."; - std::shared_ptr model_config = resource.get_general_model_config(); @@ -122,13 +119,11 @@ int GeneralReaderOp::inference() { elem_type.resize(var_num); elem_size.resize(var_num); capacity.resize(var_num); - // prepare basic information for input for (int i = 0; i < var_num; ++i) { paddle::PaddleTensor lod_tensor; elem_type[i] = req->insts(0).tensor_array(i).elem_type(); - VLOG(2) << "(logid=" << log_id << ") var[" << i - << "] has elem type: " << 
elem_type[i]; + VLOG(2) << "var[" << i << "] has elem type: " << elem_type[i]; if (elem_type[i] == 0) { // int64 elem_size[i] = sizeof(int64_t); lod_tensor.dtype = paddle::PaddleDType::INT64; @@ -139,13 +134,24 @@ int GeneralReaderOp::inference() { elem_size[i] = sizeof(int32_t); lod_tensor.dtype = paddle::PaddleDType::INT32; } - - if (model_config->_is_lod_feed[i]) { - lod_tensor.lod.resize(1); - lod_tensor.lod[0].push_back(0); + // implement lod tensor here + if (req->insts(0).tensor_array(i).lod_size() > 0) { VLOG(2) << "(logid=" << log_id << ") var[" << i << "] is lod_tensor"; + lod_tensor.lod.resize(1); + for (int k = 0; k < req->insts(0).tensor_array(i).lod_size(); ++k) { + lod_tensor.lod[0].push_back(req->insts(0).tensor_array(i).lod(k)); + } + capacity[i] = 1; + for (int k = 0; k < req->insts(0).tensor_array(i).shape_size(); ++k) { + int dim = req->insts(0).tensor_array(i).shape(k); + VLOG(2) << "(logid=" << log_id << ") shape for var[" << i + << "]: " << dim; + capacity[i] *= dim; + lod_tensor.shape.push_back(dim); + } + VLOG(2) << "(logid=" << log_id << ") var[" << i + << "] is tensor, capacity: " << capacity[i]; } else { - lod_tensor.shape.push_back(batch_size); capacity[i] = 1; for (int k = 0; k < req->insts(0).tensor_array(i).shape_size(); ++k) { int dim = req->insts(0).tensor_array(i).shape(k); @@ -160,51 +166,40 @@ int GeneralReaderOp::inference() { lod_tensor.name = model_config->_feed_name[i]; out->push_back(lod_tensor); } - // specify the memory needed for output tensor_vector for (int i = 0; i < var_num; ++i) { if (out->at(i).lod.size() == 1) { int tensor_size = 0; - for (int j = 0; j < batch_size; ++j) { - const Tensor &tensor = req->insts(j).tensor_array(i); - int data_len = 0; - if (tensor.int64_data_size() > 0) { - data_len = tensor.int64_data_size(); - } else if (tensor.float_data_size() > 0) { - data_len = tensor.float_data_size(); - } else if (tensor.int_data_size() > 0) { - data_len = tensor.int_data_size(); - } - VLOG(2) << "(logid=" << log_id << ") tensor size for var[" << i - << "]: " << data_len; - tensor_size += data_len; - - int cur_len = out->at(i).lod[0].back(); - VLOG(2) << "(logid=" << log_id << ") current len: " << cur_len; - - int sample_len = 0; - if (tensor.shape_size() == 1) { - sample_len = data_len; - } else { - sample_len = tensor.shape(0); - } - out->at(i).lod[0].push_back(cur_len + sample_len); - VLOG(2) << "(logid=" << log_id << ") new len: " << cur_len + sample_len; - } - out->at(i).data.Resize(tensor_size * elem_size[i]); - out->at(i).shape = {out->at(i).lod[0].back()}; - for (int j = 1; j < req->insts(0).tensor_array(i).shape_size(); ++j) { - out->at(i).shape.push_back(req->insts(0).tensor_array(i).shape(j)); + const Tensor &tensor = req->insts(0).tensor_array(i); + int data_len = 0; + if (tensor.int64_data_size() > 0) { + data_len = tensor.int64_data_size(); + } else if (tensor.float_data_size() > 0) { + data_len = tensor.float_data_size(); + } else if (tensor.int_data_size() > 0) { + data_len = tensor.int_data_size(); } - if (out->at(i).shape.size() == 1) { - out->at(i).shape.push_back(1); + VLOG(2) << "(logid=" << log_id << ") tensor size for var[" << i + << "]: " << data_len; + tensor_size += data_len; + + int cur_len = out->at(i).lod[0].back(); + VLOG(2) << "(logid=" << log_id << ") current len: " << cur_len; + + int sample_len = 0; + if (tensor.shape_size() == 1) { + sample_len = data_len; + } else { + sample_len = tensor.shape(0); } + VLOG(2) << "(logid=" << log_id << ") new len: " << cur_len + sample_len; + 
out->at(i).data.Resize(tensor_size * elem_size[i]); VLOG(2) << "(logid=" << log_id << ") var[" << i << "] is lod_tensor and len=" << out->at(i).lod[0].back(); } else { - out->at(i).data.Resize(batch_size * capacity[i] * elem_size[i]); + out->at(i).data.Resize(capacity[i] * elem_size[i]); VLOG(2) << "(logid=" << log_id << ") var[" << i - << "] is tensor and capacity=" << batch_size * capacity[i]; + << "] is tensor and capacity=" << capacity[i]; } } @@ -215,58 +210,36 @@ int GeneralReaderOp::inference() { VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i << "] is " << req->insts(0).tensor_array(i).int64_data(0); int offset = 0; - for (int j = 0; j < batch_size; ++j) { - int elem_num = req->insts(j).tensor_array(i).int64_data_size(); - for (int k = 0; k < elem_num; ++k) { - dst_ptr[offset + k] = req->insts(j).tensor_array(i).int64_data(k); - } - if (out->at(i).lod.size() == 1) { - offset = out->at(i).lod[0][j + 1]; - } else { - offset += capacity[i]; - } + int elem_num = req->insts(0).tensor_array(i).int64_data_size(); + for (int k = 0; k < elem_num; ++k) { + dst_ptr[offset + k] = req->insts(0).tensor_array(i).int64_data(k); } } else if (elem_type[i] == 1) { float *dst_ptr = static_cast(out->at(i).data.data()); VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i << "] is " << req->insts(0).tensor_array(i).float_data(0); int offset = 0; - for (int j = 0; j < batch_size; ++j) { - int elem_num = req->insts(j).tensor_array(i).float_data_size(); - for (int k = 0; k < elem_num; ++k) { - dst_ptr[offset + k] = req->insts(j).tensor_array(i).float_data(k); - } - if (out->at(i).lod.size() == 1) { - offset = out->at(i).lod[0][j + 1]; - } else { - offset += capacity[i]; - } + int elem_num = req->insts(0).tensor_array(i).float_data_size(); + for (int k = 0; k < elem_num; ++k) { + dst_ptr[offset + k] = req->insts(0).tensor_array(i).float_data(k); } } else if (elem_type[i] == 2) { int32_t *dst_ptr = static_cast(out->at(i).data.data()); VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i << "] is " << req->insts(0).tensor_array(i).int_data(0); int offset = 0; - for (int j = 0; j < batch_size; ++j) { - int elem_num = req->insts(j).tensor_array(i).int_data_size(); - for (int k = 0; k < elem_num; ++k) { - dst_ptr[offset + k] = req->insts(j).tensor_array(i).int_data(k); - } - if (out->at(i).lod.size() == 1) { - offset = out->at(i).lod[0][j + 1]; - } else { - offset += capacity[i]; - } + int elem_num = req->insts(0).tensor_array(i).int_data_size(); + for (int k = 0; k < elem_num; ++k) { + dst_ptr[offset + k] = req->insts(0).tensor_array(i).int_data(k); } } } VLOG(2) << "(logid=" << log_id << ") output size: " << out->size(); - timeline.Pause(); int64_t end = timeline.TimeStampUS(); res->p_size = 0; - res->_batch_size = batch_size; + res->_batch_size = 1; AddBlobInfo(res, start); AddBlobInfo(res, end); diff --git a/core/predictor/CMakeLists.txt b/core/predictor/CMakeLists.txt index 6b5013c3edadb4592df40db539fa75fb9364d02f..637c7c15530273bc908ec2f8693a3d66989eebd2 100644 --- a/core/predictor/CMakeLists.txt +++ b/core/predictor/CMakeLists.txt @@ -13,7 +13,9 @@ set_source_files_properties( PROPERTIES COMPILE_FLAGS "-Wno-strict-aliasing -Wno-unused-variable -Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") add_dependencies(pdserving protobuf boost brpc leveldb pdcodegen configure) - +if (WITH_TRT) + add_definitions(-DWITH_TRT) +endif() target_link_libraries(pdserving brpc protobuf boost leveldb configure -lpthread -lcrypto -lm 
-lrt -lssl -ldl -lz) diff --git a/core/predictor/framework/infer.h b/core/predictor/framework/infer.h index 1cff7647e2dbbcc8df4d144f81488fde35aeb798..431bc456326c1714dce48e2f6321bf58f3e021ce 100644 --- a/core/predictor/framework/infer.h +++ b/core/predictor/framework/infer.h @@ -38,6 +38,7 @@ class InferEngineCreationParams { _enable_ir_optimization = false; _static_optimization = false; _force_update_static_cache = false; + _use_trt = false; } void set_path(const std::string& path) { _path = path; } @@ -50,12 +51,16 @@ class InferEngineCreationParams { _enable_ir_optimization = enable_ir_optimization; } + void set_use_trt(bool use_trt) { _use_trt = use_trt; } + bool enable_memory_optimization() const { return _enable_memory_optimization; } bool enable_ir_optimization() const { return _enable_ir_optimization; } + bool use_trt() const { return _use_trt; } + void set_static_optimization(bool static_optimization = false) { _static_optimization = static_optimization; } @@ -86,6 +91,7 @@ class InferEngineCreationParams { bool _enable_ir_optimization; bool _static_optimization; bool _force_update_static_cache; + bool _use_trt; }; class InferEngine { @@ -172,6 +178,10 @@ class ReloadableInferEngine : public InferEngine { force_update_static_cache); } + if (conf.has_use_trt()) { + _infer_engine_params.set_use_trt(conf.use_trt()); + } + if (!check_need_reload() || load(_infer_engine_params) != 0) { LOG(ERROR) << "Failed load model_data_path" << _model_data_path; return -1; @@ -553,8 +563,12 @@ class CloneDBReloadableInferEngine }; template +#ifdef WITH_TRT +class FluidInferEngine : public DBReloadableInferEngine { +#else class FluidInferEngine : public CloneDBReloadableInferEngine { - public: +#endif + public: // NOLINT FluidInferEngine() {} ~FluidInferEngine() {} diff --git a/core/sdk-cpp/include/abtest.h b/core/sdk-cpp/include/abtest.h index a4275fcf52413bb85bcca1fcb470ea2360fdf174..47a502745ae8aa6297729a0a3695600402cf5cfe 100644 --- a/core/sdk-cpp/include/abtest.h +++ b/core/sdk-cpp/include/abtest.h @@ -51,8 +51,8 @@ class WeightedRandomRender : public EndpointRouterBase { new (std::nothrow) Factory(); if (factory == NULL) { RAW_LOG(ERROR, - "Failed regist factory: WeightedRandomRender->EndpointRouterBase \ - in macro!"); + "Failed regist factory: WeightedRandomRender->EndpointRouterBase " + "in macro!"); return -1; } @@ -63,8 +63,8 @@ class WeightedRandomRender : public EndpointRouterBase { if (FactoryPool::instance().register_factory( "WeightedRandomRender", factory) != 0) { RAW_LOG(INFO, - "Factory has been registed: \ - WeightedRandomRender->EndpointRouterBase."); + "Factory has been registed: " + "WeightedRandomRender->EndpointRouterBase."); } return 0; diff --git a/doc/COMPILE.md b/doc/COMPILE.md index abb66084ac6f6c57c13c940eb10a87e2aba2daa2..cf0bfdf2593ff0274e4bec20d3b1524f2e61241a 100644 --- a/doc/COMPILE.md +++ b/doc/COMPILE.md @@ -75,10 +75,12 @@ export PATH=$PATH:$GOPATH/bin ## Get go packages ```shell -go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway -go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger -go get -u github.com/golang/protobuf/protoc-gen-go -go get -u google.golang.org/grpc +go env -w GO111MODULE=on +go env -w GOPROXY=https://goproxy.cn,direct +go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@v1.15.2 +go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@v1.15.2 +go get -u github.com/golang/protobuf/protoc-gen-go@v1.4.3 +go get -u google.golang.org/grpc@v1.33.0 ``` @@ -89,9 +91,9 @@ go get -u 
google.golang.org/grpc ``` shell mkdir server-build-cpu && cd server-build-cpu cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \ - -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \ - -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \ - -DSERVER=ON .. + -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \ + -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \ + -DSERVER=ON .. make -j10 ``` @@ -102,10 +104,28 @@ you can execute `make install` to put targets under directory `./output`, you ne ``` shell mkdir server-build-gpu && cd server-build-gpu cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \ - -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \ - -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \ - -DSERVER=ON \ - -DWITH_GPU=ON .. + -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \ + -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \ + -DCUDA_TOOLKIT_ROOT_DIR=${CUDA_PATH} \ + -DCUDNN_LIBRARY=${CUDNN_LIBRARY} \ + -DSERVER=ON \ + -DWITH_GPU=ON .. +make -j10 +``` + +### Integrated TRT version paddle inference library + +``` +mkdir server-build-trt && cd server-build-trt +cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \ + -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \ + -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \ + -DTENSORRT_ROOT=${TENSORRT_LIBRARY_PATH} \ + -DCUDA_TOOLKIT_ROOT_DIR=${CUDA_PATH} \ + -DCUDNN_LIBRARY=${CUDNN_LIBRARY} \ + -DSERVER=ON \ + -DWITH_GPU=ON \ + -DWITH_TRT=ON .. make -j10 ``` @@ -134,7 +154,10 @@ execute `make install` to put targets under directory `./output` ```bash mkdir app-build && cd app-build -cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DAPP=ON .. +cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \ + -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \ + -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \ + -DAPP=ON .. make ``` @@ -165,7 +188,9 @@ Please use the example under `python/examples` to verify. | WITH_AVX | Compile Paddle Serving with AVX intrinsics | OFF | | WITH_MKL | Compile Paddle Serving with MKL support | OFF | | WITH_GPU | Compile Paddle Serving with NVIDIA GPU | OFF | -| CUDNN_ROOT | Define CuDNN library and header path | | +| CUDNN_LIBRARY | Define CuDNN library and header path | | +| CUDA_TOOLKIT_ROOT_DIR | Define CUDA PATH | | +| TENSORRT_ROOT | Define TensorRT PATH | | | CLIENT | Compile Paddle Serving Client | OFF | | SERVER | Compile Paddle Serving Server | OFF | | APP | Compile Paddle Serving App package | OFF | @@ -180,7 +205,8 @@ To compile the Paddle Serving GPU version on bare metal, you need to install the - CUDA - CuDNN -- NCCL2 + +To compile the TensorRT version, you need to install the TensorRT library. 
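As a rough illustration of that requirement, the sketch below exports the variable that the TensorRT cmake invocation above consumes (`TENSORRT_LIBRARY_PATH`, passed as `-DTENSORRT_ROOT`); the install path is an assumption for your own environment, and only the TensorRT version (6.0.1.5 for the trt build) comes from the compatibility table below.

```shell
# Assumed TensorRT 6.0.1.5 install location; replace with the directory you unpacked TensorRT into.
export TENSORRT_LIBRARY_PATH=/usr/local/TensorRT-6.0.1.5
# Expose the TensorRT runtime libraries so the built serving binary can load libnvinfer.so at run time.
export LD_LIBRARY_PATH=${TENSORRT_LIBRARY_PATH}/lib:$LD_LIBRARY_PATH
```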
Note here: @@ -190,21 +216,12 @@ Note here: The following is the base library version matching relationship used by the PaddlePaddle release version for reference: -| | CUDA | CuDNN | NCCL2 | -| :----: | :-----: | :----------------------: | :----: | -| CUDA 8 | 8.0.61 | CuDNN 7.1.2 for CUDA 8.0 | 2.1.4 | -| CUDA 9 | 9.0.176 | CuDNN 7.3.1 for CUDA 9.0 | 2.2.12 | +| | CUDA | CuDNN | TensorRT | +| :----: | :-----: | :----------------------: | :----: | +| post9 | 9.0 | CuDNN 7.3.1 for CUDA 9.0 | | +| post10 | 10.0 | CuDNN 7.5.1 for CUDA 10.0| | +| trt | 10.1 | CuDNN 7.5.1 for CUDA 10.1| 6.0.1.5 | ### How to make the compiler detect the CuDNN library Download the corresponding CUDNN version from NVIDIA developer official website and decompressing it, add `-DCUDNN_ROOT` to cmake command, to specify the path of CUDNN. - -### How to make the compiler detect the nccl library - -After downloading the corresponding version of the nccl2 library from the NVIDIA developer official website and decompressing it, add the following environment variables (take nccl2.1.4 as an example): - -```shell -export C_INCLUDE_PATH=/path/to/nccl2/cuda8/nccl_2.1.4-1+cuda8.0_x86_64/include:$C_INCLUDE_PATH -export CPLUS_INCLUDE_PATH=/path/to/nccl2/cuda8/nccl_2.1.4-1+cuda8.0_x86_64/include:$CPLUS_INCLUDE_PATH -export LD_LIBRARY_PATH=/path/to/nccl2/cuda8/nccl_2.1.4-1+cuda8.0_x86_64/lib/:$LD_LIBRARY_PATH -``` diff --git a/doc/COMPILE_CN.md b/doc/COMPILE_CN.md index 2ddaaf71f23b0199c7458d068139a6b7169c25d8..b3619d9a38e967a139f850e7a605f713b1a57f95 100644 --- a/doc/COMPILE_CN.md +++ b/doc/COMPILE_CN.md @@ -72,10 +72,12 @@ export PATH=$PATH:$GOPATH/bin ## 获取 Go packages ```shell -go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway -go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger -go get -u github.com/golang/protobuf/protoc-gen-go -go get -u google.golang.org/grpc +go env -w GO111MODULE=on +go env -w GOPROXY=https://goproxy.cn,direct +go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@v1.15.2 +go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@v1.15.2 +go get -u github.com/golang/protobuf/protoc-gen-go@v1.4.3 +go get -u google.golang.org/grpc@v1.33.0 ``` @@ -85,7 +87,10 @@ go get -u google.golang.org/grpc ``` shell mkdir server-build-cpu && cd server-build-cpu -cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DSERVER=ON .. +cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \ + -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \ + -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \ + -DSERVER=ON .. make -j10 ``` @@ -95,21 +100,44 @@ make -j10 ``` shell mkdir server-build-gpu && cd server-build-gpu -cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DSERVER=ON -DWITH_GPU=ON .. +cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \ + -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \ + -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \ + -DCUDA_TOOLKIT_ROOT_DIR=${CUDA_PATH} \ + -DCUDNN_LIBRARY=${CUDNN_LIBRARY} \ + -DSERVER=ON \ + -DWITH_GPU=ON .. 
make -j10 ``` -执行`make install`可以把目标产出放在`./output`目录下。 +### 集成TensorRT版本Paddle Inference Library -**注意:** 编译成功后,需要设置`SERVING_BIN`路径,详见后面的[注意事项](https://github.com/PaddlePaddle/Serving/blob/develop/doc/COMPILE_CN.md#注意事项)。 +``` +mkdir server-build-trt && cd server-build-trt +cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \ + -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \ + -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \ + -DTENSORRT_ROOT=${TENSORRT_LIBRARY_PATH} \ + -DCUDA_TOOLKIT_ROOT_DIR=${CUDA_PATH} \ + -DCUDNN_LIBRARY=${CUDNN_LIBRARY} \ + -DSERVER=ON \ + -DWITH_GPU=ON \ + -DWITH_TRT=ON .. +make -j10 +``` +执行`make install`可以把目标产出放在`./output`目录下。 +**注意:** 编译成功后,需要设置`SERVING_BIN`路径,详见后面的[注意事项](https://github.com/PaddlePaddle/Serving/blob/develop/doc/COMPILE_CN.md#注意事项)。 ## 编译Client部分 ``` shell mkdir client-build && cd client-build -cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DCLIENT=ON .. +cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \ + -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \ + -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \ + -DCLIENT=ON .. make -j10 ``` @@ -121,7 +149,11 @@ make -j10 ```bash mkdir app-build && cd app-build -cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DCMAKE_INSTALL_PREFIX=./output -DAPP=ON .. +cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \ + -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \ + -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \ + -DCMAKE_INSTALL_PREFIX=./output \ + -DAPP=ON .. make ``` @@ -152,7 +184,10 @@ make | WITH_AVX | Compile Paddle Serving with AVX intrinsics | OFF | | WITH_MKL | Compile Paddle Serving with MKL support | OFF | | WITH_GPU | Compile Paddle Serving with NVIDIA GPU | OFF | -| CUDNN_ROOT | Define CuDNN library and header path | | +| WITH_TRT | Compile Paddle Serving with TensorRT | OFF | +| CUDNN_LIBRARY | Define CuDNN library and header path | | +| CUDA_TOOLKIT_ROOT_DIR | Define CUDA PATH | | +| TENSORRT_ROOT | Define TensorRT PATH | | | CLIENT | Compile Paddle Serving Client | OFF | | SERVER | Compile Paddle Serving Server | OFF | | APP | Compile Paddle Serving App package | OFF | @@ -167,7 +202,8 @@ Paddle Serving通过PaddlePaddle预测库支持在GPU上做预测。WITH_GPU选 - CUDA - CuDNN -- NCCL2 + +编译TensorRT版本,需要安装TensorRT库。 这里要注意的是: @@ -176,21 +212,12 @@ Paddle Serving通过PaddlePaddle预测库支持在GPU上做预测。WITH_GPU选 以下是PaddlePaddle发布版本所使用的基础库版本匹配关系,供参考: -| | CUDA | CuDNN | NCCL2 | -| :----: | :-----: | :----------------------: | :----: | -| CUDA 8 | 8.0.61 | CuDNN 7.1.2 for CUDA 8.0 | 2.1.4 | -| CUDA 9 | 9.0.176 | CuDNN 7.3.1 for CUDA 9.0 | 2.2.12 | +| | CUDA | CuDNN | TensorRT | +| :----: | :-----: | :----------------------: | :----: | +| post9 | 9.0 | CuDNN 7.3.1 for CUDA 9.0 | | +| post10 | 10.0 | CuDNN 7.5.1 for CUDA 10.0| | +| trt | 10.1 | CuDNN 7.5.1 for CUDA 10.1| 6.0.1.5 | ### 如何让Paddle Serving编译系统探测到CuDNN库 -从NVIDIA developer官网下载对应版本CuDNN并在本地解压后,在cmake编译命令中增加`-DCUDNN_ROOT`参数,指定CuDNN库所在路径。 - -### 如何让Paddle Serving编译系统探测到nccl库 - -从NVIDIA developer官网下载对应版本nccl2库并解压后,增加如下环境变量 (以nccl2.1.4为例): - -```shell -export C_INCLUDE_PATH=/path/to/nccl2/cuda8/nccl_2.1.4-1+cuda8.0_x86_64/include:$C_INCLUDE_PATH -export CPLUS_INCLUDE_PATH=/path/to/nccl2/cuda8/nccl_2.1.4-1+cuda8.0_x86_64/include:$CPLUS_INCLUDE_PATH -export LD_LIBRARY_PATH=/path/to/nccl2/cuda8/nccl_2.1.4-1+cuda8.0_x86_64/lib/:$LD_LIBRARY_PATH -``` 
+从NVIDIA developer官网下载对应版本CuDNN并在本地解压后,在cmake编译命令中增加`-DCUDNN_LIBRARY`参数,指定CuDNN库所在路径。 diff --git a/doc/FAQ.md b/doc/FAQ.md index daf5cb8613d0754a966ce09f1a8f7dac5cfd7a78..0dc4ed35a55e5904adbd1b924441aa21bc5436ab 100644 --- a/doc/FAQ.md +++ b/doc/FAQ.md @@ -1,39 +1,172 @@ # FAQ -- Q: 如何调整RPC服务的等待时间,避免超时? - A: 使用set_rpc_timeout_ms设置更长的等待时间,单位为毫秒,默认时间为20秒。 - - 示例: - ``` - from paddle_serving_client import Client - client = Client() - client.load_client_config(sys.argv[1]) - client.set_rpc_timeout_ms(100000) - client.connect(["127.0.0.1:9393"]) - ``` +## 基础知识 -- Q: 如何使用自己编译的Paddle Serving进行预测? +#### Q: Paddle Serving 、Paddle Inference、PaddleHub Serving三者的区别及联系? - A: 通过pip命令安装自己编译出的whl包,并设置SERVING_BIN环境变量为编译出的serving二进制文件路径。 +**A:** paddle serving是远程服务,即发起预测的设备(手机、浏览器、客户端等)与实际预测的硬件不在一起。 paddle inference是一个library,适合嵌入到一个大系统中保证预测效率,paddle serving调用了paddle inference做远程服务。paddlehub serving可以认为是一个示例,都会使用paddle serving作为统一预测服务入口。如果在web端交互,一般是调用远程服务的形式,可以使用paddle serving的web service搭建。 -- Q: 执行GPU预测时遇到InvalidArgumentError: Device id must be less than GPU count, but received id is: 0. GPU count is: 0. +#### Q: paddle-serving是否支持Int32支持 - A: 将显卡驱动对应的libcuda.so的目录添加到LD_LIBRARY_PATH环境变量中 +**A:** 在protobuf定feed_type和fetch_type编号与数据类型对应如下 -- Q: 执行GPU预测时遇到ExternalError: Cudnn error, CUDNN_STATUS_BAD_PARAM at (/home/scmbuild/workspaces_cluster.dev/baidu.lib.paddlepaddle/baidu/lib/paddlepaddle/Paddle/paddle/fluid/operators/batch_norm_op.cu:198) +​ 0-int64 - A: 将cudnn的lib64路径添加到LD_LIBRARY_PATH,安装自pypi的Paddle Serving中post9版使用的是cudnn 7.3,post10使用的是cudnn 7.5。如果是使用自己编译的Paddle Serving,可以在log/serving.INFO日志文件中查看对应的cudnn版本。 +​ 1-float32 -- Q: 执行GPU预测时遇到Error: Failed to find dynamic library: libcublas.so +​ 2-int32 - A: 将cuda的lib64路径添加到LD_LIBRARY_PATH, post9版本的Paddle Serving使用的是cuda 9.0,post10版本使用的cuda 10.0。 +#### Q: paddle-serving是否支持windows和Linux环境下的多线程调用 -- Q: 部署和预测中的日志信息在哪里查看? +**A:** 客户端可以发起多线程访问调用服务端 -- A: server端的日志分为两部分,一部分打印到标准输出,一部分打印到启动服务时的目录下的log/serving.INFO文件中。 +#### Q: paddle-serving如何修改消息大小限制 - client端的日志直接打印到标准输出。 +**A:** 在server端和client但通过FLAGS_max_body_size来扩大数据量限制,单位为字节,默认为64MB - 通过在部署服务之前 'export GLOG_v=3'可以输出更为详细的日志信息。 +#### Q: paddle-serving客户端目前支持哪些语言 + +**A:** java c++ python + +#### Q: paddle-serving目前支持哪些协议 + +**A:** http rpc + + +## 编译问题 + +#### Q: 如何使用自己编译的Paddle Serving进行预测? + +**A:** 通过pip命令安装自己编译出的whl包,并设置SERVING_BIN环境变量为编译出的serving二进制文件路径。 + +#### Q: 使用Java客户端,mvn compile过程出现"No compiler is provided in this environment. Perhaps you are running on a JRE rather than a JDK?"错误 + +**A:** 没有安装JDK,或者JAVA_HOME路径配置错误(正确配置是JDK路径,常见错误配置成JRE路径,例如正确路径参考JAVA_HOME="/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.262.b10-0.el7_8.x86_64/")。Java JDK安装参考https://segmentfault.com/a/1190000015389941 + + + +## 部署问题 + +#### Q: GPU环境运行Serving报错,GPU count is: 0。 + +``` +terminate called after throwing an instance of 'paddle::platform::EnforceNotMet' +what(): +-------------------------------------------- +C++ Call Stacks (More useful to developers): +-------------------------------------------- +0 std::string paddle::platform::GetTraceBackString(std::string const&, char const*, int) +1 paddle::platform::SetDeviceId(int) +2 paddle::AnalysisConfig::fraction_of_gpu_memory_for_pool() const +3 std::unique_ptr > paddle::CreatePaddlePredictor(paddle::AnalysisConfig const&) +4 std::unique_ptr > paddle::CreatePaddlePredictor(paddle::AnalysisConfig const&) +---------------------- +Error Message Summary: +---------------------- +InvalidArgumentError: Device id must be less than GPU count, but received id is: 0. GPU count is: 0. 
+[Hint: Expected id < GetCUDADeviceCount(), but received id:0 >= GetCUDADeviceCount():0.] at (/home/scmbuild/workspaces_cluster.dev/baidu.lib.paddlepaddle/baidu/lib/paddlepaddle/Paddle/paddle/fluid/platform/gpu_info.cc:211) +``` + +**A:** libcuda.so没有链接成功。首先在机器上找到libcuda.so,ldd检查libnvidia版本与nvidia-smi中版本一致(libnvidia-fatbinaryloader.so.418.39,与NVIDIA-SMI 418.39 Driver Version: 418.39),然后用export导出libcuda.so的路径即可(例如libcuda.so在/usr/lib64/,export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib64/) + +#### Q: 遇到 GPU not found, please check your environment or use cpu version by "pip install paddle_serving_server" + +**A:** 检查环境中是否有N卡:ls /dev/ | grep nvidia + +#### Q: 目前Paddle Serving支持哪些镜像环境? + +**A:** 目前(0.4.0)仅支持CentOS,具体列表查阅[这里](https://github.com/PaddlePaddle/Serving/blob/develop/doc/DOCKER_IMAGES.md) + +#### Q: python编译的GCC版本与serving的版本不匹配 + +**A:**:1)使用[GPU docker](https://github.com/PaddlePaddle/Serving/blob/develop/doc/RUN_IN_DOCKER.md#gpunvidia-docker)解决环境问题 + +​ 2)修改anaconda的虚拟环境下安装的python的gcc版本[参考](https://www.jianshu.com/p/c498b3d86f77) + +#### Q: paddle-serving是否支持本地离线安装 + +**A:** 支持离线部署,需要把一些相关的[依赖包](https://github.com/PaddlePaddle/Serving/blob/develop/doc/COMPILE.md)提前准备安装好 + +## 预测问题 + +#### Q: 使用GPU第一次预测时特别慢,如何调整RPC服务的等待时间避免超时? + +**A:** GPU第一次预测需要初始化。使用set_rpc_timeout_ms设置更长的等待时间,单位为毫秒,默认时间为20秒。 + +示例: + +``` +from paddle_serving_client import Client + +client = Client() +client.load_client_config(sys.argv[1]) +client.set_rpc_timeout_ms(100000) +client.connect(["127.0.0.1:9393"]) +``` + +#### Q: 执行GPU预测时遇到InvalidArgumentError: Device id must be less than GPU count, but received id is: 0. GPU count is: 0. + +**A:** 将显卡驱动对应的libcuda.so的目录添加到LD_LIBRARY_PATH环境变量中 + +#### Q: 执行GPU预测时遇到ExternalError: Cudnn error, CUDNN_STATUS_BAD_PARAM at (../batch_norm_op.cu:198) + +**A:** 将cudnn的lib64路径添加到LD_LIBRARY_PATH,安装自pypi的Paddle Serving中post9版使用的是cudnn 7.3,post10使用的是cudnn 7.5。如果是使用自己编译的Paddle Serving,可以在log/serving.INFO日志文件中查看对应的cudnn版本。 + +#### Q: 执行GPU预测时遇到Error: Failed to find dynamic library: libcublas.so + +**A:** 将cuda的lib64路径添加到LD_LIBRARY_PATH, post9版本的Paddle Serving使用的是cuda 9.0,post10版本使用的cuda 10.0。 + +#### Q: Client端fetch的变量名如何设置 + +**A:** 可以查看配置文件serving_server_conf.prototxt,获取需要的变量名 + +#### Q: 如何使用多语言客户端 + +**A:** 多语言客户端要与多语言服务端配套使用。当前版本下(0.4.0),服务端需要将Server改为MultiLangServer(如果是以命令行启动的话只需要添加--use_multilang参数),Python客户端需要将Client改为MultiLangClient,同时去除load_client_config的过程。[Java客户端参考文档](https://github.com/PaddlePaddle/Serving/blob/develop/doc/JAVA_SDK_CN.md) + +#### Q: 如何在Windows下使用Paddle Serving + +**A:** 当前版本(0.4.0)在Windows上可以运行多语言RPC客户端,或使用HTTP方式访问。如果使用多语言RPC客户端,需要在Linux环境(比如本机容器,或远程Linux机器)中运行多语言服务端;如果使用HTTP方式,需要在Linux环境中运行普通服务端 + +#### Q: libnvinfer.so: cannot open shared object file: No such file or directory) + + **A:** 参考该文档安装TensorRT: https://blog.csdn.net/hesongzefairy/article/details/105343525 + + + +## 日志排查 + +#### Q: 部署和预测中的日志信息在哪里查看? 
+ +**A:** server端的日志分为两部分,一部分打印到标准输出,一部分打印到启动服务时的目录下的log/serving.INFO文件中。 + +client端的日志直接打印到标准输出。 + +通过在部署服务之前 'export GLOG_v=3'可以输出更为详细的日志信息。 + +#### Q: paddle-serving启动成功后,相关的日志在哪里设置 + +**A:** 1)警告是glog组件打印的,告知glog初始化之前日志打印在STDERR + +​ 2)一般采用GLOG_v方式启动服务同时设置日志级别。 + +例如: +``` +GLOG_v=2 python -m paddle_serving_server.serve --model xxx_conf/ --port 9999 +``` + + + +#### Q: (GLOG_v=2下)Server端日志一切正常,但Client端始终得不到正确的预测结果 + +**A:** 可能是配置文件有问题,检查下配置文件(is_load_tensor,fetch_type等有没有问题) + +#### Q: 如何给Server传递Logid + +**A:** Logid默认为0(后续应该有自动生成Logid的计划,当前版本0.4.0),Client端通过在predict函数中指定log_id参数传递 + + + +## 性能优化 diff --git a/doc/GRPC_IMPL_CN.md b/doc/GRPC_IMPL_CN.md index 7b10907caec98ae5754126a7ec54096cc4cd48af..9e7ecd268fe0900c1085479c1f96fa083629758c 100644 --- a/doc/GRPC_IMPL_CN.md +++ b/doc/GRPC_IMPL_CN.md @@ -1,52 +1,137 @@ -# gRPC接口 +# gRPC接口使用介绍 + + - [1.与bRPC接口对比](#1与brpc接口对比) + - [1.1 服务端对比](#11-服务端对比) + - [1.2 客服端对比](#12-客服端对比) + - [1.3 其他](#13-其他) + - [2.示例:线性回归预测服务](#2示例线性回归预测服务) + - [获取数据](#获取数据) + - [开启 gRPC 服务端](#开启-grpc-服务端) + - [客户端预测](#客户端预测) + - [同步预测](#同步预测) + - [异步预测](#异步预测) + - [Batch 预测](#batch-预测) + - [通用 pb 预测](#通用-pb-预测) + - [预测超时](#预测超时) + - [List 输入](#list-输入) + - [3.更多示例](#3更多示例) + +使用gRPC接口,Client端可以在Win/Linux/MacOS平台上调用不同语言。gRPC 接口实现结构如下: + +![](https://github.com/PaddlePaddle/Serving/blob/develop/doc/grpc_impl.png) + +## 1.与bRPC接口对比 + +#### 1.1 服务端对比 + +* gRPC Server 端 `load_model_config` 函数添加 `client_config_path` 参数: -gRPC 接口实现形式类似 Web Service: - -![](grpc_impl.png) - -## 与bRPC接口对比 - -1. gRPC Server 端 `load_model_config` 函数添加 `client_config_path` 参数: - - ```python + ``` def load_model_config(self, server_config_paths, client_config_path=None) ``` + 在一些例子中 bRPC Server 端与 bRPC Client 端的配置文件可能不同(如 在cube local 中,Client 端的数据先交给 cube,经过 cube 处理后再交给预测库),此时 gRPC Server 端需要手动设置 gRPC Client 端的配置`client_config_path`。 + **`client_config_path` 默认为 `/serving_server_conf.prototxt`。** - 在一些例子中 bRPC Server 端与 bRPC Client 端的配置文件可能是不同的(如 cube local 例子中,Client 端的数据先交给 cube,经过 cube 处理后再交给预测库),所以 gRPC Server 端需要获取 gRPC Client 端的配置;同时为了取消 gRPC Client 端手动加载配置文件的过程,所以设计 gRPC Server 端同时加载两个配置文件。`client_config_path` 默认为 `/serving_server_conf.prototxt`。 +#### 1.2 客服端对比 -2. gRPC Client 端取消 `load_client_config` 步骤: +* gRPC Client 端取消 `load_client_config` 步骤: 在 `connect` 步骤通过 RPC 获取相应的 prototxt(从任意一个 endpoint 获取即可)。 -3. gRPC Client 需要通过 RPC 方式设置 timeout 时间(调用形式与 bRPC Client保持一致) +* gRPC Client 需要通过 RPC 方式设置 timeout 时间(调用形式与 bRPC Client保持一致) 因为 bRPC Client 在 `connect` 后无法更改 timeout 时间,所以当 gRPC Server 收到变更 timeout 的调用请求时会重新创建 bRPC Client 实例以变更 bRPC Client timeout时间,同时 gRPC Client 会设置 gRPC 的 deadline 时间。 **注意,设置 timeout 接口和 Inference 接口不能同时调用(非线程安全),出于性能考虑暂时不加锁。** -4. gRPC Client 端 `predict` 函数添加 `asyn` 和 `is_python` 参数: +* gRPC Client 端 `predict` 函数添加 `asyn` 和 `is_python` 参数: - ```python + ``` def predict(self, feed, fetch, need_variant_tag=False, asyn=False, is_python=True) ``` - 其中,`asyn` 为异步调用选项。当 `asyn=True` 时为异步调用,返回 `MultiLangPredictFuture` 对象,通过 `MultiLangPredictFuture.result()` 阻塞获取预测值;当 `asyn=Fasle` 为同步调用。 +1. `asyn` 为异步调用选项。当 `asyn=True` 时为异步调用,返回 `MultiLangPredictFuture` 对象,通过 `MultiLangPredictFuture.result()` 阻塞获取预测值;当 `asyn=Fasle` 为同步调用。 + +2. 
`is_python` 为 proto 格式选项。当 `is_python=True` 时,基于 numpy bytes 格式进行数据传输,目前只适用于 Python;当 `is_python=False` 时,以普通数据格式传输,更加通用。使用 numpy bytes 格式传输耗时比普通数据格式小很多(详见 [#654](https://github.com/PaddlePaddle/Serving/pull/654))。 + +#### 1.3 其他 + +* 异常处理:当 gRPC Server 端的 bRPC Client 预测失败(返回 `None`)时,gRPC Client 端同样返回None。其他 gRPC 异常会在 Client 内部捕获,并在返回的 fetch_map 中添加一个 "status_code" 字段来区分是否预测正常(参考 timeout 样例)。 + +* 由于 gRPC 只支持 pick_first 和 round_robin 负载均衡策略,ABTEST 特性还未打齐。 + +* 系统兼容性: + * [x] CentOS + * [x] macOS + * [x] Windows + +* 已经支持的客户端语言: + + - Python + - Java + - Go + + +## 2.示例:线性回归预测服务 + +以下是采用gRPC实现的关于线性回归预测的一个示例,具体代码详见此[链接](https://github.com/PaddlePaddle/Serving/tree/develop/python/examples/grpc_impl_example/fit_a_line) +#### 获取数据 + +```shell +sh get_data.sh +``` + +#### 开启 gRPC 服务端 + +``` shell +python test_server.py uci_housing_model/ +``` + +也可以通过下面的一行代码开启默认 gRPC 服务: + +```shell +python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9393 --use_multilang +``` +注:--use_multilang参数用来启用多语言客户端 + +### 客户端预测 + +#### 同步预测 + +``` shell +python test_sync_client.py +``` + +#### 异步预测 + +``` shell +python test_asyn_client.py +``` + +#### Batch 预测 + +``` shell +python test_batch_client.py +``` - `is_python` 为 proto 格式选项。当 `is_python=True` 时,基于 numpy bytes 格式进行数据传输,目前只适用于 Python;当 `is_python=False` 时,以普通数据格式传输,更加通用。使用 numpy bytes 格式传输耗时比普通数据格式小很多(详见 [#654](https://github.com/PaddlePaddle/Serving/pull/654))。 +#### 通用 pb 预测 -5. 异常处理:当 gRPC Server 端的 bRPC Client 预测失败(返回 `None`)时,gRPC Client 端同样返回None。其他 gRPC 异常会在 Client 内部捕获,并在返回的 fetch_map 中添加一个 "status_code" 字段来区分是否预测正常(参考 timeout 样例)。 +``` shell +python test_general_pb_client.py +``` -6. 由于 gRPC 只支持 pick_first 和 round_robin 负载均衡策略,ABTEST 特性还未打齐。 +#### 预测超时 -7. 经测试,gRPC 版本可以在 Windows、macOS 平台使用。 +``` shell +python test_timeout_client.py +``` -8. 计划支持的客户端语言: +#### List 输入 - - [x] Python - - [ ] Java - - [ ] Go - - [ ] JavaScript +``` shell +python test_list_input_client.py +``` -## Python 端的一些例子 +## 3.更多示例 -详见 `python/examples/grpc_impl_example` 下的示例文件。 +详见[`python/examples/grpc_impl_example`](https://github.com/PaddlePaddle/Serving/tree/develop/python/examples/grpc_impl_example)下的示例文件。 diff --git a/doc/INFERENCE_TO_SERVING.md b/doc/INFERENCE_TO_SERVING.md index e10ee976fb455c8cc49a0d5fa44ed4cc1f300ba9..719aa63c0a9b408d6bff628e7be4f35dfb49c5c8 100644 --- a/doc/INFERENCE_TO_SERVING.md +++ b/doc/INFERENCE_TO_SERVING.md @@ -24,13 +24,13 @@ inference_model_dir = "your_inference_model" serving_client_dir = "serving_client_dir" serving_server_dir = "serving_server_dir" feed_var_names, fetch_var_names = inference_model_to_serving( - inference_model_dir, serving_client_dir, serving_server_dir) + inference_model_dir, serving_server_dir, serving_client_dir) ``` if your model file and params file are both standalone, please use the following api. 
``` feed_var_names, fetch_var_names = inference_model_to_serving( - inference_model_dir, serving_client_dir, serving_server_dir, + inference_model_dir, serving_server_dir, serving_client_dir, model_filename="model", params_filename="params") ``` diff --git a/doc/INFERENCE_TO_SERVING_CN.md b/doc/INFERENCE_TO_SERVING_CN.md index e7e909ac04be3b1a0885b3390d99a153dfbd170e..5d783f25a3f367baa94d471e50f227d9e6f733d1 100644 --- a/doc/INFERENCE_TO_SERVING_CN.md +++ b/doc/INFERENCE_TO_SERVING_CN.md @@ -23,11 +23,11 @@ inference_model_dir = "your_inference_model" serving_client_dir = "serving_client_dir" serving_server_dir = "serving_server_dir" feed_var_names, fetch_var_names = inference_model_to_serving( - inference_model_dir, serving_client_dir, serving_server_dir) + inference_model_dir, serving_server_dir, serving_client_dir) ``` 如果模型中有模型描述文件`model_filename` 和 模型参数文件`params_filename`,那么请用 ``` feed_var_names, fetch_var_names = inference_model_to_serving( - inference_model_dir, serving_client_dir, serving_server_dir, + inference_model_dir, serving_server_dir, serving_client_dir, model_filename="model", params_filename="params") ``` diff --git a/doc/LATEST_PACKAGES.md b/doc/LATEST_PACKAGES.md index 247c04c000404944e7021093ff8bf3280c2f2539..dc72421ef5b1766955a67814b83071f591700f9c 100644 --- a/doc/LATEST_PACKAGES.md +++ b/doc/LATEST_PACKAGES.md @@ -18,6 +18,8 @@ https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.0.0-py2-none-an https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post9-py3-none-any.whl #cuda 10.0 https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post10-py3-none-any.whl +#cuda10.1 with TensorRT 6 +https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.trt-py3-none-any.whl ``` ### Python 2 ``` @@ -25,6 +27,8 @@ https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post10- https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post9-py2-none-any.whl #cuda 10.0 https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post10-py2-none-any.whl +##cuda10.1 with TensorRT 6 +https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.trt-py2-none-any.whl ``` ## Client diff --git a/java/README.md b/java/README.md new file mode 100644 index 0000000000000000000000000000000000000000..aac68283ae326923637804b879d93770374571ca --- /dev/null +++ b/java/README.md @@ -0,0 +1,26 @@ +## Java Demo + +### Install package +``` +mvn compile +mvn install +cd examples +mvn compile +mvn install +``` + +### Start Server + +take the fit_a_line demo as example + +``` + python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9393 --use_multilang #CPU +python -m paddle_serving_server_gpu.serve --model uci_housing_model --thread 10 --port 9393 --use_multilang #GPU +``` + +### Client Predict +``` +java -cp paddle-serving-sdk-java-examples-0.0.1-jar-with-dependencies.jar PaddleServingClientExample fit_a_line +``` + +The Java example also contains the prediction client of Bert, Model_enaemble, asyn_predict, batch_predict, Cube_local, Cube_quant, and Yolov4 models. 
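The other bundled demos listed above appear to follow the same invocation pattern as the fit_a_line example; the sketch below assumes the demo name is passed as the program argument and that a matching model directory is served first (both the `bert` argument and the model directory name are illustrative assumptions, since only fit_a_line is shown explicitly in this README).

```
# Start a multi-language server for the target model first (assumed model directory name).
python -m paddle_serving_server_gpu.serve --model bert_seq128_model --thread 10 --port 9393 --use_multilang
# Then run the corresponding Java demo; "bert" is an assumed demo name taken from the list above.
java -cp paddle-serving-sdk-java-examples-0.0.1-jar-with-dependencies.jar PaddleServingClientExample bert
```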
diff --git a/java/README_CN.md b/java/README_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..a068e8ecf47842fa57e41808b66f0a4017148d50 --- /dev/null +++ b/java/README_CN.md @@ -0,0 +1,26 @@ +## Java 示例 + +### 安装客户端依赖 +``` +mvn compile +mvn install +cd examples +mvn compile +mvn install +``` + +### 启动服务端 + +以fit_a_line模型为例 + +``` + python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9393 --use_multilang #CPU +python -m paddle_serving_server_gpu.serve --model uci_housing_model --thread 10 --port 9393 --use_multilang #GPU +``` + +### 客户端预测 +``` +java -cp paddle-serving-sdk-java-examples-0.0.1-jar-with-dependencies.jar PaddleServingClientExample fit_a_line +``` + +java示例中还包含了bert、model_enaemble、asyn_predict、batch_predict、cube_local、cube_quant、yolov4模型的预测客户端。 diff --git a/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h b/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h index f65711e04cf601e40f693b045adbaba0cf7ada71..a4d8dda71a7977185106bb1552cb8f39ef6bc50e 100644 --- a/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h +++ b/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h @@ -23,7 +23,6 @@ #include "core/configure/inferencer_configure.pb.h" #include "core/predictor/framework/infer.h" #include "paddle_inference_api.h" // NOLINT -//#include "predictor/framework/infer.h" namespace baidu { namespace paddle_serving { diff --git a/paddle_inference/inferencer-fluid-gpu/CMakeLists.txt b/paddle_inference/inferencer-fluid-gpu/CMakeLists.txt index 725da85b45ca1070badf5343f340e49dce6b936f..6ba3ddd6ba5d80f7b987b7c0dbbbebfdaaf37e46 100644 --- a/paddle_inference/inferencer-fluid-gpu/CMakeLists.txt +++ b/paddle_inference/inferencer-fluid-gpu/CMakeLists.txt @@ -2,6 +2,7 @@ FILE(GLOB fluid_gpu_engine_srcs ${CMAKE_CURRENT_LIST_DIR}/src/*.cpp) add_library(fluid_gpu_engine ${fluid_gpu_engine_srcs}) target_include_directories(fluid_gpu_engine PUBLIC ${CMAKE_BINARY_DIR}/Paddle/fluid_install_dir/) + add_dependencies(fluid_gpu_engine pdserving extern_paddle configure) target_link_libraries(fluid_gpu_engine pdserving paddle_fluid iomp5 mklml_intel -lpthread -lcrypto -lm -lrt -lssl -ldl -lz) diff --git a/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h b/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h index 2fc6ae587ff26f5f05ff9332f08067ab49d06254..3782c967823d07c23ba02e5ce0f388dc6b46e181 100644 --- a/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h +++ b/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h @@ -190,7 +190,7 @@ class FluidGpuAnalysisDirCore : public FluidFamilyCore { paddle::AnalysisConfig analysis_config; analysis_config.SetModel(data_path); - analysis_config.EnableUseGpu(100, FLAGS_gpuid); + analysis_config.EnableUseGpu(1500, FLAGS_gpuid); analysis_config.SwitchSpecifyInputNames(true); analysis_config.SetCpuMathLibraryNumThreads(1); @@ -198,12 +198,68 @@ class FluidGpuAnalysisDirCore : public FluidFamilyCore { analysis_config.EnableMemoryOptim(); } - if (params.enable_ir_optimization()) { - analysis_config.SwitchIrOptim(true); +#if 0 // todo: support flexible shape + + int min_seq_len = 1; + int max_seq_len = 512; + int opt_seq_len = 128; + int head_number = 12; + int batch = 50; + + std::vector min_in_shape = {batch, min_seq_len, 1}; + std::vector max_in_shape = {batch, max_seq_len, 1}; + std::vector opt_in_shape = {batch, opt_seq_len, 1}; + + std::string input1_name = "src_text_a_ids"; + std::string input2_name = 
"pos_text_a_ids"; + std::string input3_name = "sent_text_a_ids"; + std::string input4_name = "stack_0.tmp_0"; + + std::map> min_input_shape = { + {input1_name, min_in_shape}, + {input2_name, min_in_shape}, + {input3_name, min_in_shape}, + {input4_name, {batch, head_number, min_seq_len, min_seq_len}}, + }; + + std::map> max_input_shape = { + {input1_name, max_in_shape}, + {input2_name, max_in_shape}, + {input3_name, max_in_shape}, + {input4_name, {batch, head_number, max_seq_len, max_seq_len}}, + }; + std::map> opt_input_shape = { + {input1_name, opt_in_shape}, + {input2_name, opt_in_shape}, + {input3_name, opt_in_shape}, + {input4_name, {batch, head_number, opt_seq_len, opt_seq_len}}, + }; + + analysis_config.SetTRTDynamicShapeInfo( + min_input_shape, max_input_shape, opt_input_shape); +#endif + int max_batch = 32; + int min_subgraph_size = 3; + if (params.use_trt()) { + analysis_config.EnableTensorRtEngine( + 1 << 20, + max_batch, + min_subgraph_size, + paddle::AnalysisConfig::Precision::kFloat32, + false, + false); + LOG(INFO) << "create TensorRT predictor"; } else { - analysis_config.SwitchIrOptim(false); - } + if (params.enable_memory_optimization()) { + analysis_config.EnableMemoryOptim(); + } + if (params.enable_ir_optimization()) { + analysis_config.SwitchIrOptim(true); + } else { + analysis_config.SwitchIrOptim(false); + } + } AutoLock lock(GlobalPaddleCreateMutex::instance()); _core = paddle::CreatePaddlePredictor(analysis_config); diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 4b20cb2001ebb595601f22fa6e4aab8dd5df18f4..23e0b6b507f53f1ab60a32854891b79b377638ce 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -80,6 +80,16 @@ if (SERVER) COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES}) add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp) + elseif(WITH_TRT) + add_custom_command( + OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp + COMMAND cp -r + ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_server_gpu/ ${PADDLE_SERVING_BINARY_DIR}/python/ + COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py + "server_gpu" trt + COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel + DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES}) + add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp) else() add_custom_command( OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp diff --git a/python/examples/bert/bert_client.py b/python/examples/bert/bert_client.py index 362ac67915870af9d11209520daa61daa95082c1..4111589b3ddfde980e415fbac1a5f38f4abafada 100644 --- a/python/examples/bert/bert_client.py +++ b/python/examples/bert/bert_client.py @@ -18,16 +18,20 @@ import sys from paddle_serving_client import Client from paddle_serving_client.utils import benchmark_args from paddle_serving_app.reader import ChineseBertReader - +import numpy as np args = benchmark_args() reader = ChineseBertReader({"max_seq_len": 128}) fetch = ["pooled_output"] -endpoint_list = ["127.0.0.1:9292"] +endpoint_list = ['127.0.0.1:9292'] client = Client() client.load_client_config(args.model) client.connect(endpoint_list) for line in sys.stdin: feed_dict = reader.process(line) - result = client.predict(feed=feed_dict, fetch=fetch) + for key in feed_dict.keys(): + feed_dict[key] = np.array(feed_dict[key]).reshape((128, 1)) + #print(feed_dict) + result = client.predict(feed=feed_dict, fetch=fetch, batch=False) +print(result) diff --git 
a/python/examples/bert/bert_web_service.py b/python/examples/bert/bert_web_service.py index b1898b2cc0ee690dd075958944a56fed27dce29a..7cd34fb99e0ecebbf2f6bec47e9c9d163ac3a44c 100644 --- a/python/examples/bert/bert_web_service.py +++ b/python/examples/bert/bert_web_service.py @@ -13,10 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=doc-string-missing -from paddle_serving_server_gpu.web_service import WebService +from paddle_serving_server.web_service import WebService from paddle_serving_app.reader import ChineseBertReader import sys import os +import numpy as np class BertService(WebService): @@ -27,18 +28,21 @@ class BertService(WebService): }) def preprocess(self, feed=[], fetch=[]): - feed_res = [ - self.reader.process(ins["words"].encode("utf-8")) for ins in feed - ] - return feed_res, fetch + feed_res = [] + is_batch = False + for ins in feed: + feed_dict = self.reader.process(ins["words"].encode("utf-8")) + for key in feed_dict.keys(): + feed_dict[key] = np.array(feed_dict[key]).reshape( + (len(feed_dict[key]), 1)) + feed_res.append(feed_dict) + return feed_res, fetch, is_batch bert_service = BertService(name="bert") bert_service.load() bert_service.load_model_config(sys.argv[1]) -gpu_ids = os.environ["CUDA_VISIBLE_DEVICES"] -bert_service.set_gpus(gpu_ids) bert_service.prepare_server( - workdir="workdir", port=int(sys.argv[2]), device="gpu") + workdir="workdir", port=int(sys.argv[2]), device="cpu") bert_service.run_rpc_service() bert_service.run_web_service() diff --git a/python/examples/bert/test_multi_fetch_client.py b/python/examples/bert/test_multi_fetch_client.py index c15c4d4deaf282c432ff0990ee03c6e80daeee74..1ee540097c32429348fbeb504278fb986bd3a9e7 100644 --- a/python/examples/bert/test_multi_fetch_client.py +++ b/python/examples/bert/test_multi_fetch_client.py @@ -15,6 +15,7 @@ from paddle_serving_client import Client from paddle_serving_app.reader import ChineseBertReader import sys +import numpy as np client = Client() client.load_client_config("./bert_seq32_client/serving_client_conf.prototxt") @@ -28,12 +29,21 @@ expected_shape = { "pooled_output": (4, 768) } batch_size = 4 -feed_batch = [] +feed_batch = {} +batch_len = 0 for line in sys.stdin: feed = reader.process(line) + if batch_len == 0: + for key in feed.keys(): + val_len = len(feed[key]) + feed_batch[key] = np.array(feed[key]).reshape((1, val_len, 1)) + continue if len(feed_batch) < batch_size: - feed_batch.append(feed) + for key in feed.keys(): + np.concatenate([ + feed_batch[key], np.array(feed[key]).reshape((1, val_len, 1)) + ]) else: fetch_map = client.predict(feed=feed_batch, fetch=fetch) feed_batch = [] diff --git a/python/examples/criteo_ctr/test_client.py b/python/examples/criteo_ctr/test_client.py index 2beac850228291c49d56c1180365fdd8e627ffc0..ecb2fc376c0d3a8c7174c9f2ab093b25c8ac4791 100644 --- a/python/examples/criteo_ctr/test_client.py +++ b/python/examples/criteo_ctr/test_client.py @@ -20,7 +20,7 @@ import os import time import criteo_reader as criteo from paddle_serving_client.metric import auc - +import numpy as np import sys py_version = sys.version_info[0] @@ -49,7 +49,8 @@ for ei in range(1000): data = reader().__next__() feed_dict = {} for i in range(1, 27): - feed_dict["sparse_{}".format(i - 1)] = data[0][i] + feed_dict["sparse_{}".format(i - 1)] = np.array(data[0][i]).reshape(-1) + feed_dict["sparse_{}.lod".format(i - 1)] = [0, len(data[0][i])] fetch_map = client.predict(feed=feed_dict, fetch=["prob"]) end = 
time.time() print(end - start) diff --git a/python/examples/criteo_ctr_with_cube/test_client.py b/python/examples/criteo_ctr_with_cube/test_client.py index 8518db55572196e470da014a02797ae9e200c988..853b8fb5e793d7daeff4703f32c57cb57a9c279c 100755 --- a/python/examples/criteo_ctr_with_cube/test_client.py +++ b/python/examples/criteo_ctr_with_cube/test_client.py @@ -19,6 +19,7 @@ import os import criteo as criteo import time from paddle_serving_client.metric import auc +import numpy as np py_version = sys.version_info[0] @@ -41,10 +42,15 @@ for ei in range(10000): else: data = reader().__next__() feed_dict = {} - feed_dict['dense_input'] = data[0][0] + feed_dict['dense_input'] = np.array(data[0][0]).astype("float32").reshape( + 1, 13) + feed_dict['dense_input.lod'] = [0, 1] for i in range(1, 27): - feed_dict["embedding_{}.tmp_0".format(i - 1)] = data[0][i] - fetch_map = client.predict(feed=feed_dict, fetch=["prob"]) + tmp_data = np.array(data[0][i]).astype(np.int64) + feed_dict["embedding_{}.tmp_0".format(i - 1)] = tmp_data.reshape( + (1, len(data[0][i]))) + feed_dict["embedding_{}.tmp_0.lod".format(i - 1)] = [0, 1] + fetch_map = client.predict(feed=feed_dict, fetch=["prob"], batch=True) prob_list.append(fetch_map['prob'][0][1]) label_list.append(data[0][-1][0]) diff --git a/python/examples/faster_rcnn_model/test_client.py b/python/examples/faster_rcnn_model/test_client.py index ce577a3c4396d33af33e45694a573f8b1cbcb52b..98a1c8f4df087a71891d2a3c89e8fca64f701854 100755 --- a/python/examples/faster_rcnn_model/test_client.py +++ b/python/examples/faster_rcnn_model/test_client.py @@ -36,6 +36,7 @@ fetch_map = client.predict( "im_info": np.array(list(im.shape[1:]) + [1.0]), "im_shape": np.array(list(im.shape[1:]) + [1.0]) }, - fetch=["multiclass_nms"]) + fetch=["multiclass_nms"], + batch=False) fetch_map["image"] = sys.argv[3] postprocess(fetch_map) diff --git a/python/examples/fit_a_line/test_client.py b/python/examples/fit_a_line/test_client.py index 442ed230bc3d75c9ec3b5eac160b3a53ac31cd83..41a037decb6109337bebda4927eba4ea46121b87 100644 --- a/python/examples/fit_a_line/test_client.py +++ b/python/examples/fit_a_line/test_client.py @@ -27,5 +27,10 @@ test_reader = paddle.batch( batch_size=1) for data in test_reader(): - fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"]) + import numpy as np + new_data = np.zeros((1, 1, 13)).astype("float32") + new_data[0] = data[0][0] + fetch_map = client.predict( + feed={"x": new_data}, fetch=["price"], batch=True) print("{} {}".format(fetch_map["price"][0], data[0][1][0])) + print(fetch_map) diff --git a/python/examples/fit_a_line/test_multi_process_client.py b/python/examples/fit_a_line/test_multi_process_client.py index 5272d095df5e74f25ce0e36ca22c8d6d1884f5f0..e6120266097f8fdd446998741582a9e396cd2efd 100644 --- a/python/examples/fit_a_line/test_multi_process_client.py +++ b/python/examples/fit_a_line/test_multi_process_client.py @@ -15,6 +15,7 @@ from paddle_serving_client import Client from paddle_serving_client.utils import MultiThreadRunner import paddle +import numpy as np def single_func(idx, resource): @@ -26,6 +27,7 @@ def single_func(idx, resource): 0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332 ] + x = np.array(x) for i in range(1000): fetch_map = client.predict(feed={"x": x}, fetch=["price"]) if fetch_map is None: diff --git a/python/examples/imagenet/resnet50_rpc_client.py b/python/examples/imagenet/resnet50_rpc_client.py index 
7888ab6302b483672ec1d7270f7db0c551f1778d..b23f99175b97a011c3b1c72d3b7358b646c54e68 100644 --- a/python/examples/imagenet/resnet50_rpc_client.py +++ b/python/examples/imagenet/resnet50_rpc_client.py @@ -38,7 +38,8 @@ start = time.time() image_file = "https://paddle-serving.bj.bcebos.com/imagenet-example/daisy.jpg" for i in range(10): img = seq(image_file) - fetch_map = client.predict(feed={"image": img}, fetch=["score"]) + fetch_map = client.predict( + feed={"image": img}, fetch=["score"], batch=False) prob = max(fetch_map["score"][0]) label = label_dict[fetch_map["score"][0].tolist().index(prob)].strip( ).replace(",", "") diff --git a/python/examples/imagenet/resnet50_web_service.py b/python/examples/imagenet/resnet50_web_service.py index d38dcc0ffc1952193803575c7eb612c4f0bbad28..7033103717621807ecd74093bf5eba8d31a9b877 100644 --- a/python/examples/imagenet/resnet50_web_service.py +++ b/python/examples/imagenet/resnet50_web_service.py @@ -13,6 +13,7 @@ # limitations under the License. import sys from paddle_serving_client import Client +import numpy as np from paddle_serving_app.reader import Sequential, URL2Image, Resize, CenterCrop, RGB2BGR, Transpose, Div, Normalize, Base64ToImage if len(sys.argv) != 4: @@ -43,12 +44,13 @@ class ImageService(WebService): def preprocess(self, feed=[], fetch=[]): feed_batch = [] + is_batch = True for ins in feed: if "image" not in ins: raise ("feed data error!") img = self.seq(ins["image"]) - feed_batch.append({"image": img}) - return feed_batch, fetch + feed_batch.append({"image": img[np.newaxis, :]}) + return feed_batch, fetch, is_batch def postprocess(self, feed=[], fetch=[], fetch_map={}): score_list = fetch_map["score"] diff --git a/python/examples/imdb/benchmark.py b/python/examples/imdb/benchmark.py index d226efbfbc5317db81039bc6a778498cdf853854..18584f88ea51373ffe2ca2e75946342c94464d76 100644 --- a/python/examples/imdb/benchmark.py +++ b/python/examples/imdb/benchmark.py @@ -17,7 +17,8 @@ import os import sys import time import requests -from paddle_serving_app.reader import IMDBDataset +import numpy as np +from paddle_serving_app.reader.imdb_reader import IMDBDataset from paddle_serving_client import Client from paddle_serving_client.utils import MultiThreadRunner from paddle_serving_client.utils import MultiThreadRunner, benchmark_args, show_latency @@ -47,11 +48,17 @@ def single_func(idx, resource): for i in range(1000): if args.batch_size >= 1: feed_batch = [] + feed = {"words": [], "words.lod": [0]} for bi in range(args.batch_size): word_ids, label = imdb_dataset.get_words_and_label(dataset[ bi]) - feed_batch.append({"words": word_ids}) - result = client.predict(feed=feed_batch, fetch=["prediction"]) + feed["words.lod"].append(feed["words.lod"][-1] + len( + word_ids)) + feed["words"].extend(word_ids) + feed["words"] = np.array(feed["words"]).reshape( + len(feed["words"]), 1) + result = client.predict( + feed=feed, fetch=["prediction"], batch=True) if result is None: raise ("predict failed.") else: diff --git a/python/examples/imdb/test_client.py b/python/examples/imdb/test_client.py index b903a59983fb0df87adfa4fa38b7eb2b80fb4ebb..2aeee01a83cde4a66e4bd03ad49c7791c67a287e 100644 --- a/python/examples/imdb/test_client.py +++ b/python/examples/imdb/test_client.py @@ -13,8 +13,9 @@ # limitations under the License. 
# pylint: disable=doc-string-missing from paddle_serving_client import Client -from paddle_serving_app.reader import IMDBDataset +from paddle_serving_app.reader.imdb_reader import IMDBDataset import sys +import numpy as np client = Client() client.load_client_config(sys.argv[1]) @@ -28,7 +29,12 @@ imdb_dataset.load_resource(sys.argv[2]) for line in sys.stdin: word_ids, label = imdb_dataset.get_words_and_label(line) - feed = {"words": word_ids} + word_len = len(word_ids) + feed = { + "words": np.array(word_ids).reshape(word_len, 1), + "words.lod": [0, word_len] + } + #print(feed) fetch = ["prediction"] - fetch_map = client.predict(feed=feed, fetch=fetch) + fetch_map = client.predict(feed=feed, fetch=fetch, batch=True) print("{} {}".format(fetch_map["prediction"][0], label[0])) diff --git a/python/examples/imdb/text_classify_service.py b/python/examples/imdb/text_classify_service.py index fe6ab0319deb0de5875781cf0890aa39a45c2415..ca1e26002baf0284f282add235706080f7902c33 100755 --- a/python/examples/imdb/text_classify_service.py +++ b/python/examples/imdb/text_classify_service.py @@ -14,8 +14,9 @@ # pylint: disable=doc-string-missing from paddle_serving_server.web_service import WebService -from paddle_serving_app.reader import IMDBDataset +from paddle_serving_app.reader.imdb_reader import IMDBDataset import sys +import numpy as np class IMDBService(WebService): @@ -26,10 +27,16 @@ class IMDBService(WebService): self.dataset.load_resource(args["dict_file_path"]) def preprocess(self, feed={}, fetch=[]): - res_feed = [{ - "words": self.dataset.get_words_only(ins["words"]) - } for ins in feed] - return res_feed, fetch + feed_batch = [] + words_lod = [0] + is_batch = True + for ins in feed: + words = self.dataset.get_words_only(ins["words"]) + words = np.array(words).reshape(len(words), 1) + words_lod.append(words_lod[-1] + len(words)) + feed_batch.append(words) + feed = {"words": np.concatenate(feed_batch), "words.lod": words_lod} + return feed, fetch, is_batch imdb_service = IMDBService(name="imdb") diff --git a/python/examples/lac/lac_client.py b/python/examples/lac/lac_client.py index 22f3c511dcd2540365623ef9428b60cfcb5e5a34..568b08d8b3af86fd7aa7b20660aeb4acbf060e04 100644 --- a/python/examples/lac/lac_client.py +++ b/python/examples/lac/lac_client.py @@ -19,6 +19,7 @@ from paddle_serving_app.reader import LACReader import sys import os import io +import numpy as np client = Client() client.load_client_config(sys.argv[1]) @@ -31,7 +32,17 @@ for line in sys.stdin: feed_data = reader.process(line) if len(feed_data) <= 0: continue - fetch_map = client.predict(feed={"words": feed_data}, fetch=["crf_decode"]) + print(feed_data) + #fetch_map = client.predict(feed={"words": np.array(feed_data).reshape(len(feed_data), 1), "words.lod": [0, len(feed_data)]}, fetch=["crf_decode"], batch=True) + fetch_map = client.predict( + feed={ + "words": np.array(feed_data + feed_data).reshape( + len(feed_data) * 2, 1), + "words.lod": [0, len(feed_data), 2 * len(feed_data)] + }, + fetch=["crf_decode"], + batch=True) + print(fetch_map) begin = fetch_map['crf_decode.lod'][0] end = fetch_map['crf_decode.lod'][1] segs = reader.parse_result(line, fetch_map["crf_decode"][begin:end]) diff --git a/python/examples/lac/lac_web_service.py b/python/examples/lac/lac_web_service.py index bed89f54b626c0cce55767f8edacc3dd33f0104c..cf37f66294bd154324f2c7cacd1a35571b6c6350 100644 --- a/python/examples/lac/lac_web_service.py +++ b/python/examples/lac/lac_web_service.py @@ -15,6 +15,7 @@ from paddle_serving_server.web_service import 
WebService import sys from paddle_serving_app.reader import LACReader +import numpy as np class LACService(WebService): @@ -23,13 +24,21 @@ class LACService(WebService): def preprocess(self, feed={}, fetch=[]): feed_batch = [] + fetch = ["crf_decode"] + lod_info = [0] + is_batch = True for ins in feed: if "words" not in ins: raise ("feed data error!") feed_data = self.reader.process(ins["words"]) - feed_batch.append({"words": feed_data}) - fetch = ["crf_decode"] - return feed_batch, fetch + feed_batch.append(np.array(feed_data).reshape(len(feed_data), 1)) + lod_info.append(lod_info[-1] + len(feed_data)) + feed_dict = { + "words": np.concatenate( + feed_batch, axis=0), + "words.lod": lod_info + } + return feed_dict, fetch, is_batch def postprocess(self, feed={}, fetch=[], fetch_map={}): batch_ret = [] diff --git a/python/examples/ocr/README.md b/python/examples/ocr/README.md index a0fc9f60160506183076233f33face1732a278c7..680376a07ae462f567b31234cbe7651405c08048 100644 --- a/python/examples/ocr/README.md +++ b/python/examples/ocr/README.md @@ -34,9 +34,9 @@ python ocr_web_server.py gpu ``` python ocr_web_client.py ``` -If you want a faster web service, please try Web Debugger Service +If you want a faster web service, please try Web LocalPredictor Service -## Web Debugger Service +## Web LocalPredictor Service ``` #choose one of cpu/gpu commands as following #for cpu user @@ -45,7 +45,7 @@ python ocr_debugger_server.py cpu python ocr_debugger_server.py gpu ``` -## Web Debugger Client Prediction +## Web LocalPredictor Client Prediction ``` python ocr_web_client.py ``` @@ -61,7 +61,7 @@ Dataset: RCTW 500 sample images | engine | client read image(ms) | client-server tras time(ms) | server read image(ms) | det pre(ms) | det infer(ms) | det post(ms) | rec pre(ms) | rec infer(ms) | rec post(ms) | server-client trans time(ms) | server side time consumption(ms) | server side overhead(ms) | total time(ms) | |------------------------------|----------------|----------------------------|------------------|--------------------|------------------|--------------------|--------------------|------------------|--------------------|--------------------------|--------------------|--------------|---------------| | Serving web service | 8.69 | 13.41 | 109.97 | 2.82 | 87.76 | 4.29 | 3.98 | 78.51 | 3.66 | 4.12 | 181.02 | 136.49 | 317.51 | -| Serving Debugger web service | 8.73 | 16.42 | 115.27 | 2.93 | 20.63 | 3.97 | 4.48 | 13.84 | 3.60 | 6.91 | 49.45 | 147.33 | 196.78 | +| Serving LocalPredictor web service | 8.73 | 16.42 | 115.27 | 2.93 | 20.63 | 3.97 | 4.48 | 13.84 | 3.60 | 6.91 | 49.45 | 147.33 | 196.78 | ## Appendix: For Users who want to launch Det or Rec only if you are going to detect images not recognize it or directly recognize the words from images. We also provide Det and Rec server for you. 
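Several client and web-service diffs in this patch (criteo_ctr, imdb, lac) replace nested Python lists with flat numpy arrays plus an explicit `<name>.lod` offset vector and a `batch=True` flag on `predict`. A minimal sketch of that feed convention for a variable-length text model; the client config path, endpoint, and feed/fetch names are placeholders:

```python
# Sketch of the numpy + LoD feed layout used by the updated clients above.
# Assumptions: config path, endpoint and feed/fetch names are placeholders.
import numpy as np
from paddle_serving_client import Client

client = Client()
client.load_client_config("serving_client_conf/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9292"])

# Two variable-length word-id sequences packed into one flat (N, 1) array.
seqs = [[1, 2, 3], [4, 5, 6, 7, 8]]
words = np.array([w for seq in seqs for w in seq]).reshape(-1, 1)

# LoD offsets: sample i occupies rows lod[i]:lod[i+1] of `words`.
lod = [0]
for seq in seqs:
    lod.append(lod[-1] + len(seq))

fetch_map = client.predict(
    feed={"words": words, "words.lod": lod},  # flat data + offset vector
    fetch=["prediction"],
    batch=True)  # the feed already carries the batch dimension
print(fetch_map)
```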
diff --git a/python/examples/ocr/README_CN.md b/python/examples/ocr/README_CN.md index 8bdc45cf8e390b378708fbee2dbfe318132aea44..52663bfd3c4e5fae77e5f03c2954268038c80833 100644 --- a/python/examples/ocr/README_CN.md +++ b/python/examples/ocr/README_CN.md @@ -34,8 +34,8 @@ python ocr_web_server.py gpu python ocr_web_client.py ``` -如果用户需要更快的执行速度,请尝试Debugger版Web服务 -## 启动Debugger版Web服务 +如果用户需要更快的执行速度,请尝试LocalPredictor版Web服务 +## 启动LocalPredictor版Web服务 ``` #根据CPU/GPU设备选择一种启动方式 #for cpu user @@ -60,7 +60,7 @@ GPU: Nvidia Tesla V100单卡 | engine | 客户端读图(ms) | 客户端发送请求到服务端(ms) | 服务端读图(ms) | 检测预处理耗时(ms) | 检测模型耗时(ms) | 检测后处理耗时(ms) | 识别预处理耗时(ms) | 识别模型耗时(ms) | 识别后处理耗时(ms) | 服务端回传客户端时间(ms) | 服务端整体耗时(ms) | 空跑耗时(ms) | 整体耗时(ms) | |------------------------------|----------------|----------------------------|------------------|--------------------|------------------|--------------------|--------------------|------------------|--------------------|--------------------------|--------------------|--------------|---------------| | Serving web service | 8.69 | 13.41 | 109.97 | 2.82 | 87.76 | 4.29 | 3.98 | 78.51 | 3.66 | 4.12 | 181.02 | 136.49 | 317.51 | -| Serving Debugger web service | 8.73 | 16.42 | 115.27 | 2.93 | 20.63 | 3.97 | 4.48 | 13.84 | 3.60 | 6.91 | 49.45 | 147.33 | 196.78 | +| Serving LocalPredictor web service | 8.73 | 16.42 | 115.27 | 2.93 | 20.63 | 3.97 | 4.48 | 13.84 | 3.60 | 6.91 | 49.45 | 147.33 | 196.78 | ## 附录: 检测/识别单服务启动 diff --git a/python/examples/ocr/ocr_debugger_server.py b/python/examples/ocr/ocr_debugger_server.py index f7458c3036734e4bb6e554097029270e11912a3a..3cbc3a66ef620f5c8851b50a352a0c1587467b3b 100644 --- a/python/examples/ocr/ocr_debugger_server.py +++ b/python/examples/ocr/ocr_debugger_server.py @@ -26,7 +26,7 @@ if sys.argv[1] == 'gpu': from paddle_serving_server_gpu.web_service import WebService elif sys.argv[1] == 'cpu': from paddle_serving_server.web_service import WebService -from paddle_serving_app.local_predict import Debugger +from paddle_serving_app.local_predict import LocalPredictor import time import re import base64 @@ -39,7 +39,7 @@ class OCRService(WebService): Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose( (2, 0, 1)) ]) - self.det_client = Debugger() + self.det_client = LocalPredictor() if sys.argv[1] == 'gpu': self.det_client.load_model_config( det_model_config, gpu=True, profile=False) diff --git a/python/examples/pipeline/imagenet/README.md b/python/examples/pipeline/imagenet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d0fa99e6d72f10d3d2b5907285528b68685128e0 --- /dev/null +++ b/python/examples/pipeline/imagenet/README.md @@ -0,0 +1,19 @@ +# Imagenet Pipeline WebService + +This document will takes Imagenet service as an example to introduce how to use Pipeline WebService. 
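The sections that follow (model download, server start, RPC test) are driven by scripts added later in this patch; the RPC test boils down to a `PipelineClient` call like the condensed sketch below. It assumes the service is running locally with `rpc_port: 9999` from the accompanying `config.yml` and that `daisy.jpg` is in the working directory:

```python
# Condensed sketch of the RPC test added later in this patch
# (pipeline_rpc_client.py); port and image file are taken from the example.
import base64
from paddle_serving_server_gpu.pipeline import PipelineClient

client = PipelineClient()
client.connect(["127.0.0.1:9999"])

with open("daisy.jpg", "rb") as f:
    image = base64.b64encode(f.read()).decode("utf8")  # image travels as base64 text

ret = client.predict(feed_dict={"image": image}, fetch=["label", "prob"])
print(ret)
```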
+ +## Get model +``` +sh get_model.sh +``` + +## Start server + +``` +python resnet50_web_service.py &>log.txt & +``` + +## RPC test +``` +python pipeline_rpc_client.py +``` diff --git a/python/examples/pipeline/imagenet/README_CN.md b/python/examples/pipeline/imagenet/README_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..325a64e7a01da169978da7fc07b9252c4896f327 --- /dev/null +++ b/python/examples/pipeline/imagenet/README_CN.md @@ -0,0 +1,19 @@ +# Imagenet Pipeline WebService + +这里以 Imagenet 服务为例来介绍 Pipeline WebService 的使用。 + +## 获取模型 +``` +sh get_model.sh +``` + +## 启动服务 + +``` +python resnet50_web_service.py &>log.txt & +``` + +## RPC 测试 +``` +python pipeline_rpc_client.py +``` diff --git a/python/examples/pipeline/imagenet/config.yml b/python/examples/pipeline/imagenet/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..52ddab6f3194efe7c884411bfbcd381f76ea075e --- /dev/null +++ b/python/examples/pipeline/imagenet/config.yml @@ -0,0 +1,30 @@ +#worker_num, 最大并发数。当build_dag_each_worker=True时, 框架会创建worker_num个进程,每个进程内构建grpcServer和DAG +##当build_dag_each_worker=False时,框架会设置主线程grpc线程池的max_workers=worker_num +worker_num: 1 + +#http端口, rpc_port和http_port不允许同时为空。当rpc_port可用且http_port为空时,不自动生成http_port +http_port: 18082 +rpc_port: 9999 + +dag: + #op资源类型, True, 为线程模型;False,为进程模型 + is_thread_op: False +op: + imagenet: + #当op配置没有server_endpoints时,从local_service_conf读取本地服务配置 + local_service_conf: + + #并发数,is_thread_op=True时,为线程并发;否则为进程并发 + concurrency: 2 + + #模型路径 + model_config: ResNet50_vd_model + + #计算硬件ID,当devices为""或不写时为CPU预测;当devices为"0", "0,1,2"时为GPU预测,表示使用的GPU卡 + devices: "0" # "0,1" + + #client类型,包括brpc, grpc和local_predictor。local_predictor不启动Serving服务,进程内预测 + client_type: local_predictor + + #Fetch结果列表,以client_config中fetch_var的alias_name为准 + fetch_list: ["score"] diff --git a/python/examples/pipeline/imagenet/daisy.jpg b/python/examples/pipeline/imagenet/daisy.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7edeca63e5f32e68550ef720d81f59df58a8eabc Binary files /dev/null and b/python/examples/pipeline/imagenet/daisy.jpg differ diff --git a/python/examples/pipeline/imagenet/get_model.sh b/python/examples/pipeline/imagenet/get_model.sh new file mode 100644 index 0000000000000000000000000000000000000000..1964c79a2e13dbbe373636ca1a06d2967fad7b79 --- /dev/null +++ b/python/examples/pipeline/imagenet/get_model.sh @@ -0,0 +1,5 @@ +wget --no-check-certificate https://paddle-serving.bj.bcebos.com/imagenet-example/ResNet50_vd.tar.gz +tar -xzvf ResNet50_vd.tar.gz + +wget --no-check-certificate https://paddle-serving.bj.bcebos.com/imagenet-example/image_data.tar.gz +tar -xzvf image_data.tar.gz diff --git a/python/examples/pipeline/imagenet/imagenet.label b/python/examples/pipeline/imagenet/imagenet.label new file mode 100644 index 0000000000000000000000000000000000000000..d7146735146ea1894173d6d0e20fb90af36be849 --- /dev/null +++ b/python/examples/pipeline/imagenet/imagenet.label @@ -0,0 +1,1000 @@ +tench, Tinca tinca, +goldfish, Carassius auratus, +great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias, +tiger shark, Galeocerdo cuvieri, +hammerhead, hammerhead shark, +electric ray, crampfish, numbfish, torpedo, +stingray, +cock, +hen, +ostrich, Struthio camelus, +brambling, Fringilla montifringilla, +goldfinch, Carduelis carduelis, +house finch,
linnet, Carpodacus mexicanus, +junco, snowbird, +indigo bunting, indigo finch, indigo bird, Passerina cyanea, +robin, American robin, Turdus migratorius, +bulbul, +jay, +magpie, +chickadee, +water ouzel, dipper, +kite, +bald eagle, American eagle, Haliaeetus leucocephalus, +vulture, +great grey owl, great gray owl, Strix nebulosa, +European fire salamander, Salamandra salamandra, +common newt, Triturus vulgaris, +eft, +spotted salamander, Ambystoma maculatum, +axolotl, mud puppy, Ambystoma mexicanum, +bullfrog, Rana catesbeiana, +tree frog, tree-frog, +tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui, +loggerhead, loggerhead turtle, Caretta caretta, +leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea, +mud turtle, +terrapin, +box turtle, box tortoise, +banded gecko, +common iguana, iguana, Iguana iguana, +American chameleon, anole, Anolis carolinensis, +whiptail, whiptail lizard, +agama, +frilled lizard, Chlamydosaurus kingi, +alligator lizard, +Gila monster, Heloderma suspectum, +green lizard, Lacerta viridis, +African chameleon, Chamaeleo chamaeleon, +Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis, +African crocodile, Nile crocodile, Crocodylus niloticus, +American alligator, Alligator mississipiensis, +triceratops, +thunder snake, worm snake, Carphophis amoenus, +ringneck snake, ring-necked snake, ring snake, +hognose snake, puff adder, sand viper, +green snake, grass snake, +king snake, kingsnake, +garter snake, grass snake, +water snake, +vine snake, +night snake, Hypsiglena torquata, +boa constrictor, Constrictor constrictor, +rock python, rock snake, Python sebae, +Indian cobra, Naja naja, +green mamba, +sea snake, +horned viper, cerastes, sand viper, horned asp, Cerastes cornutus, +diamondback, diamondback rattlesnake, Crotalus adamanteus, +sidewinder, horned rattlesnake, Crotalus cerastes, +trilobite, +harvestman, daddy longlegs, Phalangium opilio, +scorpion, +black and gold garden spider, Argiope aurantia, +barn spider, Araneus cavaticus, +garden spider, Aranea diademata, +black widow, Latrodectus mactans, +tarantula, +wolf spider, hunting spider, +tick, +centipede, +black grouse, +ptarmigan, +ruffed grouse, partridge, Bonasa umbellus, +prairie chicken, prairie grouse, prairie fowl, +peacock, +quail, +partridge, +African grey, African gray, Psittacus erithacus, +macaw, +sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita, +lorikeet, +coucal, +bee eater, +hornbill, +hummingbird, +jacamar, +toucan, +drake, +red-breasted merganser, Mergus serrator, +goose, +black swan, Cygnus atratus, +tusker, +echidna, spiny anteater, anteater, +platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus, +wallaby, brush kangaroo, +koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus, +wombat, +jellyfish, +sea anemone, anemone, +brain coral, +flatworm, platyhelminth, +nematode, nematode worm, roundworm, +conch, +snail, +slug, +sea slug, nudibranch, +chiton, coat-of-mail shell, sea cradle, polyplacophore, +chambered nautilus, pearly nautilus, nautilus, +Dungeness crab, Cancer magister, +rock crab, Cancer irroratus, +fiddler crab, +king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica, +American lobster, Northern lobster, Maine lobster, Homarus americanus, +spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish, +crayfish, crawfish, crawdad, crawdaddy, +hermit crab, +isopod, +white stork, Ciconia ciconia, +black stork, Ciconia 
nigra, +spoonbill, +flamingo, +little blue heron, Egretta caerulea, +American egret, great white heron, Egretta albus, +bittern, +crane, +limpkin, Aramus pictus, +European gallinule, Porphyrio porphyrio, +American coot, marsh hen, mud hen, water hen, Fulica americana, +bustard, +ruddy turnstone, Arenaria interpres, +red-backed sandpiper, dunlin, Erolia alpina, +redshank, Tringa totanus, +dowitcher, +oystercatcher, oyster catcher, +pelican, +king penguin, Aptenodytes patagonica, +albatross, mollymawk, +grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus, +killer whale, killer, orca, grampus, sea wolf, Orcinus orca, +dugong, Dugong dugon, +sea lion, +Chihuahua, +Japanese spaniel, +Maltese dog, Maltese terrier, Maltese, +Pekinese, Pekingese, Peke, +Shih-Tzu, +Blenheim spaniel, +papillon, +toy terrier, +Rhodesian ridgeback, +Afghan hound, Afghan, +basset, basset hound, +beagle, +bloodhound, sleuthhound, +bluetick, +black-and-tan coonhound, +Walker hound, Walker foxhound, +English foxhound, +redbone, +borzoi, Russian wolfhound, +Irish wolfhound, +Italian greyhound, +whippet, +Ibizan hound, Ibizan Podenco, +Norwegian elkhound, elkhound, +otterhound, otter hound, +Saluki, gazelle hound, +Scottish deerhound, deerhound, +Weimaraner, +Staffordshire bullterrier, Staffordshire bull terrier, +American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier, +Bedlington terrier, +Border terrier, +Kerry blue terrier, +Irish terrier, +Norfolk terrier, +Norwich terrier, +Yorkshire terrier, +wire-haired fox terrier, +Lakeland terrier, +Sealyham terrier, Sealyham, +Airedale, Airedale terrier, +cairn, cairn terrier, +Australian terrier, +Dandie Dinmont, Dandie Dinmont terrier, +Boston bull, Boston terrier, +miniature schnauzer, +giant schnauzer, +standard schnauzer, +Scotch terrier, Scottish terrier, Scottie, +Tibetan terrier, chrysanthemum dog, +silky terrier, Sydney silky, +soft-coated wheaten terrier, +West Highland white terrier, +Lhasa, Lhasa apso, +flat-coated retriever, +curly-coated retriever, +golden retriever, +Labrador retriever, +Chesapeake Bay retriever, +German short-haired pointer, +vizsla, Hungarian pointer, +English setter, +Irish setter, red setter, +Gordon setter, +Brittany spaniel, +clumber, clumber spaniel, +English springer, English springer spaniel, +Welsh springer spaniel, +cocker spaniel, English cocker spaniel, cocker, +Sussex spaniel, +Irish water spaniel, +kuvasz, +schipperke, +groenendael, +malinois, +briard, +kelpie, +komondor, +Old English sheepdog, bobtail, +Shetland sheepdog, Shetland sheep dog, Shetland, +collie, +Border collie, +Bouvier des Flandres, Bouviers des Flandres, +Rottweiler, +German shepherd, German shepherd dog, German police dog, alsatian, +Doberman, Doberman pinscher, +miniature pinscher, +Greater Swiss Mountain dog, +Bernese mountain dog, +Appenzeller, +EntleBucher, +boxer, +bull mastiff, +Tibetan mastiff, +French bulldog, +Great Dane, +Saint Bernard, St Bernard, +Eskimo dog, husky, +malamute, malemute, Alaskan malamute, +Siberian husky, +dalmatian, coach dog, carriage dog, +affenpinscher, monkey pinscher, monkey dog, +basenji, +pug, pug-dog, +Leonberg, +Newfoundland, Newfoundland dog, +Great Pyrenees, +Samoyed, Samoyede, +Pomeranian, +chow, chow chow, +keeshond, +Brabancon griffon, +Pembroke, Pembroke Welsh corgi, +Cardigan, Cardigan Welsh corgi, +toy poodle, +miniature poodle, +standard poodle, +Mexican hairless, +timber wolf, grey wolf, gray wolf, Canis lupus, +white wolf, Arctic wolf, Canis 
lupus tundrarum, +red wolf, maned wolf, Canis rufus, Canis niger, +coyote, prairie wolf, brush wolf, Canis latrans, +dingo, warrigal, warragal, Canis dingo, +dhole, Cuon alpinus, +African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus, +hyena, hyaena, +red fox, Vulpes vulpes, +kit fox, Vulpes macrotis, +Arctic fox, white fox, Alopex lagopus, +grey fox, gray fox, Urocyon cinereoargenteus, +tabby, tabby cat, +tiger cat, +Persian cat, +Siamese cat, Siamese, +Egyptian cat, +cougar, puma, catamount, mountain lion, painter, panther, Felis concolor, +lynx, catamount, +leopard, Panthera pardus, +snow leopard, ounce, Panthera uncia, +jaguar, panther, Panthera onca, Felis onca, +lion, king of beasts, Panthera leo, +tiger, Panthera tigris, +cheetah, chetah, Acinonyx jubatus, +brown bear, bruin, Ursus arctos, +American black bear, black bear, Ursus americanus, Euarctos americanus, +ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus, +sloth bear, Melursus ursinus, Ursus ursinus, +mongoose, +meerkat, mierkat, +tiger beetle, +ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle, +ground beetle, carabid beetle, +long-horned beetle, longicorn, longicorn beetle, +leaf beetle, chrysomelid, +dung beetle, +rhinoceros beetle, +weevil, +fly, +bee, +ant, emmet, pismire, +grasshopper, hopper, +cricket, +walking stick, walkingstick, stick insect, +cockroach, roach, +mantis, mantid, +cicada, cicala, +leafhopper, +lacewing, lacewing fly, +"dragonfly, darning needle, devils darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", +damselfly, +admiral, +ringlet, ringlet butterfly, +monarch, monarch butterfly, milkweed butterfly, Danaus plexippus, +cabbage butterfly, +sulphur butterfly, sulfur butterfly, +lycaenid, lycaenid butterfly, +starfish, sea star, +sea urchin, +sea cucumber, holothurian, +wood rabbit, cottontail, cottontail rabbit, +hare, +Angora, Angora rabbit, +hamster, +porcupine, hedgehog, +fox squirrel, eastern fox squirrel, Sciurus niger, +marmot, +beaver, +guinea pig, Cavia cobaya, +sorrel, +zebra, +hog, pig, grunter, squealer, Sus scrofa, +wild boar, boar, Sus scrofa, +warthog, +hippopotamus, hippo, river horse, Hippopotamus amphibius, +ox, +water buffalo, water ox, Asiatic buffalo, Bubalus bubalis, +bison, +ram, tup, +bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis, +ibex, Capra ibex, +hartebeest, +impala, Aepyceros melampus, +gazelle, +Arabian camel, dromedary, Camelus dromedarius, +llama, +weasel, +mink, +polecat, fitch, foulmart, foumart, Mustela putorius, +black-footed ferret, ferret, Mustela nigripes, +otter, +skunk, polecat, wood pussy, +badger, +armadillo, +three-toed sloth, ai, Bradypus tridactylus, +orangutan, orang, orangutang, Pongo pygmaeus, +gorilla, Gorilla gorilla, +chimpanzee, chimp, Pan troglodytes, +gibbon, Hylobates lar, +siamang, Hylobates syndactylus, Symphalangus syndactylus, +guenon, guenon monkey, +patas, hussar monkey, Erythrocebus patas, +baboon, +macaque, +langur, +colobus, colobus monkey, +proboscis monkey, Nasalis larvatus, +marmoset, +capuchin, ringtail, Cebus capucinus, +howler monkey, howler, +titi, titi monkey, +spider monkey, Ateles geoffroyi, +squirrel monkey, Saimiri sciureus, +Madagascar cat, ring-tailed lemur, Lemur catta, +indri, indris, Indri indri, Indri brevicaudatus, +Indian elephant, Elephas maximus, +African elephant, Loxodonta africana, +lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens, +giant panda, panda, panda bear, coon bear, 
Ailuropoda melanoleuca, +barracouta, snoek, +eel, +coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch, +rock beauty, Holocanthus tricolor, +anemone fish, +sturgeon, +gar, garfish, garpike, billfish, Lepisosteus osseus, +lionfish, +puffer, pufferfish, blowfish, globefish, +abacus, +abaya, +"academic gown, academic robe, judges robe", +accordion, piano accordion, squeeze box, +acoustic guitar, +aircraft carrier, carrier, flattop, attack aircraft carrier, +airliner, +airship, dirigible, +altar, +ambulance, +amphibian, amphibious vehicle, +analog clock, +apiary, bee house, +apron, +ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin, +assault rifle, assault gun, +backpack, back pack, knapsack, packsack, rucksack, haversack, +bakery, bakeshop, bakehouse, +balance beam, beam, +balloon, +ballpoint, ballpoint pen, ballpen, Biro, +Band Aid, +banjo, +bannister, banister, balustrade, balusters, handrail, +barbell, +barber chair, +barbershop, +barn, +barometer, +barrel, cask, +barrow, garden cart, lawn cart, wheelbarrow, +baseball, +basketball, +bassinet, +bassoon, +bathing cap, swimming cap, +bath towel, +bathtub, bathing tub, bath, tub, +beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon, +beacon, lighthouse, beacon light, pharos, +beaker, +bearskin, busby, shako, +beer bottle, +beer glass, +bell cote, bell cot, +bib, +bicycle-built-for-two, tandem bicycle, tandem, +bikini, two-piece, +binder, ring-binder, +binoculars, field glasses, opera glasses, +birdhouse, +boathouse, +bobsled, bobsleigh, bob, +bolo tie, bolo, bola tie, bola, +bonnet, poke bonnet, +bookcase, +bookshop, bookstore, bookstall, +bottlecap, +bow, +bow tie, bow-tie, bowtie, +brass, memorial tablet, plaque, +brassiere, bra, bandeau, +breakwater, groin, groyne, mole, bulwark, seawall, jetty, +breastplate, aegis, egis, +broom, +bucket, pail, +buckle, +bulletproof vest, +bullet train, bullet, +butcher shop, meat market, +cab, hack, taxi, taxicab, +caldron, cauldron, +candle, taper, wax light, +cannon, +canoe, +can opener, tin opener, +cardigan, +car mirror, +carousel, carrousel, merry-go-round, roundabout, whirligig, +"carpenters kit, tool kit", +carton, +car wheel, +cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM, +cassette, +cassette player, +castle, +catamaran, +CD player, +cello, violoncello, +cellular telephone, cellular phone, cellphone, cell, mobile phone, +chain, +chainlink fence, +chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour, +chain saw, chainsaw, +chest, +chiffonier, commode, +chime, bell, gong, +china cabinet, china closet, +Christmas stocking, +church, church building, +cinema, movie theater, movie theatre, movie house, picture palace, +cleaver, meat cleaver, chopper, +cliff dwelling, +cloak, +clog, geta, patten, sabot, +cocktail shaker, +coffee mug, +coffeepot, +coil, spiral, volute, whorl, helix, +combination lock, +computer keyboard, keypad, +confectionery, confectionary, candy store, +container ship, containership, container vessel, +convertible, +corkscrew, bottle screw, +cornet, horn, trumpet, trump, +cowboy boot, +cowboy hat, ten-gallon hat, +cradle, +crane, +crash helmet, +crate, +crib, cot, +Crock Pot, +croquet ball, +crutch, +cuirass, +dam, dike, dyke, +desk, +desktop computer, +dial telephone, dial phone, +diaper, nappy, napkin, +digital clock, +digital watch, +dining table, board, +dishrag, dishcloth, 
+dishwasher, dish washer, dishwashing machine, +disk brake, disc brake, +dock, dockage, docking facility, +dogsled, dog sled, dog sleigh, +dome, +doormat, welcome mat, +drilling platform, offshore rig, +drum, membranophone, tympan, +drumstick, +dumbbell, +Dutch oven, +electric fan, blower, +electric guitar, +electric locomotive, +entertainment center, +envelope, +espresso maker, +face powder, +feather boa, boa, +file, file cabinet, filing cabinet, +fireboat, +fire engine, fire truck, +fire screen, fireguard, +flagpole, flagstaff, +flute, transverse flute, +folding chair, +football helmet, +forklift, +fountain, +fountain pen, +four-poster, +freight car, +French horn, horn, +frying pan, frypan, skillet, +fur coat, +garbage truck, dustcart, +gasmask, respirator, gas helmet, +gas pump, gasoline pump, petrol pump, island dispenser, +goblet, +go-kart, +golf ball, +golfcart, golf cart, +gondola, +gong, tam-tam, +gown, +grand piano, grand, +greenhouse, nursery, glasshouse, +grille, radiator grille, +grocery store, grocery, food market, market, +guillotine, +hair slide, +hair spray, +half track, +hammer, +hamper, +hand blower, blow dryer, blow drier, hair dryer, hair drier, +hand-held computer, hand-held microcomputer, +handkerchief, hankie, hanky, hankey, +hard disc, hard disk, fixed disk, +harmonica, mouth organ, harp, mouth harp, +harp, +harvester, reaper, +hatchet, +holster, +home theater, home theatre, +honeycomb, +hook, claw, +hoopskirt, crinoline, +horizontal bar, high bar, +horse cart, horse-cart, +hourglass, +iPod, +iron, smoothing iron, +"jack-o-lantern", +jean, blue jean, denim, +jeep, landrover, +jersey, T-shirt, tee shirt, +jigsaw puzzle, +jinrikisha, ricksha, rickshaw, +joystick, +kimono, +knee pad, +knot, +lab coat, laboratory coat, +ladle, +lampshade, lamp shade, +laptop, laptop computer, +lawn mower, mower, +lens cap, lens cover, +letter opener, paper knife, paperknife, +library, +lifeboat, +lighter, light, igniter, ignitor, +limousine, limo, +liner, ocean liner, +lipstick, lip rouge, +Loafer, +lotion, +loudspeaker, speaker, speaker unit, loudspeaker system, speaker system, +"loupe, jewelers loupe", +lumbermill, sawmill, +magnetic compass, +mailbag, postbag, +mailbox, letter box, +maillot, +maillot, tank suit, +manhole cover, +maraca, +marimba, xylophone, +mask, +matchstick, +maypole, +maze, labyrinth, +measuring cup, +medicine chest, medicine cabinet, +megalith, megalithic structure, +microphone, mike, +microwave, microwave oven, +military uniform, +milk can, +minibus, +miniskirt, mini, +minivan, +missile, +mitten, +mixing bowl, +mobile home, manufactured home, +Model T, +modem, +monastery, +monitor, +moped, +mortar, +mortarboard, +mosque, +mosquito net, +motor scooter, scooter, +mountain bike, all-terrain bike, off-roader, +mountain tent, +mouse, computer mouse, +mousetrap, +moving van, +muzzle, +nail, +neck brace, +necklace, +nipple, +notebook, notebook computer, +obelisk, +oboe, hautboy, hautbois, +ocarina, sweet potato, +odometer, hodometer, mileometer, milometer, +oil filter, +organ, pipe organ, +oscilloscope, scope, cathode-ray oscilloscope, CRO, +overskirt, +oxcart, +oxygen mask, +packet, +paddle, boat paddle, +paddlewheel, paddle wheel, +padlock, +paintbrush, +"pajama, pyjama, pjs, jammies", +palace, +panpipe, pandean pipe, syrinx, +paper towel, +parachute, chute, +parallel bars, bars, +park bench, +parking meter, +passenger car, coach, carriage, +patio, terrace, +pay-phone, pay-station, +pedestal, plinth, footstall, +pencil box, pencil case, +pencil sharpener, +perfume, 
essence, +Petri dish, +photocopier, +pick, plectrum, plectron, +pickelhaube, +picket fence, paling, +pickup, pickup truck, +pier, +piggy bank, penny bank, +pill bottle, +pillow, +ping-pong ball, +pinwheel, +pirate, pirate ship, +pitcher, ewer, +"plane, carpenters plane, woodworking plane", +planetarium, +plastic bag, +plate rack, +plow, plough, +"plunger, plumbers helper", +Polaroid camera, Polaroid Land camera, +pole, +police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria, +poncho, +pool table, billiard table, snooker table, +pop bottle, soda bottle, +pot, flowerpot, +"potters wheel", +power drill, +prayer rug, prayer mat, +printer, +prison, prison house, +projectile, missile, +projector, +puck, hockey puck, +punching bag, punch bag, punching ball, punchball, +purse, +quill, quill pen, +quilt, comforter, comfort, puff, +racer, race car, racing car, +racket, racquet, +radiator, +radio, wireless, +radio telescope, radio reflector, +rain barrel, +recreational vehicle, RV, R.V., +reel, +reflex camera, +refrigerator, icebox, +remote control, remote, +restaurant, eating house, eating place, eatery, +revolver, six-gun, six-shooter, +rifle, +rocking chair, rocker, +rotisserie, +rubber eraser, rubber, pencil eraser, +rugby ball, +rule, ruler, +running shoe, +safe, +safety pin, +saltshaker, salt shaker, +sandal, +sarong, +sax, saxophone, +scabbard, +scale, weighing machine, +school bus, +schooner, +scoreboard, +screen, CRT screen, +screw, +screwdriver, +seat belt, seatbelt, +sewing machine, +shield, buckler, +shoe shop, shoe-shop, shoe store, +shoji, +shopping basket, +shopping cart, +shovel, +shower cap, +shower curtain, +ski, +ski mask, +sleeping bag, +slide rule, slipstick, +sliding door, +slot, one-armed bandit, +snorkel, +snowmobile, +snowplow, snowplough, +soap dispenser, +soccer ball, +sock, +solar dish, solar collector, solar furnace, +sombrero, +soup bowl, +space bar, +space heater, +space shuttle, +spatula, +speedboat, +"spider web, spiders web", +spindle, +sports car, sport car, +spotlight, spot, +stage, +steam locomotive, +steel arch bridge, +steel drum, +stethoscope, +stole, +stone wall, +stopwatch, stop watch, +stove, +strainer, +streetcar, tram, tramcar, trolley, trolley car, +stretcher, +studio couch, day bed, +stupa, tope, +submarine, pigboat, sub, U-boat, +suit, suit of clothes, +sundial, +sunglass, +sunglasses, dark glasses, shades, +sunscreen, sunblock, sun blocker, +suspension bridge, +swab, swob, mop, +sweatshirt, +swimming trunks, bathing trunks, +swing, +switch, electric switch, electrical switch, +syringe, +table lamp, +tank, army tank, armored combat vehicle, armoured combat vehicle, +tape player, +teapot, +teddy, teddy bear, +television, television system, +tennis ball, +thatch, thatched roof, +theater curtain, theatre curtain, +thimble, +thresher, thrasher, threshing machine, +throne, +tile roof, +toaster, +tobacco shop, tobacconist shop, tobacconist, +toilet seat, +torch, +totem pole, +tow truck, tow car, wrecker, +toyshop, +tractor, +trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi, +tray, +trench coat, +tricycle, trike, velocipede, +trimaran, +tripod, +triumphal arch, +trolleybus, trolley coach, trackless trolley, +trombone, +tub, vat, +turnstile, +typewriter keyboard, +umbrella, +unicycle, monocycle, +upright, upright piano, +vacuum, vacuum cleaner, +vase, +vault, +velvet, +vending machine, +vestment, +viaduct, +violin, fiddle, +volleyball, +waffle iron, +wall clock, +wallet, billfold, notecase, pocketbook, +wardrobe, 
closet, press, +warplane, military plane, +washbasin, handbasin, washbowl, lavabo, wash-hand basin, +washer, automatic washer, washing machine, +water bottle, +water jug, +water tower, +whiskey jug, +whistle, +wig, +window screen, +window shade, +Windsor tie, +wine bottle, +wing, +wok, +wooden spoon, +wool, woolen, woollen, +worm fence, snake fence, snake-rail fence, Virginia fence, +wreck, +yawl, +yurt, +web site, website, internet site, site, +comic book, +crossword puzzle, crossword, +street sign, +traffic light, traffic signal, stoplight, +book jacket, dust cover, dust jacket, dust wrapper, +menu, +plate, +guacamole, +consomme, +hot pot, hotpot, +trifle, +ice cream, icecream, +ice lolly, lolly, lollipop, popsicle, +French loaf, +bagel, beigel, +pretzel, +cheeseburger, +hotdog, hot dog, red hot, +mashed potato, +head cabbage, +broccoli, +cauliflower, +zucchini, courgette, +spaghetti squash, +acorn squash, +butternut squash, +cucumber, cuke, +artichoke, globe artichoke, +bell pepper, +cardoon, +mushroom, +Granny Smith, +strawberry, +orange, +lemon, +fig, +pineapple, ananas, +banana, +jackfruit, jak, jack, +custard apple, +pomegranate, +hay, +carbonara, +chocolate sauce, chocolate syrup, +dough, +meat loaf, meatloaf, +pizza, pizza pie, +potpie, +burrito, +red wine, +espresso, +cup, +eggnog, +alp, +bubble, +cliff, drop, drop-off, +coral reef, +geyser, +lakeside, lakeshore, +promontory, headland, head, foreland, +sandbar, sand bar, +seashore, coast, seacoast, sea-coast, +valley, vale, +volcano, +ballplayer, baseball player, +groom, bridegroom, +scuba diver, +rapeseed, +daisy, +"yellow ladys slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", +corn, +acorn, +hip, rose hip, rosehip, +buckeye, horse chestnut, conker, +coral fungus, +agaric, +gyromitra, +stinkhorn, carrion fungus, +earthstar, +hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa, +bolete, +ear, spike, capitulum, +toilet tissue, toilet paper, bathroom tissue diff --git a/python/examples/pipeline/imagenet/pipeline_rpc_client.py b/python/examples/pipeline/imagenet/pipeline_rpc_client.py new file mode 100644 index 0000000000000000000000000000000000000000..3220e6c20b27c92a59cd0c28050719a8790d648d --- /dev/null +++ b/python/examples/pipeline/imagenet/pipeline_rpc_client.py @@ -0,0 +1,36 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from paddle_serving_server_gpu.pipeline import PipelineClient +import numpy as np +import requests +import json +import cv2 +import base64 +import os + +client = PipelineClient() +client.connect(['127.0.0.1:9999']) + + +def cv2_to_base64(image): + return base64.b64encode(image).decode('utf8') + + +with open("daisy.jpg", 'rb') as file: + image_data = file.read() +image = cv2_to_base64(image_data) + +for i in range(1): + ret = client.predict(feed_dict={"image": image}, fetch=["label", "prob"]) + print(ret) diff --git a/python/examples/pipeline/imagenet/resnet50_web_service.py b/python/examples/pipeline/imagenet/resnet50_web_service.py new file mode 100644 index 0000000000000000000000000000000000000000..ece3befee8d62c9af2e0e0a1c576a63e42d86245 --- /dev/null +++ b/python/examples/pipeline/imagenet/resnet50_web_service.py @@ -0,0 +1,71 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +from paddle_serving_app.reader import Sequential, URL2Image, Resize, CenterCrop, RGB2BGR, Transpose, Div, Normalize, Base64ToImage +try: + from paddle_serving_server_gpu.web_service import WebService, Op +except ImportError: + from paddle_serving_server.web_service import WebService, Op +import logging +import numpy as np +import base64, cv2 + + +class ImagenetOp(Op): + def init_op(self): + self.seq = Sequential([ + Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)), + Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], + True) + ]) + self.label_dict = {} + label_idx = 0 + with open("imagenet.label") as fin: + for line in fin: + self.label_dict[label_idx] = line.strip() + label_idx += 1 + + def preprocess(self, input_dicts, data_id, log_id): + (_, input_dict), = input_dicts.items() + data = base64.b64decode(input_dict["image"].encode('utf8')) + data = np.fromstring(data, np.uint8) + # Note: class variables(self.var) can only be used in process op mode + im = cv2.imdecode(data, cv2.IMREAD_COLOR) + img = self.seq(im) + return {"image": img[np.newaxis, :].copy()}, False, None, "" + + def postprocess(self, input_dicts, fetch_dict, log_id): + print(fetch_dict) + score_list = fetch_dict["score"] + result = {"label": [], "prob": []} + for score in score_list: + score = score.tolist() + max_score = max(score) + result["label"].append(self.label_dict[score.index(max_score)] + .strip().replace(",", "")) + result["prob"].append(max_score) + result["label"] = str(result["label"]) + result["prob"] = str(result["prob"]) + return result, None, "" + + +class ImageService(WebService): + def get_pipeline_response(self, read_op): + image_op = ImagenetOp(name="imagenet", input_ops=[read_op]) + return image_op + + +uci_service = ImageService(name="imagenet") +uci_service.prepare_pipeline_config("config.yml") +uci_service.run_service() diff --git a/python/examples/pipeline/imdb_model_ensemble/README_CN.md b/python/examples/pipeline/imdb_model_ensemble/README_CN.md index 
88eeab70c470268775ad22fd65a6d1b999a6b167..fd4785292c3bfa731f76666b7d4e12e4e285fbda 100644 --- a/python/examples/pipeline/imdb_model_ensemble/README_CN.md +++ b/python/examples/pipeline/imdb_model_ensemble/README_CN.md @@ -8,8 +8,8 @@ sh get_data.sh ## 启动服务 ``` -python -m paddle_serving_server_gpu.serve --model imdb_cnn_model --port 9292 &> cnn.log & -python -m paddle_serving_server_gpu.serve --model imdb_bow_model --port 9393 &> bow.log & +python -m paddle_serving_server.serve --model imdb_cnn_model --port 9292 &> cnn.log & +python -m paddle_serving_server.serve --model imdb_bow_model --port 9393 &> bow.log & python test_pipeline_server.py &>pipeline.log & ``` @@ -17,8 +17,3 @@ python test_pipeline_server.py &>pipeline.log & ``` python test_pipeline_client.py ``` - -## HTTP 测试 -``` -curl -X POST -k http://localhost:9999/prediction -d '{"key": ["words"], "value": ["i am very sad | 0"]}' -``` diff --git a/python/examples/pipeline/imdb_model_ensemble/config.yml b/python/examples/pipeline/imdb_model_ensemble/config.yml index 3447ffd449de59ea76450e95c7f355413d1a12ac..2f25fa861f3ec50d15d5d5795e5e25dbf801e861 100644 --- a/python/examples/pipeline/imdb_model_ensemble/config.yml +++ b/python/examples/pipeline/imdb_model_ensemble/config.yml @@ -1,11 +1,100 @@ -rpc_port: 18085 +#rpc端口, rpc_port和http_port不允许同时为空。当rpc_port为空且http_port不为空时,会自动将rpc_port设置为http_port+1 +rpc_port: 18070 + +#http端口, rpc_port和http_port不允许同时为空。当rpc_port可用且http_port为空时,不自动生成http_port +http_port: 18071 + +#worker_num, 最大并发数。当build_dag_each_worker=True时, 框架会创建worker_num个进程,每个进程内构建grpcSever和DAG +#当build_dag_each_worker=False时,框架会设置主线程grpc线程池的max_workers=worker_num worker_num: 4 -build_dag_each_worker: false -http_port: 9999 + +#build_dag_each_worker, False,框架在进程内创建一条DAG;True,框架会每个进程内创建多个独立的DAG +build_dag_each_worker: False + dag: - is_thread_op: false - client_type: brpc + #op资源类型, True, 为线程模型;False,为进程模型 + is_thread_op: True + + #重试次数 retry: 1 - use_profile: false + + #使用性能分析, True,生成Timeline性能数据,对性能有一定影响;False为不使用 + use_profile: False + + #channel的最大长度,默认为0 + channel_size: 0 + + #tracer, 跟踪框架吞吐,每个OP和channel的工作情况。无tracer时不生成数据 tracer: + #每次trace的时间间隔,单位秒/s interval_s: 10 +op: + bow: + #并发数,is_thread_op=True时,为线程并发;否则为进程并发 + concurrency: 1 + + #client连接类型,brpc + client_type: brpc + + #Serving交互重试次数,默认不重试 + retry: 1 + + #Serving交互超时时间, 单位ms + timeout: 3000 + + #Serving IPs + server_endpoints: ["127.0.0.1:9393"] + + #bow模型client端配置 + client_config: "imdb_bow_client_conf/serving_client_conf.prototxt" + + #Fetch结果列表,以client_config中fetch_var的alias_name为准 + fetch_list: ["prediction"] + + #批量查询Serving的数量, 默认1。batch_size>1要设置auto_batching_timeout,否则不足batch_size时会阻塞 + batch_size: 1 + + #批量查询超时,与batch_size配合使用 + auto_batching_timeout: 2000 + cnn: + #并发数,is_thread_op=True时,为线程并发;否则为进程并发 + concurrency: 1 + + #client连接类型,brpc + client_type: brpc + + #Serving交互重试次数,默认不重试 + retry: 1 + + #超时时间, 单位ms + timeout: 3000 + + #Serving IPs + server_endpoints: ["127.0.0.1:9292"] + + #cnn模型client端配置 + client_config: "imdb_cnn_client_conf/serving_client_conf.prototxt" + + #Fetch结果列表,以client_config中fetch_var的alias_name为准 + fetch_list: ["prediction"] + + #批量查询Serving的数量, 默认1。batch_size>1要设置auto_batching_timeout,否则不足batch_size时会阻塞 + batch_size: 1 + + #批量查询超时,与batch_size配合使用 + auto_batching_timeout: 2000 + combine: + #并发数,is_thread_op=True时,为线程并发;否则为进程并发 + concurrency: 1 + + #Serving交互重试次数,默认不重试 + retry: 1 + + #超时时间, 单位ms + timeout: 3000 + + #批量查询Serving的数量, 默认1。batch_size>1要设置auto_batching_timeout,否则不足batch_size时会阻塞 + batch_size: 1 + + #批量查询超时,与batch_size配合使用 
+ auto_batching_timeout: 2000 diff --git a/python/examples/pipeline/imdb_model_ensemble/test_pipeline_client.py b/python/examples/pipeline/imdb_model_ensemble/test_pipeline_client.py index 765ab7fd5a02a4af59b0773135bc59c802464b42..1737f8f782a25025547f68be6619c237975f5172 100644 --- a/python/examples/pipeline/imdb_model_ensemble/test_pipeline_client.py +++ b/python/examples/pipeline/imdb_model_ensemble/test_pipeline_client.py @@ -15,21 +15,22 @@ from paddle_serving_server.pipeline import PipelineClient import numpy as np client = PipelineClient() -client.connect(['127.0.0.1:18080']) +client.connect(['127.0.0.1:18070']) words = 'i am very sad | 0' futures = [] -for i in range(4): +for i in range(100): futures.append( client.predict( - feed_dict={"words": words}, + feed_dict={"words": words, + "logid": 10000 + i}, fetch=["prediction"], asyn=True, profile=False)) for f in futures: res = f.result() - if res["ecode"] != 0: + if res.err_no != 0: print("predict failed: {}".format(res)) print(res) diff --git a/python/examples/pipeline/imdb_model_ensemble/test_pipeline_server.py b/python/examples/pipeline/imdb_model_ensemble/test_pipeline_server.py index 92a15379c0b6ae1ad0cdc1401a01556e41c7eed7..35171a3910baf0af3ac6c83e521744906f49c948 100644 --- a/python/examples/pipeline/imdb_model_ensemble/test_pipeline_server.py +++ b/python/examples/pipeline/imdb_model_ensemble/test_pipeline_server.py @@ -15,10 +15,14 @@ from paddle_serving_server.pipeline import Op, RequestOp, ResponseOp from paddle_serving_server.pipeline import PipelineServer from paddle_serving_server.pipeline.proto import pipeline_service_pb2 -from paddle_serving_server.pipeline.channel import ChannelDataEcode +from paddle_serving_server.pipeline.channel import ChannelDataErrcode import numpy as np -from paddle_serving_app.reader import IMDBDataset +from paddle_serving_app.reader.imdb_reader import IMDBDataset import logging +try: + from paddle_serving_server.web_service import WebService +except ImportError: + from paddle_serving_server_gpu.web_service import WebService _LOGGER = logging.getLogger() user_handler = logging.StreamHandler() @@ -41,74 +45,68 @@ class ImdbRequestOp(RequestOp): continue words = request.value[idx] word_ids, _ = self.imdb_dataset.get_words_and_label(words) - dictdata[key] = np.array(word_ids) - return dictdata + word_len = len(word_ids) + dictdata[key] = np.array(word_ids).reshape(word_len, 1) + dictdata["{}.lod".format(key)] = np.array([0, word_len]) + + log_id = None + if request.logid is not None: + log_id = request.logid + return dictdata, log_id, None, "" class CombineOp(Op): - def preprocess(self, input_data): + def preprocess(self, input_data, data_id, log_id): + #_LOGGER.info("Enter CombineOp::preprocess") combined_prediction = 0 for op_name, data in input_data.items(): _LOGGER.info("{}: {}".format(op_name, data["prediction"])) combined_prediction += data["prediction"] data = {"prediction": combined_prediction / 2} - return data + return data, False, None, "" class ImdbResponseOp(ResponseOp): # Here ImdbResponseOp is consistent with the default ResponseOp implementation def pack_response_package(self, channeldata): resp = pipeline_service_pb2.Response() - resp.ecode = channeldata.ecode - if resp.ecode == ChannelDataEcode.OK.value: + resp.err_no = channeldata.error_code + if resp.err_no == ChannelDataErrcode.OK.value: feed = channeldata.parse() # ndarray to string for name, var in feed.items(): resp.value.append(var.__repr__()) resp.key.append(name) else: - resp.error_info = channeldata.error_info + 
resp.err_msg = channeldata.error_info return resp read_op = ImdbRequestOp() -bow_op = Op(name="bow", - input_ops=[read_op], - server_endpoints=["127.0.0.1:9393"], - fetch_list=["prediction"], - client_config="imdb_bow_client_conf/serving_client_conf.prototxt", - concurrency=1, - timeout=-1, - retry=1, - batch_size=3, - auto_batching_timeout=1000) -cnn_op = Op(name="cnn", - input_ops=[read_op], - server_endpoints=["127.0.0.1:9292"], - fetch_list=["prediction"], - client_config="imdb_cnn_client_conf/serving_client_conf.prototxt", - concurrency=1, - timeout=-1, - retry=1, - batch_size=1, - auto_batching_timeout=None) -combine_op = CombineOp( - name="combine", - input_ops=[bow_op, cnn_op], - concurrency=1, - timeout=-1, - retry=1, - batch_size=2, - auto_batching_timeout=None) + + +class BowOp(Op): + def init_op(self): + pass + + +class CnnOp(Op): + def init_op(self): + pass + + +bow_op = BowOp("bow", input_ops=[read_op]) +cnn_op = CnnOp("cnn", input_ops=[read_op]) +combine_op = CombineOp("combine", input_ops=[bow_op, cnn_op]) # fetch output of bow_op -# response_op = ImdbResponseOp(input_ops=[bow_op]) +#response_op = ImdbResponseOp(input_ops=[bow_op]) # fetch output of combine_op response_op = ImdbResponseOp(input_ops=[combine_op]) # use default ResponseOp implementation -# response_op = ResponseOp(input_ops=[combine_op]) +#response_op = ResponseOp(input_ops=[combine_op]) server = PipelineServer() server.set_response_op(response_op) diff --git a/python/examples/pipeline/ocr/README.md b/python/examples/pipeline/ocr/README.md index f51789fc5e419d715141ba59dc49011d4f306e56..de7bcaa2ece7f9fa7ba56de533e8e4dd023ad1f3 100644 --- a/python/examples/pipeline/ocr/README.md +++ b/python/examples/pipeline/ocr/README.md @@ -28,31 +28,9 @@ python web_service.py &>log.txt & python pipeline_http_client.py ``` - -
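
The OCR README above invokes a `pipeline_http_client.py` that this section does not show, and the new imagenet example ships only an RPC client (`pipeline_rpc_client.py`). For reference, a minimal HTTP client for the imagenet pipeline service might look like the sketch below. It is an assumption-based illustration, not part of this change: the port and the `/imagenet/prediction` path depend on the example's `config.yml`, which is not included here (only the service name `imagenet` comes from `resnet50_web_service.py`), and the `key`/`value` JSON layout is borrowed from the curl command removed from the imdb ensemble README.

```python
# Hypothetical HTTP counterpart to pipeline_rpc_client.py (not part of this
# diff). The port and the "/imagenet/prediction" path are assumptions; they
# are governed by the example's config.yml, which this diff does not show.
import base64
import json

import requests


def cv2_to_base64(image_bytes):
    # same helper as in pipeline_rpc_client.py: raw bytes -> base64 string
    return base64.b64encode(image_bytes).decode('utf8')


with open("daisy.jpg", "rb") as f:
    image = cv2_to_base64(f.read())

url = "http://127.0.0.1:9999/imagenet/prediction"  # assumed port and path
data = {"key": ["image"], "value": [image]}

resp = requests.post(url, data=json.dumps(data))
print(resp.json())
```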
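
The server-side changes above also reflect a revised Op interface: `preprocess` now receives `(input_dicts, data_id, log_id)` and returns a 4-tuple `(feed_dict, skip_process, prod_errcode, err_msg)`, and `postprocess` receives a `log_id` and returns a 3-tuple, as seen in `ImagenetOp` and `CombineOp`. The minimal sketch below restates that pattern in one place; the op name, feed key, and fetch name are placeholders rather than anything defined in this diff.

```python
# Minimal sketch of the updated Op callback signatures (placeholders only;
# "demo", "words" and "prediction" do not refer to a real model here).
import numpy as np
from paddle_serving_server.web_service import Op


class DemoOp(Op):
    def init_op(self):
        # one-time initialization (readers, label maps, ...) goes here
        pass

    def preprocess(self, input_dicts, data_id, log_id):
        # simplest case: exactly one upstream op feeds this op
        (_, input_dict), = input_dicts.items()
        feed = {"words": np.array(input_dict["words"])}
        # (feed_dict, skip_process_flag, product_errcode, error_msg)
        return feed, False, None, ""

    def postprocess(self, input_dicts, fetch_dict, log_id):
        # (result_dict, product_errcode, error_msg)
        return {"prediction": str(fetch_dict["prediction"])}, None, ""
```

In the new style the op would then be constructed as `DemoOp("demo", input_ops=[read_op])`, mirroring `BowOp`/`CnnOp` in `test_pipeline_server.py`; its endpoints, client config, fetch list, and batching options move out of the constructor and into `config.yml`.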