diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt
index cf8c84602c008d1be089d7c178a631a718fb1317..47024faa608c2c1a527bc58399ac7258f746e6e4 100644
--- a/core/CMakeLists.txt
+++ b/core/CMakeLists.txt
@@ -25,4 +25,5 @@ endif()
 
 if (NOT CLIENT_ONLY)
 add_subdirectory(predictor)
+add_subdirectory(general-server)
 endif()
diff --git a/core/configure/CMakeLists.txt b/core/configure/CMakeLists.txt
index fa128e8239c55eda9343a5841221645cf4ed8f33..e00dc18f85bf5b78f1e0e2157c8963215fb73eb2 100644
--- a/core/configure/CMakeLists.txt
+++ b/core/configure/CMakeLists.txt
@@ -29,6 +29,10 @@ FILE(GLOB inc ${CMAKE_CURRENT_BINARY_DIR}/*.pb.h)
 install(FILES ${inc}
         DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/include/configure)
 
+py_proto_compile(general_model_config_py_proto SRCS proto/general_model_config.proto)
+add_custom_target(general_model_config_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
+add_dependencies(general_model_config_py_proto general_model_config_py_proto_init)
+
 if (CLIENT_ONLY)
 py_proto_compile(sdk_configure_py_proto SRCS proto/sdk_configure.proto)
 add_custom_target(sdk_configure_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
@@ -38,6 +42,13 @@ add_custom_command(TARGET sdk_configure_py_proto POST_BUILD
         COMMAND cp *.py ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/proto
         COMMENT "Copy generated python proto into directory paddle_serving_client/proto."
         WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+
+add_custom_command(TARGET general_model_config_py_proto POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/proto
+        COMMAND cp *.py ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/proto
+        COMMENT "Copy generated general_model_config proto file into directory paddle_serving_client/proto."
+        WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+
 endif()
 
 if (NOT CLIENT_ONLY)
@@ -49,4 +60,12 @@ add_custom_command(TARGET server_config_py_proto POST_BUILD
         COMMAND cp *.py ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto
         COMMENT "Copy generated python proto into directory paddle_serving_server/proto."
         WORKING_DIRECTORY ${CMAKE_CURRENT_BINRARY_DIR})
+
+add_custom_command(TARGET general_model_config_py_proto POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto
+        COMMAND cp *.py ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto
+        COMMENT "Copy generated general_model_config proto file into directory paddle_serving_server/proto."
+        WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+
 endif()
+
diff --git a/core/configure/include/configure_parser.h b/core/configure/include/configure_parser.h
index fc9f786a192e40e14fd6c79206ccc5edf13809d5..c69b86050f4024700e79fe88d9a1ca4f64978c5b 100644
--- a/core/configure/include/configure_parser.h
+++ b/core/configure/include/configure_parser.h
@@ -19,13 +19,17 @@ namespace baidu {
 namespace paddle_serving {
 namespace configure {
 
-int read_proto_conf(const std::string &conf_path,
-                    const std::string &conf_file,
-                    google::protobuf::Message *conf);
-int write_proto_conf(google::protobuf::Message *message,
-                     const std::string &output_path,
-                     const std::string &output_file);
+int read_proto_conf(const std::string &conf_full_path,
+                    google::protobuf::Message *conf);
+
+int read_proto_conf(const std::string &conf_path,
+                    const std::string &conf_file,
+                    google::protobuf::Message *conf);
+
+int write_proto_conf(google::protobuf::Message *message,
+                     const std::string &output_path,
+                     const std::string &output_file);
 
 }  // namespace configure
 }  // namespace paddle_serving
diff --git a/core/configure/proto/general_model_config.proto b/core/configure/proto/general_model_config.proto
index 22f31047f11f8642ce53261fc2ac720448918485..29753cbc798154bcbb3c3ca3e0cb7935d772c7f3 100644
--- a/core/configure/proto/general_model_config.proto
+++ b/core/configure/proto/general_model_config.proto
@@ -16,14 +16,16 @@ syntax = "proto2";
 package baidu.paddle_serving.configure;
 
 message FeedVar {
-  required string name = 1;
-  required bool is_lod_tensor = 2;
-  required int32 feed_type = 3;
-  repeated int32 shape = 4;
+  optional string name = 1;
+  optional string alias_name = 2;
+  optional bool is_lod_tensor = 3 [ default = false ];
+  optional int32 feed_type = 4 [ default = 0 ];
+  repeated int32 shape = 5;
 }
 message FetchVar {
-  required string name = 1;
-  repeated int32 shape = 2;
+  optional string name = 1;
+  optional string alias_name = 2;
+  repeated int32 shape = 3;
 }
 message GeneralModelConfig {
   repeated FeedVar feed_var = 1;
diff --git a/core/configure/src/configure_parser.cpp b/core/configure/src/configure_parser.cpp
index 2ce6fa9b51458edb57cfa3625eb8811b17fc3ae5..13a72df0c3bfd1e2e87dd81485546695eb8d7b87 100644
--- a/core/configure/src/configure_parser.cpp
+++ b/core/configure/src/configure_parser.cpp
@@ -31,6 +31,24 @@ namespace baidu {
 namespace paddle_serving {
 namespace configure {
 
+int read_proto_conf(const std::string &conf_file_full_path,
+                    google::protobuf::Message *conf) {
+  int fd = open(conf_file_full_path.c_str(), O_RDONLY);
+  if (fd == -1) {
+    LOG(WARNING) << "File not found: " << conf_file_full_path.c_str();
+    return -1;
+  }
+
+  google::protobuf::io::FileInputStream input(fd);
+  bool success = google::protobuf::TextFormat::Parse(&input, conf);
+  close(fd);
+  if (!success) {
+    return -1;
+  }
+
+  return 0;
+}
+
 int read_proto_conf(const std::string &conf_path,
                     const std::string &conf_file,
                     google::protobuf::Message *conf) {
diff --git a/core/general-client/include/general_model.h b/core/general-client/include/general_model.h
index 3567fbdaef75adf6dbf759056c6b4c6d062d1ca9..04d988cea06c26332b7b7c03ed602284d19d449e 100644
--- a/core/general-client/include/general_model.h
+++ b/core/general-client/include/general_model.h
@@ -45,7 +45,7 @@ class PredictorClient {
   PredictorClient() {}
   ~PredictorClient() {}
 
-  void init(const std::string& client_conf);
+  int init(const std::string& client_conf);
 
   void set_predictor_conf(const std::string& conf_path,
                           const std::string& conf_file);
diff --git a/core/general-client/src/general_model.cpp b/core/general-client/src/general_model.cpp
index a593117db992a76f9a223cc15a768c92601dc879..e874712545b265a0546063b834bcf7554e7dcd1a 100644
--- a/core/general-client/src/general_model.cpp
+++ b/core/general-client/src/general_model.cpp
@@ -27,45 +27,42 @@ using baidu::paddle_serving::predictor::general_model::FetchInst;
 namespace baidu {
 namespace paddle_serving {
 namespace general_model {
+using configure::GeneralModelConfig;
 
-void PredictorClient::init(const std::string &conf_file) {
-  _conf_file = conf_file;
-  std::ifstream fin(conf_file);
-  if (!fin) {
-    LOG(ERROR) << "Your inference conf file can not be found";
-    exit(-1);
-  }
-  _feed_name_to_idx.clear();
-  _fetch_name_to_idx.clear();
-  _shape.clear();
-  int feed_var_num = 0;
-  int fetch_var_num = 0;
-  fin >> feed_var_num >> fetch_var_num;
-  std::string name;
-  std::string fetch_var_name;
-  int shape_num = 0;
-  int dim = 0;
-  int type_value = 0;
-  for (int i = 0; i < feed_var_num; ++i) {
-    fin >> name;
-    _feed_name_to_idx[name] = i;
-    fin >> shape_num;
-    std::vector<int> tmp_feed_shape;
-    for (int j = 0; j < shape_num; ++j) {
-      fin >> dim;
-      tmp_feed_shape.push_back(dim);
+int PredictorClient::init(const std::string &conf_file) {
+  try {
+    GeneralModelConfig model_config;
+    if (configure::read_proto_conf(conf_file.c_str(),
+                                   &model_config) != 0) {
+      LOG(ERROR) << "Failed to load general model config"
+                 << ", file path: " << conf_file;
+      return -1;
+    }
+    _feed_name_to_idx.clear();
+    _fetch_name_to_idx.clear();
+    _shape.clear();
+    int feed_var_num = model_config.feed_var_size();
+    int fetch_var_num = model_config.fetch_var_size();
+    for (int i = 0; i < feed_var_num; ++i) {
+      _feed_name_to_idx[model_config.feed_var(i).alias_name()] = i;
+      std::vector<int> tmp_feed_shape;
+      for (int j = 0; j < model_config.feed_var(i).shape_size(); ++j) {
+        tmp_feed_shape.push_back(model_config.feed_var(i).shape(j));
+      }
+      _type.push_back(model_config.feed_var(i).feed_type());
+      _shape.push_back(tmp_feed_shape);
     }
-    fin >> type_value;
-    _type.push_back(type_value);
-    _shape.push_back(tmp_feed_shape);
-  }
-  for (int i = 0; i < fetch_var_num; ++i) {
-    fin >> name;
-    fin >> fetch_var_name;
-    _fetch_name_to_idx[name] = i;
-    _fetch_name_to_var_name[name] = fetch_var_name;
+    for (int i = 0; i < fetch_var_num; ++i) {
+      _fetch_name_to_idx[model_config.fetch_var(i).alias_name()] = i;
+      _fetch_name_to_var_name[model_config.fetch_var(i).alias_name()] =
+          model_config.fetch_var(i).name();
+    }
+  } catch (std::exception &e) {
+    LOG(ERROR) << "Failed to load general model config: " << e.what();
+    return -1;
   }
+  return 0;
 }
 
 void PredictorClient::set_predictor_conf(const std::string &conf_path,
diff --git a/core/general-client/src/pybind_general_model.cpp b/core/general-client/src/pybind_general_model.cpp
index caa88acbcdc514bdcf94fbea2ee9458105d7bbd7..8aae514ec3bed6d93735f94af94697c052938162 100644
--- a/core/general-client/src/pybind_general_model.cpp
+++ b/core/general-client/src/pybind_general_model.cpp
@@ -33,7 +33,7 @@ PYBIND11_MODULE(serving_client, m) {
       .def(py::init())
       .def("init",
            [](PredictorClient &self, const std::string &conf) {
-             self.init(conf);
+             return self.init(conf);
            })
       .def("set_predictor_conf",
            [](PredictorClient &self,
diff --git a/core/general-server/CMakeLists.txt b/core/general-server/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..29527e8d9041e6270837fd6da7ec8b56d6ef821f
--- /dev/null
+++ b/core/general-server/CMakeLists.txt
@@ -0,0 +1,56 @@
+include_directories(SYSTEM
+  ${CMAKE_CURRENT_LIST_DIR}/../kvdb/include)
+include(op/CMakeLists.txt)
+include(proto/CMakeLists.txt)
+add_executable(serving ${serving_srcs})
+add_dependencies(serving pdcodegen fluid_cpu_engine pdserving paddle_fluid
+  opencv_imgcodecs cube-api)
+if (WITH_GPU)
+  add_dependencies(serving fluid_gpu_engine)
+endif()
+target_include_directories(serving PUBLIC
+  ${CMAKE_CURRENT_BINARY_DIR}/../../core/predictor
+  )
+
+if(WITH_GPU)
+  target_link_libraries(serving -Wl,--whole-archive fluid_gpu_engine
+    -Wl,--no-whole-archive)
+endif()
+
+target_link_libraries(serving -Wl,--whole-archive fluid_cpu_engine
+  -Wl,--no-whole-archive)
+
+target_link_libraries(serving paddle_fluid ${paddle_depend_libs})
+
+target_link_libraries(serving pdserving)
+target_link_libraries(serving cube-api)
+
+target_link_libraries(serving kvdb rocksdb)
+
+if(WITH_GPU)
+  target_link_libraries(serving ${CUDA_LIBRARIES})
+endif()
+
+if(WITH_MKL)
+  target_link_libraries(serving -liomp5 -lmklml_intel -lmkldnn -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
+else()
+  target_link_libraries(serving openblas -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
+endif()
+
+install(TARGETS serving
+  RUNTIME DESTINATION
+  ${PADDLE_SERVING_INSTALL_DIR}/demo/serving/bin)
+install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/conf DESTINATION
+  ${PADDLE_SERVING_INSTALL_DIR}/demo/serving/)
+
+FILE(GLOB inc ${CMAKE_CURRENT_BINARY_DIR}/*.pb.h)
+install(FILES ${inc}
+  DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/include/serving)
+
+if (${WITH_MKL})
+install(FILES
+  ${CMAKE_BINARY_DIR}/third_party/install/Paddle/third_party/install/mklml/lib/libmklml_intel.so
+  ${CMAKE_BINARY_DIR}/third_party/install/Paddle/third_party/install/mklml/lib/libiomp5.so
+  ${CMAKE_BINARY_DIR}/third_party/install/Paddle/third_party/install/mkldnn/lib/libmkldnn.so.0
+  DESTINATION
+  ${PADDLE_SERVING_INSTALL_DIR}/demo/serving/bin)
+endif()
diff --git a/core/general-server/op/CMakeLists.txt b/core/general-server/op/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9287408e5e64fa284acbbdb18563703510114e87
--- /dev/null
+++ b/core/general-server/op/CMakeLists.txt
@@ -0,0 +1,2 @@
+FILE(GLOB op_srcs ${CMAKE_CURRENT_LIST_DIR}/*.cpp)
+LIST(APPEND serving_srcs ${op_srcs})
diff --git a/core/general-server/op/general_infer_op.cpp b/core/general-server/op/general_infer_op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..742d27ef4612b8b201f8b21b5058dbf7525c9a9d
--- /dev/null
+++ b/core/general-server/op/general_infer_op.cpp
@@ -0,0 +1,118 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include "examples/demo-serving/op/general_infer_op.h" +#include +#include +#include +#include +#include "core/predictor/framework/infer.h" +#include "core/predictor/framework/memory.h" +#include "core/predictor/framework/resource.h" +#include "examples/demo-serving/op/general_reader_op.h" + +namespace baidu { +namespace paddle_serving { +namespace serving { + +using baidu::paddle_serving::predictor::MempoolWrapper; +using baidu::paddle_serving::predictor::general_model::Tensor; +using baidu::paddle_serving::predictor::general_model::Response; +using baidu::paddle_serving::predictor::general_model::FetchInst; +using baidu::paddle_serving::predictor::InferManager; + +int GeneralInferOp::inference() { + const GeneralReaderOutput *reader_out = + get_depend_argument("general_reader_op"); + if (!reader_out) { + LOG(ERROR) << "Failed mutable depended argument, op:" + << "general_reader_op"; + return -1; + } + + int reader_status = reader_out->reader_status; + if (reader_status != 0) { + LOG(ERROR) << "Read request wrong."; + return -1; + } + + const TensorVector *in = &reader_out->tensor_vector; + TensorVector *out = butil::get_object(); + int batch_size = (*in)[0].shape[0]; + // infer + if (InferManager::instance().infer(GENERAL_MODEL_NAME, in, out, batch_size)) { + LOG(ERROR) << "Failed do infer in fluid model: " << GENERAL_MODEL_NAME; + return -1; + } + + Response *res = mutable_data(); + + for (int i = 0; i < batch_size; ++i) { + FetchInst *fetch_inst = res->add_insts(); + for (int j = 0; j < out->size(); ++j) { + Tensor *tensor = fetch_inst->add_tensor_array(); + tensor->set_elem_type(1); + if (out->at(j).lod.size() == 1) { + tensor->add_shape(-1); + } else { + for (int k = 1; k < out->at(j).shape.size(); ++k) { + tensor->add_shape(out->at(j).shape[k]); + } + } + } + } + + for (int i = 0; i < out->size(); ++i) { + float *data_ptr = static_cast(out->at(i).data.data()); + int cap = 1; + for (int j = 1; j < out->at(i).shape.size(); ++j) { + cap *= out->at(i).shape[j]; + } + if (out->at(i).lod.size() == 1) { + for (int j = 0; j < batch_size; ++j) { + for (int k = out->at(i).lod[0][j]; k < out->at(i).lod[0][j + 1]; k++) { + res->mutable_insts(j)->mutable_tensor_array(i)->add_data( + reinterpret_cast(&(data_ptr[k])), sizeof(float)); + } + } + } else { + for (int j = 0; j < batch_size; ++j) { + for (int k = j * cap; k < (j + 1) * cap; ++k) { + res->mutable_insts(j)->mutable_tensor_array(i)->add_data( + reinterpret_cast(&(data_ptr[k])), sizeof(float)); + } + } + } + } + /* + for (size_t i = 0; i < in->size(); ++i) { + (*in)[i].shape.clear(); + } + in->clear(); + butil::return_object(in); + + for (size_t i = 0; i < out->size(); ++i) { + (*out)[i].shape.clear(); + } + out->clear(); + butil::return_object(out); + } + */ + return 0; +} +DEFINE_OP(GeneralInferOp); + +} // namespace serving +} // namespace paddle_serving +} // namespace baidu diff --git a/core/general-server/op/general_infer_op.h b/core/general-server/op/general_infer_op.h new file mode 100644 index 0000000000000000000000000000000000000000..ca839054e0f11b40fd5f461307f3121d338028f8 --- /dev/null +++ b/core/general-server/op/general_infer_op.h @@ -0,0 +1,47 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#ifdef BCLOUD +#ifdef WITH_GPU +#include "paddle/paddle_inference_api.h" +#else +#include "paddle/fluid/inference/api/paddle_inference_api.h" +#endif +#else +#include "paddle_inference_api.h" // NOLINT +#endif +#include "examples/demo-serving/general_model_service.pb.h" + +namespace baidu { +namespace paddle_serving { +namespace serving { + +static const char* GENERAL_MODEL_NAME = "general_model"; + +class GeneralInferOp + : public baidu::paddle_serving::predictor::OpWithChannel< + baidu::paddle_serving::predictor::general_model::Response> { + public: + typedef std::vector TensorVector; + + DECLARE_OP(GeneralInferOp); + + int inference(); +}; + +} // namespace serving +} // namespace paddle_serving +} // namespace baidu diff --git a/core/general-server/op/general_reader_op.cpp b/core/general-server/op/general_reader_op.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b692ba9796dc47f9710f7db96372636f7b42140a --- /dev/null +++ b/core/general-server/op/general_reader_op.cpp @@ -0,0 +1,217 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "examples/demo-serving/op/general_reader_op.h" +#include +#include +#include +#include +#include "core/predictor/framework/infer.h" +#include "core/predictor/framework/memory.h" + +namespace baidu { +namespace paddle_serving { +namespace serving { + +using baidu::paddle_serving::predictor::MempoolWrapper; +using baidu::paddle_serving::predictor::general_model::Tensor; +using baidu::paddle_serving::predictor::general_model::Request; +using baidu::paddle_serving::predictor::general_model::FeedInst; +using baidu::paddle_serving::predictor::PaddleGeneralModelConfig; + +int conf_check(const Request *req, + const std::shared_ptr &model_config) { + int var_num = req->insts(0).tensor_array_size(); + if (var_num != model_config->_feed_type.size()) { + VLOG(2) << "var num: " << var_num; + VLOG(2) << "model config var num: " << model_config->_feed_type.size(); + LOG(ERROR) << "feed var number not match."; + return -1; + } + for (int i = 0; i < var_num; ++i) { + if (model_config->_feed_type[i] != + req->insts(0).tensor_array(i).elem_type()) { + LOG(ERROR) << "feed type not match."; + return -1; + } + if (model_config->_feed_shape[i].size() == + req->insts(0).tensor_array(i).shape_size()) { + for (int j = 0; j < model_config->_feed_shape[i].size(); ++j) { + req->insts(0).tensor_array(i).shape(j); + if (model_config->_feed_shape[i][j] != + req->insts(0).tensor_array(i).shape(j)) { + LOG(ERROR) << "feed shape not match."; + return -1; + } + } + } else { + LOG(ERROR) << "feed shape not match."; + return -1; + } + } + return 0; +} + +int GeneralReaderOp::inference() { + // reade request from client + const Request *req = dynamic_cast(get_request_message()); + + int batch_size = req->insts_size(); + int input_var_num = 0; + + std::vector elem_type; + std::vector elem_size; + std::vector capacity; + + GeneralReaderOutput *res = mutable_data(); + TensorVector *in = &res->tensor_vector; + + if (!res) { + LOG(ERROR) << "Failed get op tls reader object output"; + } + if (batch_size <= 0) { + res->reader_status = -1; + return 0; + } + + int var_num = req->insts(0).tensor_array_size(); + VLOG(2) << "var num: " << var_num; + // read config + + LOG(INFO) << "start to call load general model_conf op"; + baidu::paddle_serving::predictor::Resource &resource = + baidu::paddle_serving::predictor::Resource::instance(); + + LOG(INFO) << "get resource pointer done."; + std::shared_ptr model_config = + resource.get_general_model_config(); + + LOG(INFO) << "print general model config done."; + + // check + res->reader_status = conf_check(req, model_config); + if (res->reader_status != 0) { + LOG(INFO) << "model conf of server:"; + resource.print_general_model_config(model_config); + return 0; + } + // package tensor + + elem_type.resize(var_num); + elem_size.resize(var_num); + capacity.resize(var_num); + paddle::PaddleTensor lod_tensor; + for (int i = 0; i < var_num; ++i) { + elem_type[i] = req->insts(0).tensor_array(i).elem_type(); + VLOG(2) << "var[" << i << "] has elem type: " << elem_type[i]; + if (elem_type[i] == 0) { // int64 + elem_size[i] = sizeof(int64_t); + lod_tensor.dtype = paddle::PaddleDType::INT64; + } else { + elem_size[i] = sizeof(float); + lod_tensor.dtype = paddle::PaddleDType::FLOAT32; + } + + if (req->insts(0).tensor_array(i).shape(0) == -1) { + lod_tensor.lod.resize(1); + lod_tensor.lod[0].push_back(0); + VLOG(2) << "var[" << i << "] is lod_tensor"; + } else { + lod_tensor.shape.push_back(batch_size); + capacity[i] = 1; + for (int k = 0; k < req->insts(0).tensor_array(i).shape_size(); ++k) { 
+ int dim = req->insts(0).tensor_array(i).shape(k); + VLOG(2) << "shape for var[" << i << "]: " << dim; + capacity[i] *= dim; + lod_tensor.shape.push_back(dim); + } + VLOG(2) << "var[" << i << "] is tensor, capacity: " << capacity[i]; + } + if (i == 0) { + lod_tensor.name = "words"; + } else { + lod_tensor.name = "label"; + } + in->push_back(lod_tensor); + } + + for (int i = 0; i < var_num; ++i) { + if (in->at(i).lod.size() == 1) { + for (int j = 0; j < batch_size; ++j) { + const Tensor &tensor = req->insts(j).tensor_array(i); + int data_len = tensor.data_size(); + VLOG(2) << "tensor size for var[" << i << "]: " << tensor.data_size(); + int cur_len = in->at(i).lod[0].back(); + VLOG(2) << "current len: " << cur_len; + in->at(i).lod[0].push_back(cur_len + data_len); + VLOG(2) << "new len: " << cur_len + data_len; + } + in->at(i).data.Resize(in->at(i).lod[0].back() * elem_size[i]); + in->at(i).shape = {in->at(i).lod[0].back(), 1}; + VLOG(2) << "var[" << i + << "] is lod_tensor and len=" << in->at(i).lod[0].back(); + } else { + in->at(i).data.Resize(batch_size * capacity[i] * elem_size[i]); + VLOG(2) << "var[" << i + << "] is tensor and capacity=" << batch_size * capacity[i]; + } + } + + for (int i = 0; i < var_num; ++i) { + if (elem_type[i] == 0) { + int64_t *dst_ptr = static_cast(in->at(i).data.data()); + int offset = 0; + for (int j = 0; j < batch_size; ++j) { + for (int k = 0; k < req->insts(j).tensor_array(i).data_size(); ++k) { + dst_ptr[offset + k] = + *(const int64_t *)req->insts(j).tensor_array(i).data(k).c_str(); + } + if (in->at(i).lod.size() == 1) { + offset = in->at(i).lod[0][j + 1]; + } else { + offset += capacity[i]; + } + } + } else { + float *dst_ptr = static_cast(in->at(i).data.data()); + int offset = 0; + for (int j = 0; j < batch_size; ++j) { + for (int k = 0; k < req->insts(j).tensor_array(i).data_size(); ++k) { + dst_ptr[offset + k] = + *(const float *)req->insts(j).tensor_array(i).data(k).c_str(); + } + if (in->at(i).lod.size() == 1) { + offset = in->at(i).lod[0][j + 1]; + } else { + offset += capacity[i]; + } + } + } + } + + VLOG(2) << "read data from client success"; + // print request + std::ostringstream oss; + int64_t *example = reinterpret_cast((*in)[0].data.data()); + for (int i = 0; i < 10; i++) { + oss << *(example + i) << " "; + } + VLOG(2) << "head element of first feed var : " << oss.str(); + // + return 0; +} +DEFINE_OP(GeneralReaderOp); +} // namespace serving +} // namespace paddle_serving +} // namespace baidu diff --git a/core/general-server/op/general_reader_op.h b/core/general-server/op/general_reader_op.h new file mode 100644 index 0000000000000000000000000000000000000000..ce68dcaee53d68d707defeeeacd5dee2981120d0 --- /dev/null +++ b/core/general-server/op/general_reader_op.h @@ -0,0 +1,61 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#pragma once
+#include <vector>
+#ifdef BCLOUD
+#ifdef WITH_GPU
+#include "paddle/paddle_inference_api.h"
+#else
+#include "paddle/fluid/inference/api/paddle_inference_api.h"
+#endif
+#else
+#include "paddle_inference_api.h"  // NOLINT
+#endif
+#include <string>
+#include "core/predictor/framework/resource.h"
+#include "examples/demo-serving/general_model_service.pb.h"
+#include "examples/demo-serving/load_general_model_service.pb.h"
+
+namespace baidu {
+namespace paddle_serving {
+namespace serving {
+
+struct GeneralReaderOutput {
+  std::vector<paddle::PaddleTensor> tensor_vector;
+  int reader_status = 0;
+
+  void Clear() {
+    size_t tensor_count = tensor_vector.size();
+    for (size_t ti = 0; ti < tensor_count; ++ti) {
+      tensor_vector[ti].shape.clear();
+    }
+    tensor_vector.clear();
+  }
+  std::string ShortDebugString() const { return "Not implemented!"; }
+};
+
+class GeneralReaderOp : public baidu::paddle_serving::predictor::OpWithChannel<
+                            GeneralReaderOutput> {
+ public:
+  typedef std::vector<paddle::PaddleTensor> TensorVector;
+
+  DECLARE_OP(GeneralReaderOp);
+
+  int inference();
+};
+
+}  // namespace serving
+}  // namespace paddle_serving
+}  // namespace baidu
diff --git a/core/general-server/proto/CMakeLists.txt b/core/general-server/proto/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..480ffd0e93b22f72484338459d8f08302ff01b75
--- /dev/null
+++ b/core/general-server/proto/CMakeLists.txt
@@ -0,0 +1,7 @@
+LIST(APPEND protofiles
+  ${CMAKE_CURRENT_LIST_DIR}/load_general_model_service.proto
+  ${CMAKE_CURRENT_LIST_DIR}/general_model_service.proto
+)
+
+PROTOBUF_GENERATE_SERVING_CPP(TRUE PROTO_SRCS PROTO_HDRS ${protofiles})
+LIST(APPEND serving_srcs ${PROTO_SRCS})
diff --git a/core/general-server/proto/general_model_service.proto b/core/general-server/proto/general_model_service.proto
new file mode 100644
index 0000000000000000000000000000000000000000..1b9bfe380134eb494dbc104e0d241ffbf3f98c58
--- /dev/null
+++ b/core/general-server/proto/general_model_service.proto
@@ -0,0 +1,48 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +syntax = "proto2"; +import "pds_option.proto"; +import "builtin_format.proto"; +package baidu.paddle_serving.predictor.general_model; + +option cc_generic_services = true; + +message Tensor { + repeated bytes data = 1; + optional int32 elem_type = 2; + repeated int32 shape = 3; +}; + +message FeedInst { + repeated Tensor tensor_array = 1; +}; + +message FetchInst { + repeated Tensor tensor_array = 1; +}; + +message Request { + repeated FeedInst insts = 1; +}; + +message Response { + repeated FetchInst insts = 1; +}; + +service GeneralModelService { + rpc inference(Request) returns (Response); + rpc debug(Request) returns (Response); + option (pds.options).generate_impl = true; +}; diff --git a/core/general-server/proto/load_general_model_service.proto b/core/general-server/proto/load_general_model_service.proto new file mode 100644 index 0000000000000000000000000000000000000000..b8a86497f8c0b683f4e95f4517d83f576e79baad --- /dev/null +++ b/core/general-server/proto/load_general_model_service.proto @@ -0,0 +1,30 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto2"; +import "pds_option.proto"; +package baidu.paddle_serving.predictor.load_general_model_service; + +option cc_generic_services = true; + +message RequestAndResponse { + required int32 a = 1; + required float b = 2; +}; + +service LoadGeneralModelService { + rpc inference(RequestAndResponse) returns (RequestAndResponse); + rpc debug(RequestAndResponse) returns (RequestAndResponse); + option (pds.options).generate_impl = true; +}; diff --git a/core/predictor/common/constant.cpp b/core/predictor/common/constant.cpp index 331d339e94feae7109f450aa124a808b57720925..5fa1277de1a4b0d33d14a9c33d3cb4b280bc3b5c 100644 --- a/core/predictor/common/constant.cpp +++ b/core/predictor/common/constant.cpp @@ -38,12 +38,12 @@ DEFINE_int32( 0, "Number of pthreads that server runs on, not change if this value <= 0"); DEFINE_int32(reload_interval_s, 10, ""); -DEFINE_bool(enable_model_toolkit, false, "enable model toolkit"); +DEFINE_bool(enable_model_toolkit, true, "enable model toolkit"); DEFINE_string(enable_protocol_list, "baidu_std", "set protocol list"); DEFINE_bool(enable_cube, false, "enable cube"); DEFINE_string(general_model_path, "./conf", ""); DEFINE_string(general_model_file, "general_model.prototxt", ""); -DEFINE_bool(enable_general_model, false, "enable general model"); +DEFINE_bool(enable_general_model, true, "enable general model"); const char* START_OP_NAME = "startup_op"; } // namespace predictor diff --git a/core/predictor/framework/resource.cpp b/core/predictor/framework/resource.cpp index e24f3e88b9ad9bce4f625f8e99a34bb815d21f94..e6209c26f789de2a39236076a06336b91c76391f 100644 --- a/core/predictor/framework/resource.cpp +++ b/core/predictor/framework/resource.cpp @@ -155,8 +155,11 @@ int Resource::initialize(const std::string& path, const std::string& file) { // model config int Resource::general_model_initialize(const std::string& path, 
const std::string& file) { + VLOG(2) << "general model path: " << path; + VLOG(2) << "general model file: " << file; if (!FLAGS_enable_general_model) { - return 0; + LOG(ERROR) << "general model is not enabled"; + return -1; } ResourceConf resource_conf; if (configure::read_proto_conf(path, file, &resource_conf) != 0) { @@ -183,6 +186,8 @@ int Resource::general_model_initialize(const std::string& path, _config.reset(new PaddleGeneralModelConfig()); int feed_var_num = model_config.feed_var_size(); + VLOG(2) << "load general model config"; + VLOG(2) << "feed var num: " << feed_var_num; _config->_feed_name.resize(feed_var_num); _config->_feed_type.resize(feed_var_num); _config->_is_lod_feed.resize(feed_var_num); @@ -190,15 +195,23 @@ int Resource::general_model_initialize(const std::string& path, _config->_feed_shape.resize(feed_var_num); for (int i = 0; i < feed_var_num; ++i) { _config->_feed_name[i] = model_config.feed_var(i).name(); + VLOG(2) << "feed var[" << i << "]: " + << _config->_feed_name[i]; _config->_feed_type[i] = model_config.feed_var(i).feed_type(); + VLOG(2) << "feed type[" << i << "]: " + << _config->_feed_type[i]; + if (model_config.feed_var(i).is_lod_tensor()) { + VLOG(2) << "var[" << i << "] is lod tensor"; _config->_feed_shape[i] = {-1}; _config->_is_lod_feed[i] = true; } else { + VLOG(2) << "var[" << i << "] is tensor"; _config->_capacity[i] = 1; _config->_is_lod_feed[i] = false; for (int j = 0; j < model_config.feed_var(i).shape_size(); ++j) { int32_t dim = model_config.feed_var(i).shape(j); + VLOG(2) << "var[" << i << "].shape[" << i << "]: " << dim; _config->_feed_shape[i].push_back(dim); _config->_capacity[i] *= dim; } diff --git a/core/predictor/src/pdserving.cpp b/core/predictor/src/pdserving.cpp index 5547469500af8594e8b863a7abf847a9ab8993e3..e8a7591d0d353758a3cbfd32498a80226d17358d 100644 --- a/core/predictor/src/pdserving.cpp +++ b/core/predictor/src/pdserving.cpp @@ -126,10 +126,6 @@ int main(int argc, char** argv) { return 0; } - if (!FLAGS_g) { - google::SetCommandLineOption("flagfile", "conf/gflags.conf"); - } - google::ParseCommandLineFlags(&argc, &argv, true); g_change_server_port(); diff --git a/core/sdk-cpp/include/common.h b/core/sdk-cpp/include/common.h index 81088660df275039aebdd8b6f0877dd5de443681..f6a600cd4024c5b14c1aa774b5199d1ecc754c28 100644 --- a/core/sdk-cpp/include/common.h +++ b/core/sdk-cpp/include/common.h @@ -55,6 +55,7 @@ #include "core/configure/include/configure_parser.h" #include "core/configure/sdk_configure.pb.h" +#include "core/configure/general_model_config.pb.h" #include "core/sdk-cpp/include/utils.h"