From 50da611b9f9703b237eb7cb613cf38cafdbb0078 Mon Sep 17 00:00:00 2001
From: barrierye
Date: Tue, 10 Mar 2020 11:37:00 +0800
Subject: [PATCH] add code style check

---
 .travis.yml | 3 +-
 core/configure/CMakeLists.txt | 1 -
 core/configure/include/configure_parser.h | 20 +-
 core/cube/cube-server/src/server.cpp | 2 +-
 .../general-client/src/general_model_main.cpp | 21 +-
 core/general-server/op/general_copy_op.cpp | 3 +-
 core/general-server/op/general_copy_op.h | 9 +-
 core/general-server/op/general_infer_op.h | 1 -
 core/general-server/op/general_reader_op.cpp | 6 +-
 core/general-server/op/general_reader_op.h | 9 +-
 .../general-server/op/general_response_op.cpp | 10 +-
 core/general-server/op/general_response_op.h | 1 -
 .../op/general_text_reader_op.cpp | 15 +-
 .../op/general_text_reader_op.h | 8 +-
 .../op/general_text_response_op.h | 1 -
 .../proto/general_model_service.proto | 8 +-
 core/kvdb/include/kvdb/kvdb_impl.h | 15 +-
 core/kvdb/include/kvdb/paddle_rocksdb.h | 3 +-
 core/kvdb/src/mock_param_dict_impl.cpp | 13 +-
 core/kvdb/src/paddle_rocksdb.cpp | 2 +-
 core/kvdb/src/rockskvdb_impl.cpp | 8 +-
 core/pdcodegen/src/pdcodegen.cpp | 12 +-
 core/predictor/common/inner_common.h | 2 +-
 core/predictor/framework/dag_view.cpp | 8 +-
 core/predictor/framework/factory.h | 2 +-
 core/predictor/framework/resource.cpp | 9 +-
 core/predictor/framework/resource.h | 18 +-
 core/predictor/src/pdserving.cpp | 7 +-
 core/predictor/unittest/test_message_op.h | 2 +-
 .../unittest/test_server_manager.cpp | 2 +-
 core/sdk-cpp/include/common.h | 2 +-
 core/sdk-cpp/include/config_manager.h | 4 +-
 core/sdk-cpp/include/factory.h | 2 +-
 core/sdk-cpp/include/predictor_sdk.h | 2 +-
 .../sdk-cpp/proto/general_model_service.proto | 8 +-
 core/sdk-cpp/src/endpoint.cpp | 3 +-
 core/sdk-cpp/src/predictor_sdk.cpp | 2 +-
 core/sdk-cpp/src/variant.cpp | 2 +-
 core/util/CMakeLists.txt | 1 -
 core/util/include/timer.h | 1 -
 core/util/src/CMakeLists.txt | 1 -
 doc/COMPILE.md | 1 -
 doc/CONTRIBUTE.md | 1 -
 doc/IMDB_GO_CLIENT.md | 3 -
 doc/NEW_OPERATOR.md | 3 -
 doc/SERVER_DAG.md | 7 -
 .../include/fluid_cpu_engine.h | 8 +-
 .../include/fluid_gpu_engine.h | 6 +-
 python/examples/bert/benchmark.py | 23 ++-
 python/examples/bert/bert_client.py | 12 +-
 python/examples/bert/bert_reader.py | 25 ++-
 python/examples/bert/bert_web_service.py | 7 +-
 python/examples/bert/prepare_model.py | 21 +-
 python/examples/bert/tokenization.py | 20 +-
 python/examples/criteo_ctr/README.md | 1 -
 python/examples/criteo_ctr/args.py | 189 ++++++++++--------
 python/examples/criteo_ctr/criteo_reader.py | 35 +++-
 python/examples/criteo_ctr/local_train.py | 43 ++--
 python/examples/criteo_ctr/network_conf.py | 60 ++++--
 python/examples/criteo_ctr/test_client.py | 25 ++-
 python/examples/criteo_ctr/test_server.py | 15 ++
 python/examples/fit_a_line/README.md | 2 +-
 python/examples/fit_a_line/benchmark.py | 21 +-
 python/examples/fit_a_line/local_train.py | 41 ++--
 python/examples/fit_a_line/test_client.py | 22 +-
 python/examples/fit_a_line/test_server.py | 15 ++
 python/examples/imdb/benchmark.py | 13 +-
 python/examples/imdb/imdb_reader.py | 12 +-
 python/examples/imdb/imdb_web_service_demo.sh | 1 -
 python/examples/imdb/local_train.py | 1 +
 python/examples/imdb/nets.py | 37 ++--
 python/examples/imdb/test_client.py | 2 +-
 python/examples/imdb/test_client_batch.py | 1 +
 python/examples/imdb/text_classify_service.py | 8 +-
 python/examples/util/get_acc.py | 15 ++
 python/paddle_serving_client/io/__init__.py | 28 ++-
 python/paddle_serving_client/metric/acc.py | 4 +-
 python/paddle_serving_client/metric/auc.py | 26 +--
 .../paddle_serving_client/utils/__init__.py | 14 +-
 python/paddle_serving_server/serve.py | 25 ++-
 python/paddle_serving_server/web_serve.py | 28 ++-
 python/paddle_serving_server/web_service.py | 21 +-
 python/paddle_serving_server_gpu/__init__.py | 6 +-
 python/paddle_serving_server_gpu/serve.py | 15 +-
 .../paddle_serving_server_gpu/web_service.py | 35 ++--
 python/setup.py.in | 1 -
 python/setup.py.server.in | 1 -
 tools/Dockerfile.ci | 27 ++-
 .../SOURCES.txt | 2 +-
 .../dependency_links.txt | 1 -
 .../demo-client/src/general_model.cpp | 63 +++---
 .../demo-client/src/general_model.h | 40 ++--
 .../demo-client/src/general_model_main.cpp | 23 ++-
 .../demo-client/src/load_general_model.cpp | 6 +-
 .../demo-client/src/pybind_general_model.cpp | 48 +++--
 .../demo-serving/op/bert_service_op.h | 4 +-
 .../demo-serving/op/classify_op.cpp | 2 +-
 .../demo-serving/op/classify_op.h | 2 +-
 .../demo-serving/op/ctr_prediction_op.h | 2 +-
 .../demo-serving/op/general_model_op.h | 3 +-
 .../demo-serving/op/kvdb_echo_op.h | 2 +-
 .../op/load_general_model_conf_op.h | 14 +-
 .../cpp_examples/demo-serving/op/reader_op.h | 2 +-
 .../demo-serving/op/text_classification_op.h | 2 +-
 .../demo-serving/op/write_json_op.cpp | 2 +-
 .../demo-serving/op/write_json_op.h | 2 +-
 .../cpp_examples/demo-serving/op/write_op.cpp | 4 +-
 tools/cpp_examples/demo-serving/op/write_op.h | 2 +-
 .../proto/general_model_service.proto | 16 +-
 .../elastic-ctr/client/demo/elastic_ctr.py | 38 ++--
 .../serving/op/elastic_ctr_prediction_op.cpp | 27 ++-
 .../serving/op/elastic_ctr_prediction_op.h | 4 +-
 tools/{serving-build.sh => serving_build.sh} | 0
 tools/serving_check_style.sh | 38 ++++
 114 files changed, 913 insertions(+), 612 deletions(-)
 rename tools/{serving-build.sh => serving_build.sh} (100%)
 create mode 100644 tools/serving_check_style.sh

diff --git a/.travis.yml b/.travis.yml
index 57e7d79f..0be48fdf 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -10,4 +10,5 @@ services:
 before_install:
   - docker build -f ${DOCKERFILE_CPU} -t serving-img:${COMPILE_TYPE} .
 install:
-  - docker run -it -v $PWD:/Serving serving-img:${COMPILE_TYPE} /bin/bash Serving/tools/serving-build.sh $COMPILE_TYPE
+  - if [ $COMPILE_TYPE == "CPU" ]; then docker run -it -v $PWD:/Serving serving-img:${COMPILE_TYPE} /bin/bash Serving/tools/serving_check_style.sh ; fi;
+  - docker run -it -v $PWD:/Serving serving-img:${COMPILE_TYPE} /bin/bash Serving/tools/serving_build.sh $COMPILE_TYPE
diff --git a/core/configure/CMakeLists.txt b/core/configure/CMakeLists.txt
index e6d461d6..685eff01 100644
--- a/core/configure/CMakeLists.txt
+++ b/core/configure/CMakeLists.txt
@@ -87,4 +87,3 @@ add_custom_command(TARGET general_model_config_py_proto POST_BUILD
         WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
 endif()
 endif()
-
diff --git a/core/configure/include/configure_parser.h b/core/configure/include/configure_parser.h
index c69b8605..3f237768 100644
--- a/core/configure/include/configure_parser.h
+++ b/core/configure/include/configure_parser.h
@@ -20,16 +20,16 @@ namespace baidu {
 namespace paddle_serving {
 namespace configure {
-    int read_proto_conf(const std::string &conf_full_path,
-                        google::protobuf::Message *conf);
-
-    int read_proto_conf(const std::string &conf_path,
-                        const std::string &conf_file,
-                        google::protobuf::Message *conf);
-
-    int write_proto_conf(google::protobuf::Message *message,
-                         const std::string &output_path,
-                         const std::string &output_file);
+int read_proto_conf(const std::string &conf_full_path,
+                    google::protobuf::Message *conf);
+
+int read_proto_conf(const std::string &conf_path,
+                    const std::string &conf_file,
+                    google::protobuf::Message *conf);
+
+int write_proto_conf(google::protobuf::Message *message,
+                     const std::string &output_path,
+                     const std::string &output_file);
 }  // namespace configure
 }  // namespace paddle_serving
diff --git a/core/cube/cube-server/src/server.cpp b/core/cube/cube-server/src/server.cpp
index f431e4a3..fba542d8 100644
--- a/core/cube/cube-server/src/server.cpp
+++ b/core/cube/cube-server/src/server.cpp
@@ -12,9 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
+#include "core/cube/cube-server/include/cube/server.h" #include #include "core/cube/cube-server/include/cube/framework.h" -#include "core/cube/cube-server/include/cube/server.h" namespace rec { namespace mcube { diff --git a/core/general-client/src/general_model_main.cpp b/core/general-client/src/general_model_main.cpp index d5090b1f..9fd6ac21 100644 --- a/core/general-client/src/general_model_main.cpp +++ b/core/general-client/src/general_model_main.cpp @@ -17,18 +17,18 @@ #include "core/general-client/include/general_model.h" -using namespace std; +using namespace std; // NOLINT using baidu::paddle_serving::general_model::PredictorClient; using baidu::paddle_serving::general_model::FetchedMap; -int main(int argc, char * argv[]) { - PredictorClient * client = new PredictorClient(); +int main(int argc, char* argv[]) { + PredictorClient* client = new PredictorClient(); client->init("inference.conf"); client->set_predictor_conf("./", "predictor.conf"); client->create_predictor(); - std::vector > float_feed; - std::vector > int_feed; + std::vector> float_feed; + std::vector> int_feed; std::vector float_feed_name; std::vector int_feed_name = {"words", "label"}; std::vector fetch_name = {"cost", "acc", "prediction"}; @@ -53,13 +53,14 @@ int main(int argc, char * argv[]) { cin >> label; int_feed.push_back({label}); - FetchedMap result; - client->predict( - float_feed, float_feed_name, - int_feed, int_feed_name, fetch_name, - &result); + client->predict(float_feed, + float_feed_name, + int_feed, + int_feed_name, + fetch_name, + &result); cout << label << "\t" << result["prediction"][1] << endl; diff --git a/core/general-server/op/general_copy_op.cpp b/core/general-server/op/general_copy_op.cpp index 8fc63f7a..a7f7d290 100644 --- a/core/general-server/op/general_copy_op.cpp +++ b/core/general-server/op/general_copy_op.cpp @@ -69,8 +69,7 @@ int GeneralCopyOp::inference() { for (int i = 0; i < out->size(); ++i) { int64_t *src_ptr = static_cast(in->at(i).data.data()); - out->at(i).data.Resize( - out->at(i).lod[0].back() * sizeof(int64_t)); + out->at(i).data.Resize(out->at(i).lod[0].back() * sizeof(int64_t)); out->at(i).shape = {out->at(i).lod[0].back(), 1}; int64_t *tgt_ptr = static_cast(out->at(i).data.data()); for (int j = 0; j < out->at(i).lod[0].back(); ++j) { diff --git a/core/general-server/op/general_copy_op.h b/core/general-server/op/general_copy_op.h index 38fb25c2..89627ffb 100644 --- a/core/general-server/op/general_copy_op.h +++ b/core/general-server/op/general_copy_op.h @@ -24,23 +24,22 @@ #include "paddle_inference_api.h" // NOLINT #endif #include -#include "core/predictor/framework/resource.h" -#include "core/general-server/op/general_infer_helper.h" #include "core/general-server/general_model_service.pb.h" +#include "core/general-server/op/general_infer_helper.h" +#include "core/predictor/framework/resource.h" namespace baidu { namespace paddle_serving { namespace serving { -class GeneralCopyOp : - public baidu::paddle_serving::predictor::OpWithChannel { +class GeneralCopyOp + : public baidu::paddle_serving::predictor::OpWithChannel { public: typedef std::vector TensorVector; DECLARE_OP(GeneralCopyOp); int inference(); - }; } // namespace serving diff --git a/core/general-server/op/general_infer_op.h b/core/general-server/op/general_infer_op.h index 6c8d9fdc..ff0b210a 100644 --- a/core/general-server/op/general_infer_op.h +++ b/core/general-server/op/general_infer_op.h @@ -39,7 +39,6 @@ class GeneralInferOp DECLARE_OP(GeneralInferOp); int inference(); - }; } // namespace serving diff 
--git a/core/general-server/op/general_reader_op.cpp b/core/general-server/op/general_reader_op.cpp index 93f6cfbb..8695da25 100644 --- a/core/general-server/op/general_reader_op.cpp +++ b/core/general-server/op/general_reader_op.cpp @@ -188,8 +188,7 @@ int GeneralReaderOp::inference() { for (int j = 0; j < batch_size; ++j) { int elem_num = req->insts(j).tensor_array(i).int64_data_size(); for (int k = 0; k < elem_num; ++k) { - dst_ptr[offset + k] = - req->insts(j).tensor_array(i).int64_data(k); + dst_ptr[offset + k] = req->insts(j).tensor_array(i).int64_data(k); } if (out->at(i).lod.size() == 1) { offset = out->at(i).lod[0][j + 1]; @@ -203,8 +202,7 @@ int GeneralReaderOp::inference() { for (int j = 0; j < batch_size; ++j) { int elem_num = req->insts(j).tensor_array(i).float_data_size(); for (int k = 0; k < elem_num; ++k) { - dst_ptr[offset + k] = - req->insts(j).tensor_array(i).float_data(k); + dst_ptr[offset + k] = req->insts(j).tensor_array(i).float_data(k); } if (out->at(i).lod.size() == 1) { offset = out->at(i).lod[0][j + 1]; diff --git a/core/general-server/op/general_reader_op.h b/core/general-server/op/general_reader_op.h index 137fad98..c45d6ad5 100644 --- a/core/general-server/op/general_reader_op.h +++ b/core/general-server/op/general_reader_op.h @@ -24,24 +24,23 @@ #include "paddle_inference_api.h" // NOLINT #endif #include -#include "core/predictor/framework/resource.h" -#include "core/general-server/op/general_infer_helper.h" #include "core/general-server/general_model_service.pb.h" #include "core/general-server/load_general_model_service.pb.h" +#include "core/general-server/op/general_infer_helper.h" +#include "core/predictor/framework/resource.h" namespace baidu { namespace paddle_serving { namespace serving { -class GeneralReaderOp : public baidu::paddle_serving::predictor::OpWithChannel< - GeneralBlob> { +class GeneralReaderOp + : public baidu::paddle_serving::predictor::OpWithChannel { public: typedef std::vector TensorVector; DECLARE_OP(GeneralReaderOp); int inference(); - }; } // namespace serving diff --git a/core/general-server/op/general_response_op.cpp b/core/general-server/op/general_response_op.cpp index 6d9b7971..c5248227 100644 --- a/core/general-server/op/general_response_op.cpp +++ b/core/general-server/op/general_response_op.cpp @@ -122,8 +122,7 @@ int GeneralResponseOp::inference() { } else { for (int j = 0; j < batch_size; ++j) { FetchInst *fetch_p = res->mutable_insts(j); - fetch_p->mutable_tensor_array(var_idx)->add_int64_data( - data_ptr[0]); + fetch_p->mutable_tensor_array(var_idx)->add_int64_data(data_ptr[0]); } } } @@ -143,16 +142,15 @@ int GeneralResponseOp::inference() { if (var_size == batch_size) { for (int j = 0; j < batch_size; ++j) { for (int k = j * cap; k < (j + 1) * cap; ++k) { - FetchInst * fetch_p = res->mutable_insts(j); + FetchInst *fetch_p = res->mutable_insts(j); fetch_p->mutable_tensor_array(var_idx)->add_float_data( data_ptr[k]); } } } else { for (int j = 0; j < batch_size; ++j) { - FetchInst * fetch_p = res->mutable_insts(j); - fetch_p->mutable_tensor_array(var_idx)->add_float_data( - data_ptr[0]); + FetchInst *fetch_p = res->mutable_insts(j); + fetch_p->mutable_tensor_array(var_idx)->add_float_data(data_ptr[0]); } } } diff --git a/core/general-server/op/general_response_op.h b/core/general-server/op/general_response_op.h index 95b9f770..4b0f6ed1 100644 --- a/core/general-server/op/general_response_op.h +++ b/core/general-server/op/general_response_op.h @@ -39,7 +39,6 @@ class GeneralResponseOp DECLARE_OP(GeneralResponseOp); int 
inference(); - }; } // namespace serving diff --git a/core/general-server/op/general_text_reader_op.cpp b/core/general-server/op/general_text_reader_op.cpp index 81df8171..154e975d 100644 --- a/core/general-server/op/general_text_reader_op.cpp +++ b/core/general-server/op/general_text_reader_op.cpp @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include "core/general-server/op/general_text_reader_op.h" #include #include #include #include -#include "core/general-server/op/general_text_reader_op.h" #include "core/predictor/framework/infer.h" #include "core/predictor/framework/memory.h" #include "core/util/include/timer.h" @@ -32,7 +32,6 @@ using baidu::paddle_serving::predictor::general_model::Request; using baidu::paddle_serving::predictor::general_model::FeedInst; using baidu::paddle_serving::predictor::PaddleGeneralModelConfig; - int GeneralTextReaderOp::inference() { // reade request from client const Request *req = dynamic_cast(get_request_message()); @@ -132,11 +131,9 @@ int GeneralTextReaderOp::inference() { int64_t *dst_ptr = static_cast(out->at(i).data.data()); int offset = 0; for (int j = 0; j < batch_size; ++j) { - for (int k = 0; - k < req->insts(j).tensor_array(i).int_data_size(); + for (int k = 0; k < req->insts(j).tensor_array(i).int_data_size(); ++k) { - dst_ptr[offset + k] = - req->insts(j).tensor_array(i).int_data(k); + dst_ptr[offset + k] = req->insts(j).tensor_array(i).int_data(k); } if (out->at(i).lod.size() == 1) { offset = out->at(i).lod[0][j + 1]; @@ -148,11 +145,9 @@ int GeneralTextReaderOp::inference() { float *dst_ptr = static_cast(out->at(i).data.data()); int offset = 0; for (int j = 0; j < batch_size; ++j) { - for (int k = 0; - k < req->insts(j).tensor_array(i).int_data_size(); + for (int k = 0; k < req->insts(j).tensor_array(i).int_data_size(); ++k) { - dst_ptr[offset + k] = - req->insts(j).tensor_array(i).int_data(k); + dst_ptr[offset + k] = req->insts(j).tensor_array(i).int_data(k); } if (out->at(i).lod.size() == 1) { offset = out->at(i).lod[0][j + 1]; diff --git a/core/general-server/op/general_text_reader_op.h b/core/general-server/op/general_text_reader_op.h index 80573a15..ca134256 100644 --- a/core/general-server/op/general_text_reader_op.h +++ b/core/general-server/op/general_text_reader_op.h @@ -24,17 +24,17 @@ #include "paddle_inference_api.h" // NOLINT #endif #include -#include "core/predictor/framework/resource.h" -#include "core/general-server/op/general_infer_helper.h" #include "core/general-server/general_model_service.pb.h" #include "core/general-server/load_general_model_service.pb.h" +#include "core/general-server/op/general_infer_helper.h" +#include "core/predictor/framework/resource.h" namespace baidu { namespace paddle_serving { namespace serving { -class GeneralTextReaderOp : - public baidu::paddle_serving::predictor::OpWithChannel { +class GeneralTextReaderOp + : public baidu::paddle_serving::predictor::OpWithChannel { public: typedef std::vector TensorVector; diff --git a/core/general-server/op/general_text_response_op.h b/core/general-server/op/general_text_response_op.h index 5efefefb..52f7bbf0 100644 --- a/core/general-server/op/general_text_response_op.h +++ b/core/general-server/op/general_text_response_op.h @@ -40,7 +40,6 @@ class GeneralTextResponseOp DECLARE_OP(GeneralTextResponseOp); int inference(); - }; } // namespace serving diff --git a/core/general-server/proto/general_model_service.proto b/core/general-server/proto/general_model_service.proto 
index 871f5de6..09e2854d 100644
--- a/core/general-server/proto/general_model_service.proto
+++ b/core/general-server/proto/general_model_service.proto
@@ -28,13 +28,9 @@ message Tensor {
   repeated int32 shape = 6;
 };

-message FeedInst {
-  repeated Tensor tensor_array = 1;
-};
+message FeedInst { repeated Tensor tensor_array = 1; };

-message FetchInst {
-  repeated Tensor tensor_array = 1;
-};
+message FetchInst { repeated Tensor tensor_array = 1; };

 message Request {
   repeated FeedInst insts = 1;
diff --git a/core/kvdb/include/kvdb/kvdb_impl.h b/core/kvdb/include/kvdb/kvdb_impl.h
index cab06cad..95437d6e 100644
--- a/core/kvdb/include/kvdb/kvdb_impl.h
+++ b/core/kvdb/include/kvdb/kvdb_impl.h
@@ -27,11 +27,11 @@
 // limitations under the License.
 #pragma once
-#include
+#include  // NOLINT
+#include
 #include
 #include
 #include
-#include
 class AbstractKVDB;
 class FileReader;
 class ParamDict;
@@ -65,7 +65,7 @@ class FileReader {
     std::string data;
     FILE *stream = nullptr;
     const int max_buffer = 256;
-    char buffer[max_buffer];
+    char buffer[max_buffer];  // NOLINT
     cmd.append(" 2>&1");
     stream = popen(cmd.c_str(), "r");
     if (stream) {
@@ -76,7 +76,8 @@ class FileReader {
       return data;
     };
     std::string cmd = "md5sum " + this->filename_;
-    // TODO: throw exception if error occurs during execution of shell command
+    // NOLINT TODO: throw exception if error occurs during execution of shell
+    // command
     std::string md5val = getCmdOut(cmd);
     this->time_stamp_ = md5val == this->last_md5_val_ ? this->time_stamp_
@@ -93,7 +94,7 @@ class FileReader {
     return this->time_stamp_;
   }

-  inline virtual ~FileReader(){};
+  inline virtual ~FileReader() {}

 private:
  std::string filename_;
@@ -128,7 +129,7 @@ class ParamDict {
   virtual ~ParamDict();

 private:
-  std::function<std::vector<float>(std::string)> read_func_;
+  std::function<std::vector<float>(std::string)> read_func_;  // NOLINT
   std::vector<FileReaderPtr> file_reader_lst_;
   AbsKVDBPtr front_db, back_db;
 };
@@ -139,5 +140,5 @@ class ParamDictMgr {
   void InsertParamDict(std::string, ParamDictPtr);

 private:
-  std::unordered_map<std::string, ParamDictPtr> ParamDictMap;
+  std::unordered_map<std::string, ParamDictPtr> ParamDictMap;  // NOLINT
 };
diff --git a/core/kvdb/include/kvdb/paddle_rocksdb.h b/core/kvdb/include/kvdb/paddle_rocksdb.h
index f11e11d1..5aa82689 100644
--- a/core/kvdb/include/kvdb/paddle_rocksdb.h
+++ b/core/kvdb/include/kvdb/paddle_rocksdb.h
@@ -25,7 +25,7 @@
 class RocksDBWrapper {
 public:
-  RocksDBWrapper(std::string db_name);
+  RocksDBWrapper(std::string db_name);  // NOLINT
   std::string Get(std::string key);
   bool Put(std::string key, std::string value);

@@ -33,6 +33,7 @@ class RocksDBWrapper {
   static std::shared_ptr<RocksDBWrapper> RocksDBWrapperFactory(
       std::string db_name = "SparseMatrix");
   void Close();
+
 private:
  rocksdb::DB *db_;
  std::string db_name_;
diff --git a/core/kvdb/src/mock_param_dict_impl.cpp b/core/kvdb/src/mock_param_dict_impl.cpp
index 5f76ebce..106bda3e 100644
--- a/core/kvdb/src/mock_param_dict_impl.cpp
+++ b/core/kvdb/src/mock_param_dict_impl.cpp
@@ -16,7 +16,7 @@
 #include
 #include
 #include
-#include
+#include  // NOLINT
 #include "core/kvdb/include/kvdb/rocksdb_impl.h"

 std::vector<FileReaderPtr> ParamDict::GetDictReaderLst() {
@@ -33,8 +33,10 @@ void ParamDict::SetFileReaderLst(std::vector<std::string> lst) {

 std::vector<float> ParamDict::GetSparseValue(std::string feasign,
                                              std::string slot) {
-  auto BytesToFloat = [](uint8_t* byte_array) { return *((float*)byte_array); };
-  // TODO: the concatation of feasign and slot is TBD.
+  auto BytesToFloat = [](uint8_t* byte_array) {
+    return *((float*)byte_array);  // NOLINT
+  };
+  // NOLINT TODO: the concatation of feasign and slot is TBD.
   std::string result = front_db->Get(feasign + slot);
   std::vector<float> value;
   if (result == "NOT_FOUND") return value;
@@ -87,7 +89,7 @@ bool ParamDict::InsertSparseValue(std::string feasign,
     value.push_back(raw_values_ptr[i]);
   }
   back_db->Set(key, value);
-  // TODO: change stateless to stateful
+  // NOLINT TODO: change stateless to stateful
   return true;
 }

@@ -140,5 +142,4 @@ void ParamDict::CreateKVDB() {
   this->back_db->CreateDB();
 }

-ParamDict::~ParamDict() {
-}
+ParamDict::~ParamDict() {}
diff --git a/core/kvdb/src/paddle_rocksdb.cpp b/core/kvdb/src/paddle_rocksdb.cpp
index a60b93c7..2238d41d 100644
--- a/core/kvdb/src/paddle_rocksdb.cpp
+++ b/core/kvdb/src/paddle_rocksdb.cpp
@@ -51,7 +51,7 @@ void RocksDBWrapper::SetDBName(std::string db_name) {
 void RocksDBWrapper::Close() {
   if (db_ != nullptr) {
     db_->Close();
-    delete(db_);
+    delete (db_);
     db_ = nullptr;
   }
 }
diff --git a/core/kvdb/src/rockskvdb_impl.cpp b/core/kvdb/src/rockskvdb_impl.cpp
index 4b2c177a..9e6cdd6b 100644
--- a/core/kvdb/src/rockskvdb_impl.cpp
+++ b/core/kvdb/src/rockskvdb_impl.cpp
@@ -32,12 +32,8 @@ void RocksKVDB::Set(std::string key, std::string value) {
   return;
 }

-void RocksKVDB::Close() {
-  this->db_->Close();
-}
+void RocksKVDB::Close() { this->db_->Close(); }

 std::string RocksKVDB::Get(std::string key) { return this->db_->Get(key); }

-RocksKVDB::~RocksKVDB() {
-  this->db_->Close();
-}
+RocksKVDB::~RocksKVDB() { this->db_->Close(); }
diff --git a/core/pdcodegen/src/pdcodegen.cpp b/core/pdcodegen/src/pdcodegen.cpp
index 007c43c6..af4081a9 100644
--- a/core/pdcodegen/src/pdcodegen.cpp
+++ b/core/pdcodegen/src/pdcodegen.cpp
@@ -15,14 +15,14 @@
 #include
 #include "boost/algorithm/string.hpp"
 #include "boost/scoped_ptr.hpp"
+#include "core/pdcodegen/pds_option.pb.h"
+#include "core/pdcodegen/plugin/strutil.h"
+#include "core/pdcodegen/plugin/substitute.h"
 #include "google/protobuf/compiler/code_generator.h"
 #include "google/protobuf/compiler/plugin.h"
 #include "google/protobuf/descriptor.h"
 #include "google/protobuf/io/printer.h"
 #include "google/protobuf/io/zero_copy_stream.h"
-#include "core/pdcodegen/pds_option.pb.h"
-#include "core/pdcodegen/plugin/strutil.h"
-#include "core/pdcodegen/plugin/substitute.h"
 using std::string;
 using google::protobuf::Descriptor;
 using google::protobuf::FileDescriptor;
@@ -115,7 +115,8 @@ class PdsCodeGenerator : public CodeGenerator {
       printer.Print("#include \"core/predictor/common/inner_common.h\"\n");
       printer.Print("#include \"core/predictor/framework/service.h\"\n");
       printer.Print("#include \"core/predictor/framework/manager.h\"\n");
-      printer.Print("#include \"core/predictor/framework/service_manager.h\"\n");
+      printer.Print(
+          "#include \"core/predictor/framework/service_manager.h\"\n");
     }
     if (generate_stub) {
       printer.Print("#include \n");
@@ -845,7 +846,8 @@ class PdsCodeGenerator : public CodeGenerator {
       printer.Print("#include \"core/predictor/common/inner_common.h\"\n");
       printer.Print("#include \"core/predictor/framework/service.h\"\n");
       printer.Print("#include \"core/predictor/framework/manager.h\"\n");
-      printer.Print("#include \"core/predictor/framework/service_manager.h\"\n");
+      printer.Print(
+          "#include \"core/predictor/framework/service_manager.h\"\n");
     }
     if (generate_stub) {
       printer.Print("#include \n");
diff --git a/core/predictor/common/inner_common.h b/core/predictor/common/inner_common.h
index 4d9b2c19..96b8a802 100644
--- a/core/predictor/common/inner_common.h
+++ b/core/predictor/common/inner_common.h
@@ -52,9 +52,9 @@
 #include "glog/raw_logging.h"

+#include "core/configure/general_model_config.pb.h"
 #include "core/configure/include/configure_parser.h"
 #include "core/configure/server_configure.pb.h"
-#include "core/configure/general_model_config.pb.h"

 #include "core/predictor/common/constant.h"
 #include "core/predictor/common/types.h"
diff --git a/core/predictor/framework/dag_view.cpp b/core/predictor/framework/dag_view.cpp
index 42a61b08..743e7341 100644
--- a/core/predictor/framework/dag_view.cpp
+++ b/core/predictor/framework/dag_view.cpp
@@ -45,7 +45,7 @@ int DagView::init(Dag* dag, const std::string& service_name) {
                    << "at:" << si;
       return ERR_MEM_ALLOC_FAILURE;
     }
-    VLOG(2) << "stage[" << si << "] name: " << stage->full_name;
+    VLOG(2) << "stage[" << si << "] name: " << stage->full_name;
     VLOG(2) << "stage[" << si << "] node size: " << stage->nodes.size();
     vstage->full_name = service_name + NAME_DELIMITER + stage->full_name;
     uint32_t node_size = stage->nodes.size();
@@ -74,7 +74,7 @@ int DagView::init(Dag* dag, const std::string& service_name) {
         LOG(WARNING) << "Failed init op, type:" << node->type;
         return ERR_INTERNAL_FAILURE;
       }
-
+
       op->set_full_name(service_name + NAME_DELIMITER + node->full_name);
       vnode->conf = node;
       vnode->op = op;
@@ -85,9 +85,9 @@ int DagView::init(Dag* dag, const std::string& service_name) {
       VLOG(2) << "set op pre name: \n"
               << "current op name: " << vstage->nodes.back()->op->op_name()
               << " previous op name: "
-              << _view[si-1]->nodes.back()->op->op_name();
+              << _view[si - 1]->nodes.back()->op->op_name();
       vstage->nodes.back()->op->set_pre_node_name(
-          _view[si-1]->nodes.back()->op->op_name());
+          _view[si - 1]->nodes.back()->op->op_name());
     }
     _view.push_back(vstage);
   }
diff --git a/core/predictor/framework/factory.h b/core/predictor/framework/factory.h
index 033bfb71..8d5fc9a1 100644
--- a/core/predictor/framework/factory.h
+++ b/core/predictor/framework/factory.h
@@ -16,8 +16,8 @@
 #include
 #include
 #include
-#include "glog/raw_logging.h"
 #include "core/predictor/common/inner_common.h"
+#include "glog/raw_logging.h"
 namespace baidu {
 namespace paddle_serving {
 namespace predictor {
diff --git a/core/predictor/framework/resource.cpp b/core/predictor/framework/resource.cpp
index 98571ad7..943b1715 100644
--- a/core/predictor/framework/resource.cpp
+++ b/core/predictor/framework/resource.cpp
@@ -197,13 +197,10 @@ int Resource::general_model_initialize(const std::string& path,
   for (int i = 0; i < feed_var_num; ++i) {
     _config->_feed_name[i] = model_config.feed_var(i).name();
     _config->_feed_alias_name[i] = model_config.feed_var(i).alias_name();
-    VLOG(2) << "feed var[" << i << "]: "
-            << _config->_feed_name[i];
-    VLOG(2) << "feed var[" << i << "]: "
-            << _config->_feed_alias_name[i];
+    VLOG(2) << "feed var[" << i << "]: " << _config->_feed_name[i];
+    VLOG(2) << "feed var[" << i << "]: " << _config->_feed_alias_name[i];
     _config->_feed_type[i] = model_config.feed_var(i).feed_type();
-    VLOG(2) << "feed type[" << i << "]: "
-            << _config->_feed_type[i];
+    VLOG(2) << "feed type[" << i << "]: " << _config->_feed_type[i];

     if (model_config.feed_var(i).is_lod_tensor()) {
       VLOG(2) << "var[" << i << "] is lod tensor";
diff --git a/core/predictor/framework/resource.h b/core/predictor/framework/resource.h
index 05820f9f..eedd4fc1 100644
--- a/core/predictor/framework/resource.h
+++ b/core/predictor/framework/resource.h
@@ -13,10 +13,10 @@
 // limitations under the License.
 #pragma once
+#include
 #include
 #include
 #include
-#include
 #include "core/cube/cube-api/include/cube_api.h"
 #include "core/kvdb/include/kvdb/paddle_rocksdb.h"
 #include "core/predictor/common/inner_common.h"
@@ -36,15 +36,15 @@ class PaddleGeneralModelConfig {
 public:
  std::vector<std::string> _feed_name;
  std::vector<std::string> _feed_alias_name;
-  std::vector<int> _feed_type;    // 0 int64, 1 float
-  std::vector<bool> _is_lod_feed; // true lod tensor
+  std::vector<int> _feed_type;     // 0 int64, 1 float
+  std::vector<bool> _is_lod_feed;  // true lod tensor
   std::vector<bool> _is_lod_fetch;  // whether a fetch var is lod_tensor
-  std::vector<int> _capacity; // capacity for each tensor
-  /*
-  feed_shape_ for feeded variable
-  feed_shape_[i][j] represents the jth dim for ith input Tensor
-  if is_lod_feed_[i] == False, feed_shape_[i][0] = -1
-  */
+  std::vector<int> _capacity;  // capacity for each tensor
+  /*
+    feed_shape_ for feeded variable
+    feed_shape_[i][j] represents the jth dim for ith input Tensor
+    if is_lod_feed_[i] == False, feed_shape_[i][0] = -1
+  */
   std::vector<std::vector<int>> _feed_shape;

   std::vector<std::string> _fetch_name;
diff --git a/core/predictor/src/pdserving.cpp b/core/predictor/src/pdserving.cpp
index f0fedacf..fe8693de 100644
--- a/core/predictor/src/pdserving.cpp
+++ b/core/predictor/src/pdserving.cpp
@@ -99,8 +99,8 @@ static void g_change_server_port() {
   if (read_proto_conf(FLAGS_inferservice_path.c_str(),
                       FLAGS_inferservice_file.c_str(),
                       &conf) != 0) {
-    VLOG(2) << "failed to load configure[" << FLAGS_inferservice_path
-            << "," << FLAGS_inferservice_file << "].";
+    VLOG(2) << "failed to load configure[" << FLAGS_inferservice_path << ","
+            << FLAGS_inferservice_file << "].";
     return;
   }
   uint32_t port = conf.port();
@@ -157,8 +157,7 @@ int main(int argc, char** argv) {
     mkdir(FLAGS_log_dir.c_str(), 0777);
     ret = stat(FLAGS_log_dir.c_str(), &st_buf);
     if (ret != 0) {
-      VLOG(2) << "Log path " << FLAGS_log_dir
-              << " not exist, and create fail";
+      VLOG(2) << "Log path " << FLAGS_log_dir << " not exist, and create fail";
       return -1;
     }
   }
diff --git a/core/predictor/unittest/test_message_op.h b/core/predictor/unittest/test_message_op.h
index 5b4a2e5f..6ff8fc42 100644
--- a/core/predictor/unittest/test_message_op.h
+++ b/core/predictor/unittest/test_message_op.h
@@ -15,8 +15,8 @@
 #pragma once
 #include
 #include "core/predictor/framework/channel.h"
-#include "core/predictor/op/op.h"
 #include "core/predictor/msg_data.pb.h"
+#include "core/predictor/op/op.h"

 namespace baidu {
 namespace paddle_serving {
diff --git a/core/predictor/unittest/test_server_manager.cpp b/core/predictor/unittest/test_server_manager.cpp
index df28e2ee..07f02256 100644
--- a/core/predictor/unittest/test_server_manager.cpp
+++ b/core/predictor/unittest/test_server_manager.cpp
@@ -13,7 +13,7 @@
 // limitations under the License.

 #include "core/predictor/unittest/test_server_manager.h"  // TestServerManager
-#include  // FLAGS
+#include   // FLAGS
 #include
 #include "core/predictor/framework/server.h"  // ServerManager
diff --git a/core/sdk-cpp/include/common.h b/core/sdk-cpp/include/common.h
index f6a600cd..8cfe979e 100644
--- a/core/sdk-cpp/include/common.h
+++ b/core/sdk-cpp/include/common.h
@@ -53,9 +53,9 @@
 #include "json2pb/json_to_pb.h"
 #endif

+#include "core/configure/general_model_config.pb.h"
 #include "core/configure/include/configure_parser.h"
 #include "core/configure/sdk_configure.pb.h"
-#include "core/configure/general_model_config.pb.h"

 #include "core/sdk-cpp/include/utils.h"
diff --git a/core/sdk-cpp/include/config_manager.h b/core/sdk-cpp/include/config_manager.h
index 44134716..6cd50e7f 100644
--- a/core/sdk-cpp/include/config_manager.h
+++ b/core/sdk-cpp/include/config_manager.h
@@ -32,9 +32,9 @@ class EndpointConfigManager {
   EndpointConfigManager()
       : _last_update_timestamp(0), _current_endpointmap_id(1) {}

-  int create(const std::string & sdk_desc_str);
+  int create(const std::string& sdk_desc_str);

-  int load(const std::string & sdk_desc_str);
+  int load(const std::string& sdk_desc_str);

   int create(const char* path, const char* file);
diff --git a/core/sdk-cpp/include/factory.h b/core/sdk-cpp/include/factory.h
index 7e9e2227..4a3d988a 100644
--- a/core/sdk-cpp/include/factory.h
+++ b/core/sdk-cpp/include/factory.h
@@ -16,9 +16,9 @@
 #include
 #include
 #include
-#include "glog/raw_logging.h"
 #include "core/sdk-cpp/include/common.h"
 #include "core/sdk-cpp/include/stub_impl.h"
+#include "glog/raw_logging.h"

 namespace baidu {
 namespace paddle_serving {
diff --git a/core/sdk-cpp/include/predictor_sdk.h b/core/sdk-cpp/include/predictor_sdk.h
index 6b2e8a2c..65d80672 100644
--- a/core/sdk-cpp/include/predictor_sdk.h
+++ b/core/sdk-cpp/include/predictor_sdk.h
@@ -31,7 +31,7 @@ class PredictorApi {

   int register_all();

-  int create(const std::string & sdk_desc_str);
+  int create(const std::string& sdk_desc_str);

   int create(const char* path, const char* file);
diff --git a/core/sdk-cpp/proto/general_model_service.proto b/core/sdk-cpp/proto/general_model_service.proto
index 95c2abf0..827bb880 100644
--- a/core/sdk-cpp/proto/general_model_service.proto
+++ b/core/sdk-cpp/proto/general_model_service.proto
@@ -28,13 +28,9 @@ message Tensor {
   repeated int32 shape = 6;
 };

-message FeedInst {
-  repeated Tensor tensor_array = 1;
-};
+message FeedInst { repeated Tensor tensor_array = 1; };

-message FetchInst {
-  repeated Tensor tensor_array = 1;
-};
+message FetchInst { repeated Tensor tensor_array = 1; };

 message Request {
   repeated FeedInst insts = 1;
diff --git a/core/sdk-cpp/src/endpoint.cpp b/core/sdk-cpp/src/endpoint.cpp
index fe0bf024..517fe6dd 100644
--- a/core/sdk-cpp/src/endpoint.cpp
+++ b/core/sdk-cpp/src/endpoint.cpp
@@ -35,8 +35,7 @@ int Endpoint::initialize(const EndpointInfo& ep_info) {
       return -1;
     }
     _variant_list.push_back(var);
-    VLOG(2) << "Succ create variant: " << vi
-            << ", endpoint:" << _endpoint_name;
+    VLOG(2) << "Succ create variant: " << vi << ", endpoint:" << _endpoint_name;
   }

   return 0;
diff --git a/core/sdk-cpp/src/predictor_sdk.cpp b/core/sdk-cpp/src/predictor_sdk.cpp
index c9f4a694..36eb1866 100644
--- a/core/sdk-cpp/src/predictor_sdk.cpp
+++ b/core/sdk-cpp/src/predictor_sdk.cpp
@@ -30,7 +30,7 @@ int PredictorApi::register_all() {
   return 0;
 }

-int PredictorApi::create(const std::string & api_desc_str) {
+int PredictorApi::create(const std::string& api_desc_str) {
   VLOG(2) << api_desc_str;
   if (register_all() != 0) {
     LOG(ERROR) << "Failed do register all!";
diff --git a/core/sdk-cpp/src/variant.cpp b/core/sdk-cpp/src/variant.cpp
index ae1d787b..6f9be664 100644
--- a/core/sdk-cpp/src/variant.cpp
+++ b/core/sdk-cpp/src/variant.cpp
@@ -54,7 +54,7 @@ int Variant::initialize(const EndpointInfo& ep_info,

   if (_stub_map.size() > 0) {
     VLOG(2) << "Initialize variants from VariantInfo"
-        << ", stubs count: " << _stub_map.size();
+            << ", stubs count: " << _stub_map.size();
     return 0;
   }
diff --git a/core/util/CMakeLists.txt b/core/util/CMakeLists.txt
index 7d3b2fd5..290f2bbc 100644
--- a/core/util/CMakeLists.txt
+++ b/core/util/CMakeLists.txt
@@ -1,3 +1,2 @@
 include(src/CMakeLists.txt)
 add_library(utils ${util_srcs})
-
diff --git a/core/util/include/timer.h b/core/util/include/timer.h
index 9d547bc1..6b91a27f 100644
--- a/core/util/include/timer.h
+++ b/core/util/include/timer.h
@@ -15,7 +15,6 @@ limitations under the License. */
 #pragma once
 #include
-
 namespace baidu {
 namespace paddle_serving {
diff --git a/core/util/src/CMakeLists.txt b/core/util/src/CMakeLists.txt
index 0a45dc9e..8b1d4f5e 100644
--- a/core/util/src/CMakeLists.txt
+++ b/core/util/src/CMakeLists.txt
@@ -1,3 +1,2 @@
 FILE(GLOB srcs ${CMAKE_CURRENT_LIST_DIR}/*.cc)
 LIST(APPEND util_srcs ${srcs})
-
diff --git a/doc/COMPILE.md b/doc/COMPILE.md
index e93d0b7e..4ddbf701 100644
--- a/doc/COMPILE.md
+++ b/doc/COMPILE.md
@@ -27,4 +27,3 @@ make -j10
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=/home/users/dongdaxiang/software/baidu/third-party/python/bin/python -DCLIENT_ONLY=ON ..
 make -j10
 ```
-
diff --git a/doc/CONTRIBUTE.md b/doc/CONTRIBUTE.md
index a7ebba0a..d788ac27 100644
--- a/doc/CONTRIBUTE.md
+++ b/doc/CONTRIBUTE.md
@@ -152,4 +152,3 @@ GLOG_minloglevel=1 bin/serving
 2 -ERROR
 3 - FATAL (Be careful as FATAL log will generate a coredump)
-
diff --git a/doc/IMDB_GO_CLIENT.md b/doc/IMDB_GO_CLIENT.md
index ec3a3a8c..5b101925 100644
--- a/doc/IMDB_GO_CLIENT.md
+++ b/doc/IMDB_GO_CLIENT.md
@@ -193,6 +193,3 @@ total num: 25000
 acc num: 22014
 acc: 0.88056
 ```
-
-
-
diff --git a/doc/NEW_OPERATOR.md b/doc/NEW_OPERATOR.md
index 8ff4b3e2..f839be94 100644
--- a/doc/NEW_OPERATOR.md
+++ b/doc/NEW_OPERATOR.md
@@ -143,6 +143,3 @@ self.op_dict = {
   "general_dist_kv": "GeneralDistKVOp"
 }
 ```
-
-
-
diff --git a/doc/SERVER_DAG.md b/doc/SERVER_DAG.md
index 12566d61..fd15140f 100644
--- a/doc/SERVER_DAG.md
+++ b/doc/SERVER_DAG.md
@@ -54,10 +54,3 @@ op_seq_maker.add_op(dist_kv_op)
 op_seq_maker.add_op(general_infer_op)
 op_seq_maker.add_op(general_response_op)
 ```
-
-
-
-
-
-
-
diff --git a/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h b/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
index 7a7291a0..24148e37 100644
--- a/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
+++ b/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
@@ -21,8 +21,8 @@
 #include
 #include "core/configure/include/configure_parser.h"
 #include "core/configure/inferencer_configure.pb.h"
-#include "paddle_inference_api.h"  // NOLINT
 #include "core/predictor/framework/infer.h"
+#include "paddle_inference_api.h"  // NOLINT

 namespace baidu {
 namespace paddle_serving {
@@ -336,7 +336,7 @@ class SigmoidModel {
       return -1;
     }
     VLOG(2) << "load sigmoid_b [" << _sigmoid_b._params[0] << "] ["
-        << _sigmoid_b._params[1] << "].";
+            << _sigmoid_b._params[1] << "].";
     _exp_max_input = exp_max;
     _exp_min_input = exp_min;
     return 0;
@@ -373,7 +373,7 @@ class SigmoidFluidModel {
     clone_model.reset(new SigmoidFluidModel());
     clone_model->_sigmoid_core = _sigmoid_core;
     clone_model->_fluid_core = _fluid_core->Clone();
-    return std::move(clone_model);
+    return std::move(clone_model);  // NOLINT
   }

 public:
@@ -459,7 +459,7 @@ class FluidCpuWithSigmoidCore : public FluidFamilyCore {
   }

 protected:
-  std::unique_ptr<SigmoidFluidModel> _core;
+  std::unique_ptr<SigmoidFluidModel> _core;  // NOLINT
 };

 class FluidCpuNativeDirWithSigmoidCore : public FluidCpuWithSigmoidCore {
diff --git a/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h b/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h
index ca546207..a3fa3654 100644
--- a/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h
+++ b/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h
@@ -23,8 +23,8 @@
 #include
 #include "core/configure/include/configure_parser.h"
 #include "core/configure/inferencer_configure.pb.h"
-#include "paddle_inference_api.h"  // NOLINT
 #include "core/predictor/framework/infer.h"
+#include "paddle_inference_api.h"  // NOLINT

 DECLARE_int32(gpuid);

@@ -334,13 +334,13 @@ class SigmoidModel {
       return -1;
     }
     VLOG(2) << "load sigmoid_w [" << _sigmoid_w._params[0] << "] ["
-        << _sigmoid_w._params[1] << "].";
+            << _sigmoid_w._params[1] << "].";
     if (0 != _sigmoid_b.init(2, 1, sigmoid_b_file) || 0 != _sigmoid_b.load()) {
       LOG(ERROR) << "load params sigmoid_b failed.";
       return -1;
     }
     VLOG(2) << "load sigmoid_b [" << _sigmoid_b._params[0] << "] ["
-        << _sigmoid_b._params[1] << "].";
+            << _sigmoid_b._params[1] << "].";
     _exp_max_input = exp_max;
     _exp_min_input = exp_min;
     return 0;
diff --git a/python/examples/bert/benchmark.py b/python/examples/bert/benchmark.py
index dd11f124..e770dae0 100644
--- a/python/examples/bert/benchmark.py
+++ b/python/examples/bert/benchmark.py
@@ -13,6 +13,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+# pylint: disable=doc-string-missing

 from __future__ import unicode_literals, absolute_import
 import os
@@ -29,6 +30,7 @@ from bert_reader import BertReader

 args = benchmark_args()

+
 def single_func(idx, resource):
     fin = open("data-c.txt")
     if args.request == "rpc":
@@ -38,29 +40,32 @@ def single_func(idx, resource):
         client = Client()
         client.load_client_config(args.model)
         client.connect([resource["endpoint"][idx % 4]])
-
+
         start = time.time()
         for line in fin:
             feed_dict = reader.process(line)
-            result = client.predict(feed=feed_dict,
-                                    fetch=fetch)
+            result = client.predict(feed=feed_dict, fetch=fetch)
         end = time.time()
     elif args.request == "http":
         start = time.time()
-        header = {"Content-Type":"application/json"}
+        header = {"Content-Type": "application/json"}
        for line in fin:
             #dict_data = {"words": "this is for output ", "fetch": ["pooled_output"]}
             dict_data = {"words": line, "fetch": ["pooled_output"]}
-            r = requests.post('http://{}/bert/prediction'.format(resource["endpoint"][0]),
-                              data=json.dumps(dict_data), headers=header)
+            r = requests.post(
+                'http://{}/bert/prediction'.format(resource["endpoint"][0]),
+                data=json.dumps(dict_data),
+                headers=header)
         end = time.time()
     return [[end - start]]

+
 if __name__ == '__main__':
     multi_thread_runner = MultiThreadRunner()
-    endpoint_list = ["127.0.0.1:9494", "127.0.0.1:9495", "127.0.0.1:9496", "127.0.0.1:9497"]
+    endpoint_list = [
+        "127.0.0.1:9494", "127.0.0.1:9495", "127.0.0.1:9496", "127.0.0.1:9497"
+    ]
     #endpoint_list = endpoint_list + endpoint_list + endpoint_list
     #result = multi_thread_runner.run(single_func, args.thread, {"endpoint":endpoint_list})
-    result = single_func(0, {"endpoint":endpoint_list})
+    result = single_func(0, {"endpoint": endpoint_list})
     print(result)
-
diff --git a/python/examples/bert/bert_client.py b/python/examples/bert/bert_client.py
index cdb75ff8..343a6e01 100644
--- a/python/examples/bert/bert_client.py
+++ b/python/examples/bert/bert_client.py
@@ -1,4 +1,5 @@
 # coding:utf-8
+# pylint: disable=doc-string-missing
 import os
 import sys
 import numpy as np
@@ -143,9 +144,12 @@ def single_func(idx, resource):
     end = time.time()
     return [[end - start]]

+
 if __name__ == '__main__':
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(single_func, args.thread, {"endpoint":["127.0.0.1:9494", "127.0.0.1:9495", "127.0.0.1:9496", "127.0.0.1:9497"]})
-
-
-
+    result = multi_thread_runner.run(single_func, args.thread, {
+        "endpoint": [
+            "127.0.0.1:9494", "127.0.0.1:9495", "127.0.0.1:9496",
+            "127.0.0.1:9497"
+        ]
+    })
diff --git a/python/examples/bert/bert_reader.py b/python/examples/bert/bert_reader.py
index 52bb6ebe..366c19b5 100644
--- a/python/examples/bert/bert_reader.py
+++ b/python/examples/bert/bert_reader.py
@@ -1,6 +1,21 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# pylint: disable=doc-string-missing
 from batching import pad_batch_data
 import tokenization

+
 class BertReader():
     def __init__(self, vocab_file="", max_seq_len=128):
         self.vocab_file = vocab_file
@@ -48,8 +63,10 @@ class BertReader():
         position_ids = list(range(len(token_ids)))
         p_token_ids, p_pos_ids, p_text_type_ids, input_mask = \
             self.pad_batch(token_ids, text_type_ids, position_ids)
-        feed_result = {"input_ids": p_token_ids.reshape(-1).tolist(),
-                       "position_ids": p_pos_ids.reshape(-1).tolist(),
-                       "segment_ids": p_text_type_ids.reshape(-1).tolist(),
-                       "input_mask": input_mask.reshape(-1).tolist()}
+        feed_result = {
+            "input_ids": p_token_ids.reshape(-1).tolist(),
+            "position_ids": p_pos_ids.reshape(-1).tolist(),
+            "segment_ids": p_text_type_ids.reshape(-1).tolist(),
+            "input_mask": input_mask.reshape(-1).tolist()
+        }
         return feed_result
diff --git a/python/examples/bert/bert_web_service.py b/python/examples/bert/bert_web_service.py
index 2b52d2fb..a420b665 100644
--- a/python/examples/bert/bert_web_service.py
+++ b/python/examples/bert/bert_web_service.py
@@ -12,12 +12,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+# pylint: disable=doc-string-missing
 from paddle_serving_server_gpu.web_service import WebService
 from bert_reader import BertReader
 import sys
 import os

+
 class BertService(WebService):
     def load(self):
         self.reader = BertReader(vocab_file="vocab.txt", max_seq_len=20)
@@ -26,12 +27,12 @@ class BertService(WebService):
         feed_res = self.reader.process(feed["words"].encode("utf-8"))
         return feed_res, fetch

+
 bert_service = BertService(name="bert")
 bert_service.load()
 bert_service.load_model_config(sys.argv[1])
 gpu_ids = os.environ["CUDA_VISIBLE_DEVICES"]
 gpus = [int(x) for x in gpu_ids.split(",")]
 bert_service.set_gpus(gpus)
-bert_service.prepare_server(
-    workdir="workdir", port=9494, device="gpu")
+bert_service.prepare_server(workdir="workdir", port=9494, device="gpu")
 bert_service.run_server()
diff --git a/python/examples/bert/prepare_model.py b/python/examples/bert/prepare_model.py
index 5a7c5893..70902adf 100644
--- a/python/examples/bert/prepare_model.py
+++ b/python/examples/bert/prepare_model.py
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+# pylint: disable=doc-string-missing
 import paddlehub as hub
 import paddle.fluid as fluid
 import sys
@@ -19,7 +19,8 @@ import paddle_serving_client.io as serving_io

 model_name = "bert_chinese_L-12_H-768_A-12"
 module = hub.Module(model_name)
-inputs, outputs, program = module.context(trainable=True, max_seq_len=int(sys.argv[1]))
+inputs, outputs, program = module.context(
+    trainable=True, max_seq_len=int(sys.argv[1]))
 place = fluid.core_avx.CPUPlace()
 exe = fluid.Executor(place)
 input_ids = inputs["input_ids"]
@@ -35,10 +36,12 @@ feed_var_names = [

 target_vars = [pooled_output, sequence_output]

-serving_io.save_model("bert_seq{}_model".format(sys.argv[1]), "bert_seq{}_client".format(sys.argv[1]), {
-    "input_ids": input_ids,
-    "position_ids": position_ids,
-    "segment_ids": segment_ids,
-    "input_mask": input_mask,
-}, {"pooled_output": pooled_output,
-    "sequence_output": sequence_output}, program)
+serving_io.save_model(
+    "bert_seq{}_model".format(sys.argv[1]),
+    "bert_seq{}_client".format(sys.argv[1]), {
+        "input_ids": input_ids,
+        "position_ids": position_ids,
+        "segment_ids": segment_ids,
+        "input_mask": input_mask,
+    }, {"pooled_output": pooled_output,
+        "sequence_output": sequence_output}, program)
diff --git a/python/examples/bert/tokenization.py b/python/examples/bert/tokenization.py
index bde0ed43..0d84ed38 100644
--- a/python/examples/bert/tokenization.py
+++ b/python/examples/bert/tokenization.py
@@ -26,7 +26,7 @@ import sentencepiece as spm
 import pickle

-def convert_to_unicode(text):
+def convert_to_unicode(text):  # pylint: disable=doc-string-with-all-args
     """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
     if six.PY3:
         if isinstance(text, str):
@@ -46,7 +46,7 @@ def convert_to_unicode(text):
     raise ValueError("Not running on Python2 or Python 3?")

-def printable_text(text):
+def printable_text(text):  # pylint: disable=doc-string-with-all-args
     """Returns text encoded in a way suitable for print or `tf.logging`."""

     # These functions want `str` for both Python2 and Python3, but in one case
@@ -69,7 +69,7 @@ def printable_text(text):
     raise ValueError("Not running on Python2 or Python 3?")

-def load_vocab(vocab_file):
+def load_vocab(vocab_file):  # pylint: disable=doc-string-with-all-args, doc-string-with-returns
     """Loads a vocabulary file into a dictionary."""
     vocab = collections.OrderedDict()
     fin = io.open(vocab_file, "r", encoding="UTF-8")
@@ -163,7 +163,7 @@ class CharTokenizer(object):
         return convert_by_vocab(self.inv_vocab, ids)

-class WSSPTokenizer(object):
+class WSSPTokenizer(object):  # pylint: disable=doc-string-missing
     def __init__(self, vocab_file, sp_model_dir, word_dict, ws=True,
                  lower=True):
         self.vocab = load_vocab(vocab_file)
@@ -175,7 +175,7 @@ class WSSPTokenizer(object):
         self.window_size = 5
         self.sp_model.Load(sp_model_dir)

-    def cut(self, chars):
+    def cut(self, chars):  # pylint: disable=doc-string-missing
         words = []
         idx = 0
         while idx < len(chars):
@@ -192,7 +192,7 @@ class WSSPTokenizer(object):
             idx += i
         return words

-    def tokenize(self, text, unk_token="[UNK]"):
+    def tokenize(self, text, unk_token="[UNK]"):  # pylint: disable=doc-string-missing
         text = convert_to_unicode(text)
         if self.ws:
             text = [s for s in self.cut(text) if s != ' ']
@@ -228,7 +228,7 @@ class BasicTokenizer(object):
         """
         self.do_lower_case = do_lower_case

-    def tokenize(self, text):
+    def tokenize(self, text):  # pylint: disable=doc-string-with-all-args, doc-string-with-returns
         """Tokenizes a piece of text."""
         text = convert_to_unicode(text)
         text = self._clean_text(text)
@@ -345,7 +345,7 @@ class WordpieceTokenizer(object):
         self.max_input_chars_per_word = max_input_chars_per_word
         self.use_sentence_piece_vocab = use_sentence_piece_vocab

-    def tokenize(self, text):
+    def tokenize(self, text):  # pylint: disable=doc-string-with-all-args
         """Tokenizes a piece of text into its word pieces.

         This uses a greedy longest-match-first algorithm to perform tokenization
@@ -432,8 +432,8 @@ def _is_punctuation(char):
     # Characters such as "^", "$", and "`" are not in the Unicode
     # Punctuation class but we treat them as punctuation anyways, for
     # consistency.
-    if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64)
-            or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
+    if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
+        (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
         return True
     cat = unicodedata.category(char)
     if cat.startswith("P"):
diff --git a/python/examples/criteo_ctr/README.md b/python/examples/criteo_ctr/README.md
index 3b7743bc..7a994794 100644
--- a/python/examples/criteo_ctr/README.md
+++ b/python/examples/criteo_ctr/README.md
@@ -1,2 +1 @@
 # CTR task on Criteo Dataset
-
diff --git a/python/examples/criteo_ctr/args.py b/python/examples/criteo_ctr/args.py
index a30398e3..30124d4e 100644
--- a/python/examples/criteo_ctr/args.py
+++ b/python/examples/criteo_ctr/args.py
@@ -1,90 +1,105 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# pylint: disable=doc-string-missing
 import argparse

+
 def parse_args():
-  parser = argparse.ArgumentParser(description="PaddlePaddle CTR example")
-  parser.add_argument(
-      '--train_data_path',
-      type=str,
-      default='./data/raw/train.txt',
-      help="The path of training dataset")
-  parser.add_argument(
-      '--sparse_only',
-      type=bool,
-      default=False,
-      help="Whether we use sparse features only")
-  parser.add_argument(
-      '--test_data_path',
-      type=str,
-      default='./data/raw/valid.txt',
-      help="The path of testing dataset")
-  parser.add_argument(
-      '--batch_size',
-      type=int,
-      default=1000,
-      help="The size of mini-batch (default:1000)")
-  parser.add_argument(
-      '--embedding_size',
-      type=int,
-      default=10,
-      help="The size for embedding layer (default:10)")
-  parser.add_argument(
-      '--num_passes',
-      type=int,
-      default=10,
-      help="The number of passes to train (default: 10)")
-  parser.add_argument(
-      '--model_output_dir',
-      type=str,
-      default='models',
-      help='The path for model to store (default: models)')
-  parser.add_argument(
-      '--sparse_feature_dim',
-      type=int,
-      default=1000001,
-      help='sparse feature hashing space for index processing')
-  parser.add_argument(
-      '--is_local',
-      type=int,
-      default=1,
-      help='Local train or distributed train (default: 1)')
-  parser.add_argument(
-      '--cloud_train',
-      type=int,
-      default=0,
-      help='Local train or distributed train on paddlecloud (default: 0)')
-  parser.add_argument(
-      '--async_mode',
-      action='store_true',
-      default=False,
-      help='Whether start pserver in async mode to support ASGD')
-  parser.add_argument(
-      '--no_split_var',
-      action='store_true',
-      default=False,
-      help='Whether split variables into blocks when update_method is pserver')
-  parser.add_argument(
-      '--role',
-      type=str,
-      default='pserver',  # trainer or pserver
-      help='The path for model to store (default: models)')
-  parser.add_argument(
-      '--endpoints',
-      type=str,
-      default='127.0.0.1:6000',
-      help='The pserver endpoints, like: 127.0.0.1:6000,127.0.0.1:6001')
-  parser.add_argument(
-      '--current_endpoint',
-      type=str,
-      default='127.0.0.1:6000',
-      help='The path for model to store (default: 127.0.0.1:6000)')
-  parser.add_argument(
-      '--trainer_id',
-      type=int,
-      default=0,
-      help='The path for model to store (default: models)')
-  parser.add_argument(
-      '--trainers',
-      type=int,
-      default=1,
-      help='The num of trianers, (default: 1)')
-  return parser.parse_args()
+    parser = argparse.ArgumentParser(description="PaddlePaddle CTR example")
+    parser.add_argument(
+        '--train_data_path',
+        type=str,
+        default='./data/raw/train.txt',
+        help="The path of training dataset")
+    parser.add_argument(
+        '--sparse_only',
+        type=bool,
+        default=False,
+        help="Whether we use sparse features only")
+    parser.add_argument(
+        '--test_data_path',
+        type=str,
+        default='./data/raw/valid.txt',
+        help="The path of testing dataset")
+    parser.add_argument(
+        '--batch_size',
+        type=int,
+        default=1000,
+        help="The size of mini-batch (default:1000)")
+    parser.add_argument(
+        '--embedding_size',
+        type=int,
+        default=10,
+        help="The size for embedding layer (default:10)")
+    parser.add_argument(
+        '--num_passes',
+        type=int,
+        default=10,
+        help="The number of passes to train (default: 10)")
+    parser.add_argument(
+        '--model_output_dir',
+        type=str,
+        default='models',
+        help='The path for model to store (default: models)')
+    parser.add_argument(
+        '--sparse_feature_dim',
+        type=int,
+        default=1000001,
+        help='sparse feature hashing space for index processing')
+    parser.add_argument(
+        '--is_local',
+        type=int,
+        default=1,
+        help='Local train or distributed train (default: 1)')
+    parser.add_argument(
+        '--cloud_train',
+        type=int,
+        default=0,
+        help='Local train or distributed train on paddlecloud (default: 0)')
+    parser.add_argument(
+        '--async_mode',
+        action='store_true',
+        default=False,
+        help='Whether start pserver in async mode to support ASGD')
+    parser.add_argument(
+        '--no_split_var',
+        action='store_true',
+        default=False,
+        help='Whether split variables into blocks when update_method is pserver')
+    parser.add_argument(
+        '--role',
+        type=str,
+        default='pserver',  # trainer or pserver
+        help='The path for model to store (default: models)')
+    parser.add_argument(
+        '--endpoints',
+        type=str,
+        default='127.0.0.1:6000',
+        help='The pserver endpoints, like: 127.0.0.1:6000,127.0.0.1:6001')
+    parser.add_argument(
+        '--current_endpoint',
+        type=str,
+        default='127.0.0.1:6000',
+        help='The path for model to store (default: 127.0.0.1:6000)')
+    parser.add_argument(
+        '--trainer_id',
+        type=int,
+        default=0,
+        help='The path for model to store (default: models)')
+    parser.add_argument(
+        '--trainers',
+        type=int,
+        default=1,
+        help='The num of trianers, (default: 1)')
+    return parser.parse_args()
diff --git a/python/examples/criteo_ctr/criteo_reader.py b/python/examples/criteo_ctr/criteo_reader.py
index 06f90d27..2a80af78 100644
--- a/python/examples/criteo_ctr/criteo_reader.py
+++ b/python/examples/criteo_ctr/criteo_reader.py
@@ -1,11 +1,31 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# pylint: disable=doc-string-missing
+
 import sys
 import paddle.fluid.incubate.data_generator as dg

+
 class CriteoDataset(dg.MultiSlotDataGenerator):
     def setup(self, sparse_feature_dim):
         self.cont_min_ = [0, -3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-        self.cont_max_ = [20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50]
-        self.cont_diff_ = [20, 603, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50]
+        self.cont_max_ = [
+            20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
+        ]
+        self.cont_diff_ = [
+            20, 603, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
+        ]
         self.hash_dim_ = sparse_feature_dim
         # here, training data are lines with line_index < train_idx_
         self.train_idx_ = 41256555
@@ -23,8 +43,9 @@ class CriteoDataset(dg.MultiSlotDataGenerator):
             dense_feature.append((float(features[idx]) - self.cont_min_[idx - 1]) / \
                     self.cont_diff_[idx - 1])
         for idx in self.categorical_range_:
-            sparse_feature.append([hash(str(idx) + features[idx]) % self.hash_dim_])
-
+            sparse_feature.append(
+                [hash(str(idx) + features[idx]) % self.hash_dim_])
+
         return dense_feature, sparse_feature, [int(features[0])]

     def infer_reader(self, filelist, batch, buf_size):
@@ -32,16 +53,17 @@ class CriteoDataset(dg.MultiSlotDataGenerator):
         for fname in filelist:
             with open(fname.strip(), "r") as fin:
                 for line in fin:
-                    dense_feature, sparse_feature, label = self._process_line(line)
+                    dense_feature, sparse_feature, label = self._process_line(
+                        line)
                     #yield dense_feature, sparse_feature, label
                     yield [dense_feature] + sparse_feature + [label]
+
         import paddle
         batch_iter = paddle.batch(
             paddle.reader.shuffle(
                 local_iter, buf_size=buf_size),
             batch_size=batch)
         return batch_iter
-
     def generate_sample(self, line):

         def data_iter():
@@ -54,6 +76,7 @@ class CriteoDataset(dg.MultiSlotDataGenerator):

         return data_iter

+
 if __name__ == "__main__":
     criteo_dataset = CriteoDataset()
     criteo_dataset.setup(int(sys.argv[1]))
diff --git a/python/examples/criteo_ctr/local_train.py b/python/examples/criteo_ctr/local_train.py
index a3cf57c8..bbc94075 100644
--- a/python/examples/criteo_ctr/local_train.py
+++ b/python/examples/criteo_ctr/local_train.py
@@ -1,3 +1,18 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# pylint: disable=doc-string-missing
+
 from __future__ import print_function

 from args import parse_args
@@ -17,15 +32,17 @@ def train():
     dense_input = fluid.layers.data(
         name="dense_input", shape=[dense_feature_dim], dtype='float32')
     sparse_input_ids = [
-        fluid.layers.data(name="C" + str(i), shape=[1], lod_level=1, dtype="int64")
-        for i in range(1, 27)]
+        fluid.layers.data(
+            name="C" + str(i), shape=[1], lod_level=1, dtype="int64")
+        for i in range(1, 27)
+    ]
     label = fluid.layers.data(name='label', shape=[1], dtype='int64')

     #nn_input = None if sparse_only else dense_input
     nn_input = dense_input
     predict_y, loss, auc_var, batch_auc_var = dnn_model(
-        nn_input, sparse_input_ids, label,
-        args.embedding_size, args.sparse_feature_dim)
+        nn_input, sparse_input_ids, label, args.embedding_size,
+        args.sparse_feature_dim)

     optimizer = fluid.optimizer.SGD(learning_rate=1e-4)
     optimizer.minimize(loss)
@@ -36,16 +53,17 @@ def train():
     dataset.set_use_var([dense_input] + sparse_input_ids + [label])

     python_executable = "python"
-    pipe_command = "{} criteo_reader.py {}".format(
-        python_executable, args.sparse_feature_dim)
+    pipe_command = "{} criteo_reader.py {}".format(python_executable,
+                                                   args.sparse_feature_dim)
     dataset.set_pipe_command(pipe_command)

     dataset.set_batch_size(128)
     thread_num = 10
     dataset.set_thread(thread_num)

-    whole_filelist = ["raw_data/part-%d" % x for x in
-                      range(len(os.listdir("raw_data")))]
+    whole_filelist = [
+        "raw_data/part-%d" % x for x in range(len(os.listdir("raw_data")))
+    ]

     dataset.set_filelist(whole_filelist[:thread_num])
     dataset.load_into_memory()
@@ -53,8 +71,7 @@ def train():
     epochs = 1
     for i in range(epochs):
         exe.train_from_dataset(
-            program=fluid.default_main_program(),
-            dataset=dataset, debug=True)
+            program=fluid.default_main_program(), dataset=dataset, debug=True)
         print("epoch {} finished".format(i))

     import paddle_serving_client.io as server_io
@@ -63,9 +80,9 @@ def train():
         feed_var_dict["sparse_{}".format(i)] = sparse
     fetch_var_dict = {"prob": predict_y}

-    server_io.save_model(
-        "ctr_serving_model", "ctr_client_conf",
-        feed_var_dict, fetch_var_dict, fluid.default_main_program())
+    server_io.save_model("ctr_serving_model", "ctr_client_conf", feed_var_dict,
+                         fetch_var_dict, fluid.default_main_program())
+

 if __name__ == '__main__':
     train()
diff --git a/python/examples/criteo_ctr/network_conf.py b/python/examples/criteo_ctr/network_conf.py
index 429921da..ec5eb7d5 100644
--- a/python/examples/criteo_ctr/network_conf.py
+++ b/python/examples/criteo_ctr/network_conf.py
@@ -1,17 +1,33 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# pylint: disable=doc-string-missing + import paddle.fluid as fluid import math -def dnn_model(dense_input, sparse_inputs, label, - embedding_size, sparse_feature_dim): +def dnn_model(dense_input, sparse_inputs, label, embedding_size, + sparse_feature_dim): def embedding_layer(input): emb = fluid.layers.embedding( input=input, is_sparse=True, is_distributed=False, size=[sparse_feature_dim, embedding_size], - param_attr=fluid.ParamAttr(name="SparseFeatFactors", - initializer=fluid.initializer.Uniform())) + param_attr=fluid.ParamAttr( + name="SparseFeatFactors", + initializer=fluid.initializer.Uniform())) return fluid.layers.sequence_pool(input=emb, pool_type='sum') def mlp_input_tensor(emb_sums, dense_tensor): @@ -21,18 +37,30 @@ def dnn_model(dense_input, sparse_inputs, label, return fluid.layers.concat(emb_sums + [dense_tensor], axis=1) def mlp(mlp_input): - fc1 = fluid.layers.fc(input=mlp_input, size=400, act='relu', - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(mlp_input.shape[1])))) - fc2 = fluid.layers.fc(input=fc1, size=400, act='relu', - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(fc1.shape[1])))) - fc3 = fluid.layers.fc(input=fc2, size=400, act='relu', - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(fc2.shape[1])))) - pre = fluid.layers.fc(input=fc3, size=2, act='softmax', - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(fc3.shape[1])))) + fc1 = fluid.layers.fc(input=mlp_input, + size=400, + act='relu', + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Normal( + scale=1 / math.sqrt(mlp_input.shape[1])))) + fc2 = fluid.layers.fc(input=fc1, + size=400, + act='relu', + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Normal( + scale=1 / math.sqrt(fc1.shape[1])))) + fc3 = fluid.layers.fc(input=fc2, + size=400, + act='relu', + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Normal( + scale=1 / math.sqrt(fc2.shape[1])))) + pre = fluid.layers.fc(input=fc3, + size=2, + act='softmax', + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Normal( + scale=1 / math.sqrt(fc3.shape[1])))) return pre emb_sums = list(map(embedding_layer, sparse_inputs)) diff --git a/python/examples/criteo_ctr/test_client.py b/python/examples/criteo_ctr/test_client.py index 71bbb836..40111928 100644 --- a/python/examples/criteo_ctr/test_client.py +++ b/python/examples/criteo_ctr/test_client.py @@ -1,3 +1,18 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# pylint: disable=doc-string-missing + from paddle_serving_client import Client import paddle import sys @@ -13,9 +28,12 @@ batch = 1 buf_size = 100 dataset = criteo.CriteoDataset() dataset.setup(1000001) -test_filelists = ["{}/part-%d".format(sys.argv[2]) % x - for x in range(len(os.listdir(sys.argv[2])))] -reader = dataset.infer_reader(test_filelists[len(test_filelists)-40:], batch, buf_size) +test_filelists = [ + "{}/part-%d".format(sys.argv[2]) % x + for x in range(len(os.listdir(sys.argv[2]))) +] +reader = dataset.infer_reader(test_filelists[len(test_filelists) - 40:], batch, + buf_size) label_list = [] prob_list = [] @@ -25,4 +43,3 @@ for data in reader(): feed_dict["sparse_{}".format(i - 1)] = data[0][i] fetch_map = client.predict(feed=feed_dict, fetch=["prob"]) print(fetch_map) - diff --git a/python/examples/criteo_ctr/test_server.py b/python/examples/criteo_ctr/test_server.py index 98400108..34f859da 100644 --- a/python/examples/criteo_ctr/test_server.py +++ b/python/examples/criteo_ctr/test_server.py @@ -1,3 +1,18 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=doc-string-missing + import os import sys from paddle_serving_server import OpMaker diff --git a/python/examples/fit_a_line/README.md b/python/examples/fit_a_line/README.md index fedd4089..da0ec8f3 100644 --- a/python/examples/fit_a_line/README.md +++ b/python/examples/fit_a_line/README.md @@ -19,4 +19,4 @@ python -m paddle_serving_server.web_serve --model uci_housing_model/ --thread 10 Prediction through http post ``` shell curl -H "Content-Type:application/json" -X POST -d '{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332], "fetch":["price"]}' http://127.0.0.1:9393/uci/prediction -``` \ No newline at end of file +``` diff --git a/python/examples/fit_a_line/benchmark.py b/python/examples/fit_a_line/benchmark.py index b68d37e6..0ddda2a0 100644 --- a/python/examples/fit_a_line/benchmark.py +++ b/python/examples/fit_a_line/benchmark.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+# pylint: disable=doc-string-missing + from paddle_serving_client import Client from paddle_serving_client.utils import MultiThreadRunner from paddle_serving_client.utils import benchmark_args @@ -21,28 +23,35 @@ import requests args = benchmark_args() + def single_func(idx, resource): if args.request == "rpc": client = Client() client.load_client_config(args.model) client.connect([args.endpoint]) - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.uci_housing.train(), buf_size=500), batch_size=1) + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.train(), buf_size=500), + batch_size=1) start = time.time() for data in train_reader(): fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"]) end = time.time() return [[end - start]] elif args.request == "http": - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.uci_housing.train(), buf_size=500), batch_size=1) + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.train(), buf_size=500), + batch_size=1) start = time.time() for data in train_reader(): - r = requests.post('http://{}/uci/prediction'.format(args.endpoint), - data = {"x": data[0]}) + r = requests.post( + 'http://{}/uci/prediction'.format(args.endpoint), + data={"x": data[0]}) end = time.time() return [[end - start]] + multi_thread_runner = MultiThreadRunner() result = multi_thread_runner.run(single_func, args.thread, {}) print(result) diff --git a/python/examples/fit_a_line/local_train.py b/python/examples/fit_a_line/local_train.py index fae6f72c..900b4a67 100644 --- a/python/examples/fit_a_line/local_train.py +++ b/python/examples/fit_a_line/local_train.py @@ -1,12 +1,31 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# pylint: disable=doc-string-missing + import sys import paddle import paddle.fluid as fluid -train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.uci_housing.train(), buf_size=500), batch_size=16) +train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.train(), buf_size=500), + batch_size=16) -test_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.uci_housing.test(), buf_size=500), batch_size=16) +test_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.test(), buf_size=500), + batch_size=16) x = fluid.data(name='x', shape=[None, 13], dtype='float32') y = fluid.data(name='y', shape=[None, 1], dtype='float32') @@ -26,11 +45,9 @@ import paddle_serving_client.io as serving_io for pass_id in range(30): for data_train in train_reader(): - avg_loss_value, = exe.run( - fluid.default_main_program(), - feed=feeder.feed(data_train), - fetch_list=[avg_loss]) - -serving_io.save_model( - "uci_housing_model", "uci_housing_client", - {"x": x}, {"price": y_predict}, fluid.default_main_program()) + avg_loss_value, = exe.run(fluid.default_main_program(), + feed=feeder.feed(data_train), + fetch_list=[avg_loss]) + +serving_io.save_model("uci_housing_model", "uci_housing_client", {"x": x}, + {"price": y_predict}, fluid.default_main_program()) diff --git a/python/examples/fit_a_line/test_client.py b/python/examples/fit_a_line/test_client.py index 02466eb8..442ed230 100644 --- a/python/examples/fit_a_line/test_client.py +++ b/python/examples/fit_a_line/test_client.py @@ -1,3 +1,18 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=doc-string-missing + from paddle_serving_client import Client import sys @@ -6,10 +21,11 @@ client.load_client_config(sys.argv[1]) client.connect(["127.0.0.1:9393"]) import paddle -test_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.uci_housing.test(), buf_size=500), batch_size=1) +test_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.test(), buf_size=500), + batch_size=1) for data in test_reader(): fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"]) print("{} {}".format(fetch_map["price"][0], data[0][1][0])) - diff --git a/python/examples/fit_a_line/test_server.py b/python/examples/fit_a_line/test_server.py index 7f90f18c..3293be06 100644 --- a/python/examples/fit_a_line/test_server.py +++ b/python/examples/fit_a_line/test_server.py @@ -1,3 +1,18 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=doc-string-missing + import os import sys from paddle_serving_server import OpMaker diff --git a/python/examples/imdb/benchmark.py b/python/examples/imdb/benchmark.py index e2e45f44..05459257 100644 --- a/python/examples/imdb/benchmark.py +++ b/python/examples/imdb/benchmark.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=doc-string-missing import sys import time @@ -22,6 +23,7 @@ from paddle_serving_client.utils import benchmark_args args = benchmark_args() + def single_func(idx, resource): imdb_dataset = IMDBDataset() imdb_dataset.load_resource(args.vocab) @@ -40,18 +42,21 @@ def single_func(idx, resource): fin = open(fn) for line in fin: word_ids, label = imdb_dataset.get_words_and_label(line) - fetch_map = client.predict(feed={"words": word_ids}, - fetch=["prediction"]) + fetch_map = client.predict( + feed={"words": word_ids}, fetch=["prediction"]) elif args.request == "http": for fn in filelist: fin = open(fn) for line in fin: word_ids, label = imdb_dataset.get_words_and_label(line) - r = requests.post("http://{}/imdb/prediction".format(args.endpoint), - data={"words": word_ids, "fetch": ["prediction"]}) + r = requests.post( + "http://{}/imdb/prediction".format(args.endpoint), + data={"words": word_ids, + "fetch": ["prediction"]}) end = time.time() return [[end - start]] + multi_thread_runner = MultiThreadRunner() result = multi_thread_runner.run(single_func, args.thread, {}) print(result) diff --git a/python/examples/imdb/imdb_reader.py b/python/examples/imdb/imdb_reader.py index cad28ab2..38a46c5c 100644 --- a/python/examples/imdb/imdb_reader.py +++ b/python/examples/imdb/imdb_reader.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=doc-string-missing import sys import os @@ -18,6 +19,7 @@ import paddle import re import paddle.fluid.incubate.data_generator as dg + class IMDBDataset(dg.MultiSlotDataGenerator): def load_resource(self, dictfile): self._vocab = {} @@ -42,7 +44,7 @@ class IMDBDataset(dg.MultiSlotDataGenerator): send = '|'.join(line.split('|')[:-1]).lower().replace("
", " ").strip() label = [int(line.split('|')[-1])] - + words = [x for x in self._pattern.split(send) if x and x != " "] feas = [ self._vocab[x] if x in self._vocab else self._unk_id for x in words @@ -56,9 +58,11 @@ class IMDBDataset(dg.MultiSlotDataGenerator): for line in fin: feas, label = self.get_words_and_label(line) yield feas, label + import paddle batch_iter = paddle.batch( - paddle.reader.shuffle(local_iter, buf_size=buf_size), + paddle.reader.shuffle( + local_iter, buf_size=buf_size), batch_size=batch) return batch_iter @@ -66,13 +70,15 @@ class IMDBDataset(dg.MultiSlotDataGenerator): def memory_iter(): for i in range(1000): yield self.return_value + def data_iter(): feas, label = self.get_words_and_label(line) yield ("words", feas), ("label", label) + return data_iter + if __name__ == "__main__": imdb = IMDBDataset() imdb.load_resource("imdb.vocab") imdb.run_from_stdin() - diff --git a/python/examples/imdb/imdb_web_service_demo.sh b/python/examples/imdb/imdb_web_service_demo.sh index 0b69a353..05d1b729 100644 --- a/python/examples/imdb/imdb_web_service_demo.sh +++ b/python/examples/imdb/imdb_web_service_demo.sh @@ -3,4 +3,3 @@ tar -xzf imdb_service.tar.gz wget --no-check-certificate https://fleet.bj.bcebos.com/text_classification_data.tar.gz tar -zxvf text_classification_data.tar.gz python text_classify_service.py serving_server_model/ workdir imdb.vocab - diff --git a/python/examples/imdb/local_train.py b/python/examples/imdb/local_train.py index 53692298..b5b46073 100644 --- a/python/examples/imdb/local_train.py +++ b/python/examples/imdb/local_train.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=doc-string-missing import os import sys import paddle diff --git a/python/examples/imdb/nets.py b/python/examples/imdb/nets.py index 3b451d16..4f2d2af6 100644 --- a/python/examples/imdb/nets.py +++ b/python/examples/imdb/nets.py @@ -1,3 +1,18 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=doc-string-missing, doc-string-with-all-args, doc-string-with-returns + import sys import time import numpy as np @@ -13,10 +28,9 @@ def bow_net(data, hid_dim=128, hid_dim2=96, class_dim=2): - """ - bow net - """ - emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim], is_sparse=True) + """ bow net. """ + emb = fluid.layers.embedding( + input=data, size=[dict_dim, emb_dim], is_sparse=True) bow = fluid.layers.sequence_pool(input=emb, pool_type='sum') bow_tanh = fluid.layers.tanh(bow) fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh") @@ -37,10 +51,9 @@ def cnn_net(data, hid_dim2=96, class_dim=2, win_size=3): - """ - conv net - """ - emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim], is_sparse=True) + """ conv net. 
""" + emb = fluid.layers.embedding( + input=data, size=[dict_dim, emb_dim], is_sparse=True) conv_3 = fluid.nets.sequence_conv_pool( input=emb, @@ -67,9 +80,7 @@ def lstm_net(data, hid_dim2=96, class_dim=2, emb_lr=30.0): - """ - lstm net - """ + """ lstm net. """ emb = fluid.layers.embedding( input=data, size=[dict_dim, emb_dim], @@ -103,9 +114,7 @@ def gru_net(data, hid_dim2=96, class_dim=2, emb_lr=400.0): - """ - gru net - """ + """ gru net. """ emb = fluid.layers.embedding( input=data, size=[dict_dim, emb_dim], diff --git a/python/examples/imdb/test_client.py b/python/examples/imdb/test_client.py index bb0b9790..a938de19 100644 --- a/python/examples/imdb/test_client.py +++ b/python/examples/imdb/test_client.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=doc-string-missing from paddle_serving_client import Client from imdb_reader import IMDBDataset import sys @@ -31,4 +32,3 @@ for line in sys.stdin: fetch = ["acc", "cost", "prediction"] fetch_map = client.predict(feed=feed, fetch=fetch) print("{} {}".format(fetch_map["prediction"][1], label[0])) - diff --git a/python/examples/imdb/test_client_batch.py b/python/examples/imdb/test_client_batch.py index 76de7810..972b2c96 100644 --- a/python/examples/imdb/test_client_batch.py +++ b/python/examples/imdb/test_client_batch.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=doc-string-missing from paddle_serving_client import Client import sys diff --git a/python/examples/imdb/text_classify_service.py b/python/examples/imdb/text_classify_service.py index 8a6836f0..33399360 100755 --- a/python/examples/imdb/text_classify_service.py +++ b/python/examples/imdb/text_classify_service.py @@ -11,17 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=doc-string-missing + from paddle_serving_server.web_service import WebService from imdb_reader import IMDBDataset import sys + class IMDBService(WebService): def prepare_dict(self, args={}): if len(args) == 0: exit(-1) self.dataset = IMDBDataset() self.dataset.load_resource(args["dict_file_path"]) - + def preprocess(self, feed={}, fetch=[]): if "words" not in feed: exit(-1) @@ -29,8 +32,9 @@ class IMDBService(WebService): res_feed["words"] = self.dataset.get_words_only(feed["words"])[0] return res_feed, fetch + imdb_service = IMDBService(name="imdb") imdb_service.load_model_config(sys.argv[1]) imdb_service.prepare_server(workdir=sys.argv[2], port=9393, device="cpu") -imdb_service.prepare_dict({"dict_file_path":sys.argv[3]}) +imdb_service.prepare_dict({"dict_file_path": sys.argv[3]}) imdb_service.run_server() diff --git a/python/examples/util/get_acc.py b/python/examples/util/get_acc.py index ce4885a6..91796478 100644 --- a/python/examples/util/get_acc.py +++ b/python/examples/util/get_acc.py @@ -1,3 +1,18 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=doc-string-missing + import sys import os diff --git a/python/paddle_serving_client/io/__init__.py b/python/paddle_serving_client/io/__init__.py index 347240d6..f1a3dcf6 100644 --- a/python/paddle_serving_client/io/__init__.py +++ b/python/paddle_serving_client/io/__init__.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=doc-string-missing from paddle.fluid import Executor from paddle.fluid.compiler import CompiledProgram @@ -22,6 +23,7 @@ from paddle.fluid.io import save_inference_model from ..proto import general_model_config_pb2 as model_conf import os + def save_model(server_model_folder, client_config_folder, feed_var_dict, @@ -32,8 +34,12 @@ def save_model(server_model_folder, feed_var_names = [feed_var_dict[x].name for x in feed_var_dict] target_vars = fetch_var_dict.values() - save_inference_model(server_model_folder, feed_var_names, - target_vars, executor, main_program=main_program) + save_inference_model( + server_model_folder, + feed_var_names, + target_vars, + executor, + main_program=main_program) config = model_conf.GeneralModelConfig() @@ -67,7 +73,7 @@ def save_model(server_model_folder, fetch_var.fetch_type = 0 if fetch_var_dict[key].dtype == core.VarDesc.VarType.FP32: - fetch_var.fetch_type = 1 + fetch_var.fetch_type = 1 if fetch_var.is_lod_tensor: fetch_var.shape.extend([-1]) @@ -82,15 +88,15 @@ def save_model(server_model_folder, cmd = "mkdir -p {}".format(client_config_folder) os.system(cmd) - with open("{}/serving_client_conf.prototxt".format(client_config_folder), "w") as fout: + with open("{}/serving_client_conf.prototxt".format(client_config_folder), + "w") as fout: fout.write(str(config)) - with open("{}/serving_server_conf.prototxt".format(server_model_folder), "w") as fout: + with open("{}/serving_server_conf.prototxt".format(server_model_folder), + "w") as fout: fout.write(str(config)) - with open("{}/serving_client_conf.stream.prototxt".format(client_config_folder), "wb") as fout: + with open("{}/serving_client_conf.stream.prototxt".format( + client_config_folder), "wb") as fout: fout.write(config.SerializeToString()) - with open("{}/serving_server_conf.stream.prototxt".format(server_model_folder), "wb") as fout: + with open("{}/serving_server_conf.stream.prototxt".format( + server_model_folder), "wb") as fout: fout.write(config.SerializeToString()) - - - - diff --git a/python/paddle_serving_client/metric/acc.py b/python/paddle_serving_client/metric/acc.py index d1f43296..b225b4ec 100644 --- a/python/paddle_serving_client/metric/acc.py +++ b/python/paddle_serving_client/metric/acc.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+# pylint: disable=doc-string-missing + def acc(prob, label, threshold): # we support prob is the probability for label to be one @@ -21,5 +23,3 @@ def acc(prob, label, threshold): if (prob - threshold) * (label - prob) > 0: right += 1 return float(right) / total - - diff --git a/python/paddle_serving_client/metric/auc.py b/python/paddle_serving_client/metric/auc.py index 8b021cff..ebd77fb5 100644 --- a/python/paddle_serving_client/metric/auc.py +++ b/python/paddle_serving_client/metric/auc.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=doc-string-missing, doc-string-with-all-args, doc-string-with-returns + def tied_rank(x): """ @@ -24,21 +26,22 @@ def tied_rank(x): score : list of numbers The tied rank f each element in x """ - sorted_x = sorted(zip(x,range(len(x)))) + sorted_x = sorted(zip(x, range(len(x)))) r = [0 for k in x] cur_val = sorted_x[0][0] last_rank = 0 for i in range(len(sorted_x)): if cur_val != sorted_x[i][0]: cur_val = sorted_x[i][0] - for j in range(last_rank, i): - r[sorted_x[j][1]] = float(last_rank+1+i)/2.0 + for j in range(last_rank, i): + r[sorted_x[j][1]] = float(last_rank + 1 + i) / 2.0 last_rank = i - if i==len(sorted_x)-1: - for j in range(last_rank, i+1): - r[sorted_x[j][1]] = float(last_rank+i+2)/2.0 + if i == len(sorted_x) - 1: + for j in range(last_rank, i + 1): + r[sorted_x[j][1]] = float(last_rank + i + 2) / 2.0 return r + def auc(actual, posterior): """ Computes the area under the receiver-operater characteristic (AUC) @@ -56,10 +59,9 @@ def auc(actual, posterior): The mean squared error between actual and posterior """ r = tied_rank(posterior) - num_positive = len([0 for x in actual if x==1]) - num_negative = len(actual)-num_positive - sum_positive = sum([r[i] for i in range(len(r)) if actual[i]==1]) - auc = ((sum_positive - num_positive*(num_positive+1)/2.0) / - (num_negative*num_positive)) + num_positive = len([0 for x in actual if x == 1]) + num_negative = len(actual) - num_positive + sum_positive = sum([r[i] for i in range(len(r)) if actual[i] == 1]) + auc = ((sum_positive - num_positive * (num_positive + 1) / 2.0) / + (num_negative * num_positive)) return auc - diff --git a/python/paddle_serving_client/utils/__init__.py b/python/paddle_serving_client/utils/__init__.py index 62642314..7d39f37a 100644 --- a/python/paddle_serving_client/utils/__init__.py +++ b/python/paddle_serving_client/utils/__init__.py @@ -11,18 +11,26 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+# pylint: disable=doc-string-missing

 import os
 import sys
 import subprocess
 import argparse
 from multiprocessing import Pool

+
 def benchmark_args():
     parser = argparse.ArgumentParser("benchmark")
     parser.add_argument("--thread", type=int, default=10, help="concurrecy")
-    parser.add_argument("--model", type=str, default="", help="model for evaluation")
-    parser.add_argument("--endpoint", type=str, default="127.0.0.1:9292", help="endpoint of server")
-    parser.add_argument("--request", type=str, default="rpc", help="mode of service")
+    parser.add_argument(
+        "--model", type=str, default="", help="model for evaluation")
+    parser.add_argument(
+        "--endpoint",
+        type=str,
+        default="127.0.0.1:9292",
+        help="endpoint of server")
+    parser.add_argument(
+        "--request", type=str, default="rpc", help="mode of service")
     return parser.parse_args()
diff --git a/python/paddle_serving_server/serve.py b/python/paddle_serving_server/serve.py
index fe9dcd4d..c86c3f46 100644
--- a/python/paddle_serving_server/serve.py
+++ b/python/paddle_serving_server/serve.py
@@ -19,16 +19,26 @@ Usage:
 """
 import argparse

-def parse_args():
+
+def parse_args():  # pylint: disable=doc-string-missing
     parser = argparse.ArgumentParser("serve")
-    parser.add_argument("--thread", type=int, default=10, help="Concurrency of server")
-    parser.add_argument("--model", type=str, default="", help="Model for serving")
-    parser.add_argument("--port", type=int, default=9292, help="Port the server")
-    parser.add_argument("--workdir", type=str, default="workdir", help="Working dir of current service")
-    parser.add_argument("--device", type=str, default="cpu", help="Type of device")
+    parser.add_argument(
+        "--thread", type=int, default=10, help="Concurrency of server")
+    parser.add_argument(
+        "--model", type=str, default="", help="Model for serving")
+    parser.add_argument(
+        "--port", type=int, default=9292, help="Port of the server")
+    parser.add_argument(
+        "--workdir",
+        type=str,
+        default="workdir",
+        help="Working dir of current service")
+    parser.add_argument(
+        "--device", type=str, default="cpu", help="Type of device")
     return parser.parse_args()

-def start_standard_model():
+
+def start_standard_model():  # pylint: disable=doc-string-missing
     args = parse_args()
     thread_num = args.thread
     model = args.model
@@ -59,5 +69,6 @@ def start_standard_model():
     server.prepare_server(workdir=workdir, port=port, device=device)
     server.run_server()

+
 if __name__ == "__main__":
     start_standard_model()
diff --git a/python/paddle_serving_server/web_serve.py b/python/paddle_serving_server/web_serve.py
index 337584c8..46437ad5 100644
--- a/python/paddle_serving_server/web_serve.py
+++ b/python/paddle_serving_server/web_serve.py
@@ -21,19 +21,31 @@ import argparse
 from multiprocessing import Pool, Process
 from .web_service import WebService

-def parse_args():
+
+def parse_args():  # pylint: disable=doc-string-missing
     parser = argparse.ArgumentParser("web_serve")
-    parser.add_argument("--thread", type=int, default=10, help="Concurrency of server")
-    parser.add_argument("--model", type=str, default="", help="Model for serving")
-    parser.add_argument("--port", type=int, default=9292, help="Port the server")
-    parser.add_argument("--workdir", type=str, default="workdir", help="Working dir of current service")
-    parser.add_argument("--device", type=str, default="cpu", help="Type of device")
-    parser.add_argument("--name", type=str, default="default", help="Default service name")
+    parser.add_argument(
+        "--thread", type=int, default=10, help="Concurrency of server")
+    parser.add_argument(
+        "--model", type=str, default="", help="Model for serving")
+    parser.add_argument(
+        "--port", type=int, default=9292, help="Port of the server")
+    parser.add_argument(
+        "--workdir",
+        type=str,
+        default="workdir",
+        help="Working dir of current service")
+    parser.add_argument(
+        "--device", type=str, default="cpu", help="Type of device")
+    parser.add_argument(
+        "--name", type=str, default="default", help="Default service name")
     return parser.parse_args()

+
 if __name__ == "__main__":
     args = parse_args()
     service = WebService(name=args.name)
     service.load_model_config(args.model)
-    service.prepare_server(workdir=args.workdir, port=args.port, device=args.device)
+    service.prepare_server(
+        workdir=args.workdir, port=args.port, device=args.device)
     service.run_server()
diff --git a/python/paddle_serving_server/web_service.py b/python/paddle_serving_server/web_service.py
index d54dc776..78cc0e6e 100755
--- a/python/paddle_serving_server/web_service.py
+++ b/python/paddle_serving_server/web_service.py
@@ -12,11 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #!flask/bin/python
+# pylint: disable=doc-string-missing
+
 from flask import Flask, request, abort
 from multiprocessing import Pool, Process
 from paddle_serving_server import OpMaker, OpSeqMaker, Server
 from paddle_serving_client import Client

+
 class WebService(object):
     def __init__(self, name="default_service"):
         self.name = name
@@ -38,7 +41,7 @@ class WebService(object):
         server.set_num_threads(16)
         server.load_model_config(self.model_config)
         server.prepare_server(
-            workdir=self.workdir, port=self.port+1, device=self.device)
+            workdir=self.workdir, port=self.port + 1, device=self.device)
         server.run_server()

     def prepare_server(self, workdir="", port=9393, device="cpu"):
@@ -51,8 +54,9 @@ class WebService(object):
         client_service = Client()
         client_service.load_client_config(
             "{}/serving_server_conf.prototxt".format(self.model_config))
-        client_service.connect(["127.0.0.1:{}".format(self.port+1)])
+        client_service.connect(["127.0.0.1:{}".format(self.port + 1)])
         service_name = "/" + self.name + "/prediction"
+
         @app_instance.route(service_name, methods=['POST'])
         def get_prediction():
             if not request.json:
@@ -63,15 +67,21 @@ class WebService(object):
             if "fetch" in feed:
                 del feed["fetch"]
             fetch_map = client_service.predict(feed=feed, fetch=fetch)
-            fetch_map = self.postprocess(feed=request.json, fetch=fetch, fetch_map=fetch_map)
+            fetch_map = self.postprocess(
+                feed=request.json, fetch=fetch, fetch_map=fetch_map)
             return fetch_map
-        app_instance.run(host="127.0.0.1", port=self.port, threaded=False, processes=1)
+
+        app_instance.run(host="127.0.0.1",
+                         port=self.port,
+                         threaded=False,
+                         processes=1)

     def run_server(self):
         import socket
         localIP = socket.gethostbyname(socket.gethostname())
         print("web service address:")
-        print("http://{}:{}/{}/prediction".format(localIP, self.port, self.name))
+        print("http://{}:{}/{}/prediction".format(localIP, self.port,
+                                                  self.name))
         p_rpc = Process(target=self._launch_rpc_service)
         p_web = Process(target=self._launch_web_service)
         p_rpc.start()
@@ -84,4 +94,3 @@ class WebService(object):

     def postprocess(self, feed={}, fetch=[], fetch_map={}):
         return fetch_map
-
diff --git a/python/paddle_serving_server_gpu/__init__.py b/python/paddle_serving_server_gpu/__init__.py
index d3bed0ee..31ba66cf 100644
--- a/python/paddle_serving_server_gpu/__init__.py
+++ b/python/paddle_serving_server_gpu/__init__.py
@@ -11,6 +11,7 @@
 # WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=doc-string-missing import os from .proto import server_configure_pb2 as server_sdk @@ -22,6 +23,7 @@ import paddle_serving_server_gpu as paddle_serving_server from version import serving_server_version from contextlib import closing + def serve_args(): parser = argparse.ArgumentParser("serve") parser.add_argument( @@ -37,12 +39,12 @@ def serve_args(): help="Working dir of current service") parser.add_argument( "--device", type=str, default="gpu", help="Type of device") - parser.add_argument( - "--gpu_ids", type=str, default="", help="gpu ids") + parser.add_argument("--gpu_ids", type=str, default="", help="gpu ids") parser.add_argument( "--name", type=str, default="default", help="Default service name") return parser.parse_args() + class OpMaker(object): def __init__(self): self.op_dict = { diff --git a/python/paddle_serving_server_gpu/serve.py b/python/paddle_serving_server_gpu/serve.py index 00da8784..016815e4 100644 --- a/python/paddle_serving_server_gpu/serve.py +++ b/python/paddle_serving_server_gpu/serve.py @@ -22,7 +22,7 @@ from multiprocessing import Pool, Process from paddle_serving_server_gpu import serve_args -def start_gpu_card_model(gpuid, args): +def start_gpu_card_model(gpuid, args): # pylint: disable=doc-string-missing gpuid = int(gpuid) device = "gpu" port = args.port @@ -43,7 +43,7 @@ def start_gpu_card_model(gpuid, args): read_op = op_maker.create('general_reader') general_infer_op = op_maker.create('general_infer') general_response_op = op_maker.create('general_response') - + op_seq_maker = serving.OpSeqMaker() op_seq_maker.add_op(read_op) op_seq_maker.add_op(general_infer_op) @@ -59,7 +59,8 @@ def start_gpu_card_model(gpuid, args): server.set_gpuid(gpuid) server.run_server() -def start_multi_card(args): + +def start_multi_card(args): # pylint: disable=doc-string-missing gpus = "" if args.gpu_ids == "": gpus = os.environ["CUDA_VISIBLE_DEVICES"] @@ -70,13 +71,17 @@ def start_multi_card(args): else: gpu_processes = [] for i, gpu_id in enumerate(gpus): - p = Process(target=start_gpu_card_model, args=(i, args, )) + p = Process( + target=start_gpu_card_model, args=( + i, + args, )) gpu_processes.append(p) for p in gpu_processes: p.start() for p in gpu_processes: p.join() - + + if __name__ == "__main__": args = serve_args() start_multi_card(args) diff --git a/python/paddle_serving_server_gpu/web_service.py b/python/paddle_serving_server_gpu/web_service.py index b77d0a69..c90a6319 100755 --- a/python/paddle_serving_server_gpu/web_service.py +++ b/python/paddle_serving_server_gpu/web_service.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
#!flask/bin/python +# pylint: disable=doc-string-missing + from flask import Flask, request, abort from multiprocessing import Pool, Process from paddle_serving_server_gpu import OpMaker, OpSeqMaker, Server @@ -34,8 +36,11 @@ class WebService(object): def set_gpus(self, gpus): self.gpus = gpus - def default_rpc_service(self, workdir="conf", port=9292, - gpuid=0, thread_num=10): + def default_rpc_service(self, + workdir="conf", + port=9292, + gpuid=0, + thread_num=10): device = "gpu" if gpuid == -1: device = "cpu" @@ -43,16 +48,16 @@ class WebService(object): read_op = op_maker.create('general_reader') general_infer_op = op_maker.create('general_infer') general_response_op = op_maker.create('general_response') - + op_seq_maker = serving.OpSeqMaker() op_seq_maker.add_op(read_op) op_seq_maker.add_op(general_infer_op) op_seq_maker.add_op(general_response_op) - + server = serving.Server() server.set_op_sequence(op_seq_maker.get_op_sequence()) server.set_num_threads(thread_num) - + server.load_model_config(self.model_config) if gpuid >= 0: server.set_gpuid(gpuid) @@ -70,14 +75,16 @@ class WebService(object): if len(self.gpus) == 0: # init cpu service self.rpc_service_list.append( - self.default_rpc_service(self.workdir, self.port+1, - -1, thread_num=10)) + self.default_rpc_service( + self.workdir, self.port + 1, -1, thread_num=10)) else: for i, gpuid in enumerate(self.gpus): self.rpc_service_list.append( - self.default_rpc_service("{}_{}".format(self.workdir, i), - self.port+1+i, - gpuid, thread_num=10)) + self.default_rpc_service( + "{}_{}".format(self.workdir, i), + self.port + 1 + i, + gpuid, + thread_num=10)) def _launch_web_service(self, gpu_num): app_instance = Flask(__name__) @@ -100,8 +107,7 @@ class WebService(object): if "fetch" not in request.json: abort(400) feed, fetch = self.preprocess(request.json, request.json["fetch"]) - fetch_map = client_list[0].predict( - feed=feed, fetch=fetch) + fetch_map = client_list[0].predict(feed=feed, fetch=fetch) fetch_map = self.postprocess( feed=request.json, fetch=fetch, fetch_map=fetch_map) return fetch_map @@ -120,13 +126,14 @@ class WebService(object): rpc_processes = [] for idx in range(len(self.rpc_service_list)): - p_rpc = Process(target=self._launch_rpc_service, args=(idx,)) + p_rpc = Process(target=self._launch_rpc_service, args=(idx, )) rpc_processes.append(p_rpc) for p in rpc_processes: p.start() - p_web = Process(target=self._launch_web_service, args=(len(self.gpus),)) + p_web = Process( + target=self._launch_web_service, args=(len(self.gpus), )) p_web.start() for p in rpc_processes: p.join() diff --git a/python/setup.py.in b/python/setup.py.in index 90d2fcd5..af7036bd 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -77,4 +77,3 @@ setup( ], license='Apache 2.0', keywords=('paddle-serving serving-client deployment industrial easy-to-use')) - diff --git a/python/setup.py.server.in b/python/setup.py.server.in index e852f02c..4c726aa3 100644 --- a/python/setup.py.server.in +++ b/python/setup.py.server.in @@ -73,4 +73,3 @@ setup( ], license='Apache 2.0', keywords=('paddle-serving serving-server deployment industrial easy-to-use')) - diff --git a/tools/Dockerfile.ci b/tools/Dockerfile.ci index 7943df77..c3ababc7 100644 --- a/tools/Dockerfile.ci +++ b/tools/Dockerfile.ci @@ -1,23 +1,34 @@ FROM centos:7.3.1611 -RUN yum -y install wget \ - && yum -y install gcc gcc-c++ make glibc-static which \ - && yum -y install git openssl-devel curl-devel bzip2-devel python-devel \ - && wget 
https://cmake.org/files/v3.2/cmake-3.2.0-Linux-x86_64.tar.gz \ +RUN yum -y install wget >/dev/null \ + && yum -y install gcc gcc-c++ make glibc-static which >/dev/null \ + && yum -y install git openssl-devel curl-devel bzip2-devel python-devel >/dev/null \ + && wget https://cmake.org/files/v3.2/cmake-3.2.0-Linux-x86_64.tar.gz >/dev/null \ && tar xzf cmake-3.2.0-Linux-x86_64.tar.gz \ && mv cmake-3.2.0-Linux-x86_64 /usr/local/cmake3.2.0 \ && echo 'export PATH=/usr/local/cmake3.2.0/bin:$PATH' >> /root/.bashrc \ - && wget https://dl.google.com/go/go1.14.linux-amd64.tar.gz \ + && rm cmake-3.2.0-Linux-x86_64.tar.gz \ + && wget https://dl.google.com/go/go1.14.linux-amd64.tar.gz >/dev/null \ && tar xzf go1.14.linux-amd64.tar.gz \ && mv go /usr/local/go \ && echo 'export GOROOT=/usr/local/go' >> /root/.bashrc \ && echo 'export PATH=/usr/local/go/bin:$PATH' >> /root/.bashrc \ - && yum -y install python-devel sqlite-devel \ + && rm go1.14.linux-amd64.tar.gz \ + && yum -y install python-devel sqlite-devel >/dev/null \ && curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py >/dev/null \ && python get-pip.py >/dev/null \ && pip install google protobuf setuptools wheel flask >/dev/null \ + && rm get-pip.py \ && wget http://nixos.org/releases/patchelf/patchelf-0.10/patchelf-0.10.tar.bz2 \ - && yum -y install bzip2 \ + && yum -y install bzip2 >/dev/null \ && tar -jxf patchelf-0.10.tar.bz2 \ && cd patchelf-0.10 \ && ./configure --prefix=/usr \ - && make >/dev/null && make install >/dev/null + && make >/dev/null && make install >/dev/null \ + && cd .. \ + && rm -rf patchelf-0.10* \ + && yum -y update >/dev/null \ + && yum -y install dnf >/dev/null \ + && yum -y install dnf-plugins-core >/dev/null \ + && dnf copr enable alonid/llvm-3.8.0 -y \ + && dnf install llvm-3.8.0 clang-3.8.0 compiler-rt-3.8.0 -y \ + && echo 'export PATH=/opt/llvm-3.8.0/bin:$PATH' >> /root/.bashrc diff --git a/tools/cpp_examples/demo-client/paddle_serving_client.egg-info/SOURCES.txt b/tools/cpp_examples/demo-client/paddle_serving_client.egg-info/SOURCES.txt index 2f2ba80c..6ae9aeb0 100644 --- a/tools/cpp_examples/demo-client/paddle_serving_client.egg-info/SOURCES.txt +++ b/tools/cpp_examples/demo-client/paddle_serving_client.egg-info/SOURCES.txt @@ -3,4 +3,4 @@ paddle_serving_client.egg-info/PKG-INFO paddle_serving_client.egg-info/SOURCES.txt paddle_serving_client.egg-info/dependency_links.txt paddle_serving_client.egg-info/not-zip-safe -paddle_serving_client.egg-info/top_level.txt \ No newline at end of file +paddle_serving_client.egg-info/top_level.txt diff --git a/tools/cpp_examples/demo-client/paddle_serving_client.egg-info/dependency_links.txt b/tools/cpp_examples/demo-client/paddle_serving_client.egg-info/dependency_links.txt index 8b137891..e69de29b 100644 --- a/tools/cpp_examples/demo-client/paddle_serving_client.egg-info/dependency_links.txt +++ b/tools/cpp_examples/demo-client/paddle_serving_client.egg-info/dependency_links.txt @@ -1 +0,0 @@ - diff --git a/tools/cpp_examples/demo-client/src/general_model.cpp b/tools/cpp_examples/demo-client/src/general_model.cpp index c281c54f..5a6b9959 100644 --- a/tools/cpp_examples/demo-client/src/general_model.cpp +++ b/tools/cpp_examples/demo-client/src/general_model.cpp @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#include "general_model.h" // NOLINT #include -#include "general_model.h" #include "sdk-cpp/builtin_format.pb.h" #include "sdk-cpp/include/common.h" #include "sdk-cpp/include/predictor_sdk.h" @@ -28,7 +28,7 @@ namespace baidu { namespace paddle_serving { namespace general_model { -void PredictorClient::init(const std::string & conf_file) { +void PredictorClient::init(const std::string &conf_file) { _conf_file = conf_file; std::ifstream fin(conf_file); if (!fin) { @@ -65,9 +65,8 @@ void PredictorClient::init(const std::string & conf_file) { } } -void PredictorClient::set_predictor_conf( - const std::string & conf_path, - const std::string & conf_file) { +void PredictorClient::set_predictor_conf(const std::string &conf_path, + const std::string &conf_file) { _predictor_path = conf_path; _predictor_conf = conf_file; } @@ -80,53 +79,51 @@ int PredictorClient::create_predictor() { _api.thrd_initialize(); } -void PredictorClient::predict( - const std::vector > & float_feed, - const std::vector & float_feed_name, - const std::vector > & int_feed, - const std::vector & int_feed_name, - const std::vector & fetch_name, - FetchedMap * fetch_result) { - +void PredictorClient::predict(const std::vector> &float_feed, + const std::vector &float_feed_name, + const std::vector> &int_feed, + const std::vector &int_feed_name, + const std::vector &fetch_name, + FetchedMap *fetch_result) { _api.thrd_clear(); _predictor = _api.fetch_predictor("general_model"); Request req; std::vector tensor_vec; - FeedInst * inst = req.add_insts(); - for (auto & name : float_feed_name) { + FeedInst *inst = req.add_insts(); + for (auto &name : float_feed_name) { tensor_vec.push_back(inst->add_tensor_array()); } - for (auto & name : int_feed_name) { + for (auto &name : int_feed_name) { tensor_vec.push_back(inst->add_tensor_array()); } int vec_idx = 0; - for (auto & name : float_feed_name) { + for (auto &name : float_feed_name) { int idx = _feed_name_to_idx[name]; - Tensor * tensor = tensor_vec[idx]; + Tensor *tensor = tensor_vec[idx]; for (int j = 0; j < _shape[idx].size(); ++j) { tensor->add_shape(_shape[idx][j]); } tensor->set_elem_type(1); for (int j = 0; j < float_feed[vec_idx].size(); ++j) { - tensor->add_data( - (char *)(&(float_feed[vec_idx][j])), sizeof(float)); + tensor->add_data((char *)(&(float_feed[vec_idx][j])), // NOLINT + sizeof(float)); } vec_idx++; } vec_idx = 0; - for (auto & name : int_feed_name) { + for (auto &name : int_feed_name) { int idx = _feed_name_to_idx[name]; - Tensor * tensor = tensor_vec[idx]; + Tensor *tensor = tensor_vec[idx]; for (int j = 0; j < _shape[idx].size(); ++j) { tensor->add_shape(_shape[idx][j]); } tensor->set_elem_type(0); for (int j = 0; j < int_feed[vec_idx].size(); ++j) { - tensor->add_data( - (char *)(&(int_feed[vec_idx][j])), sizeof(int64_t)); + tensor->add_data((char *)(&(int_feed[vec_idx][j])), // NOLINT + sizeof(int64_t)); } vec_idx++; } @@ -139,13 +136,13 @@ void PredictorClient::predict( LOG(ERROR) << "failed call predictor with req: " << req.ShortDebugString(); exit(-1); } else { - for (auto & name : fetch_name) { + for (auto &name : fetch_name) { int idx = _fetch_name_to_idx[name]; int len = res.insts(0).tensor_array(idx).data_size(); (*fetch_result)[name].resize(len); for (int i = 0; i < len; ++i) { - (*fetch_result)[name][i] = *(const float *) - res.insts(0).tensor_array(idx).data(i).c_str(); + (*fetch_result)[name][i] = + *(const float *)res.insts(0).tensor_array(idx).data(i).c_str(); } } } @@ -154,12 +151,12 @@ void PredictorClient::predict( } void 
PredictorClient::predict_with_profile( - const std::vector > & float_feed, - const std::vector & float_feed_name, - const std::vector > & int_feed, - const std::vector & int_feed_name, - const std::vector & fetch_name, - FetchedMap * fetch_result) { + const std::vector> &float_feed, + const std::vector &float_feed_name, + const std::vector> &int_feed, + const std::vector &int_feed_name, + const std::vector &fetch_name, + FetchedMap *fetch_result) { return; } diff --git a/tools/cpp_examples/demo-client/src/general_model.h b/tools/cpp_examples/demo-client/src/general_model.h index 91084a61..76143865 100644 --- a/tools/cpp_examples/demo-client/src/general_model.h +++ b/tools/cpp_examples/demo-client/src/general_model.h @@ -18,9 +18,9 @@ #include #include +#include #include #include -#include #include "sdk-cpp/builtin_format.pb.h" #include "sdk-cpp/general_model_service.pb.h" @@ -37,44 +37,40 @@ namespace general_model { typedef std::map> FetchedMap; - class PredictorClient { public: PredictorClient() {} ~PredictorClient() {} - void init(const std::string & client_conf); - void set_predictor_conf( - const std::string& conf_path, - const std::string& conf_file); + void init(const std::string& client_conf); + void set_predictor_conf(const std::string& conf_path, + const std::string& conf_file); int create_predictor(); - void predict( - const std::vector > & float_feed, - const std::vector & float_feed_name, - const std::vector > & int_feed, - const std::vector & int_feed_name, - const std::vector & fetch_name, - FetchedMap * result_map); + void predict(const std::vector>& float_feed, + const std::vector& float_feed_name, + const std::vector>& int_feed, + const std::vector& int_feed_name, + const std::vector& fetch_name, + FetchedMap* result_map); - void predict_with_profile( - const std::vector > & float_feed, - const std::vector & float_feed_name, - const std::vector > & int_feed, - const std::vector & int_feed_name, - const std::vector & fetch_name, - FetchedMap * result_map); + void predict_with_profile(const std::vector>& float_feed, + const std::vector& float_feed_name, + const std::vector>& int_feed, + const std::vector& int_feed_name, + const std::vector& fetch_name, + FetchedMap* result_map); private: PredictorApi _api; - Predictor * _predictor; + Predictor* _predictor; std::string _predictor_conf; std::string _predictor_path; std::string _conf_file; std::map _feed_name_to_idx; std::map _fetch_name_to_idx; std::map _fetch_name_to_var_name; - std::vector > _shape; + std::vector> _shape; }; } // namespace general_model diff --git a/tools/cpp_examples/demo-client/src/general_model_main.cpp b/tools/cpp_examples/demo-client/src/general_model_main.cpp index 8bf2e179..b7171e12 100644 --- a/tools/cpp_examples/demo-client/src/general_model_main.cpp +++ b/tools/cpp_examples/demo-client/src/general_model_main.cpp @@ -15,20 +15,20 @@ #include #include -#include "general_model.h" +#include "general_model.h" // NOLINT -using namespace std; +using namespace std; // NOLINT using baidu::paddle_serving::general_model::PredictorClient; using baidu::paddle_serving::general_model::FetchedMap; -int main(int argc, char * argv[]) { - PredictorClient * client = new PredictorClient(); +int main(int argc, char* argv[]) { + PredictorClient* client = new PredictorClient(); client->init("inference.conf"); client->set_predictor_conf("./", "predictor.conf"); client->create_predictor(); - std::vector > float_feed; - std::vector > int_feed; + std::vector> float_feed; + std::vector> int_feed; std::vector float_feed_name; 
std::vector<std::string> int_feed_name = {"words", "label"}; std::vector<std::string> fetch_name = {"cost", "acc", "prediction"}; @@ -53,13 +53,14 @@ int main(int argc, char * argv[]) { cin >> label; int_feed.push_back({label}); - FetchedMap result; - client->predict( - float_feed, float_feed_name, - int_feed, int_feed_name, fetch_name, - &result); + client->predict(float_feed, + float_feed_name, + int_feed, + int_feed_name, + fetch_name, + &result); cout << label << "\t" << result["prediction"][1] << endl; diff --git a/tools/cpp_examples/demo-client/src/load_general_model.cpp b/tools/cpp_examples/demo-client/src/load_general_model.cpp index c7d4ab19..ea6632b8 100644 --- a/tools/cpp_examples/demo-client/src/load_general_model.cpp +++ b/tools/cpp_examples/demo-client/src/load_general_model.cpp @@ -18,14 +18,14 @@ #include #include "core/sdk-cpp/builtin_format.pb.h" -#include "core/sdk-cpp/load_general_model_service.pb.h" #include "core/sdk-cpp/include/common.h" #include "core/sdk-cpp/include/predictor_sdk.h" +#include "core/sdk-cpp/load_general_model_service.pb.h" using baidu::paddle_serving::sdk_cpp::Predictor; using baidu::paddle_serving::sdk_cpp::PredictorApi; -using baidu::paddle_serving::predictor:: -load_general_model_service::RequestAndResponse; +using baidu::paddle_serving::predictor::load_general_model_service:: + RequestAndResponse; int create_req(RequestAndResponse& req) { // NOLINT req.set_a(1); diff --git a/tools/cpp_examples/demo-client/src/pybind_general_model.cpp b/tools/cpp_examples/demo-client/src/pybind_general_model.cpp index 0ce939eb..dda26525 100644 --- a/tools/cpp_examples/demo-client/src/pybind_general_model.cpp +++ b/tools/cpp_examples/demo-client/src/pybind_general_model.cpp @@ -1,7 +1,21 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ #include -#include "general_model.h" +#include "general_model.h" // NOLINT -#include +#include // NOLINT namespace py = pybind11; @@ -17,28 +31,30 @@ PYBIND11_MODULE(paddle_serving_client, m) { py::class_<PredictorClient>(m, "PredictorClient", py::buffer_protocol()) .def(py::init<>()) .def("init", - [](PredictorClient &self, const std::string & conf) { + [](PredictorClient &self, const std::string &conf) { self.init(conf); }) .def("set_predictor_conf", - [](PredictorClient &self, const std::string & conf_path, - const std::string & conf_file) { + [](PredictorClient &self, + const std::string &conf_path, + const std::string &conf_file) { self.set_predictor_conf(conf_path, conf_file); }) .def("create_predictor", - [](PredictorClient & self) { - self.create_predictor(); - }) + [](PredictorClient &self) { self.create_predictor(); }) .def("predict", [](PredictorClient &self, - const std::vector<std::vector<float> > & float_feed, - const std::vector<std::string> & float_feed_name, - const std::vector<std::vector<int64_t> > & int_feed, - const std::vector<std::string> & int_feed_name, - const std::vector<std::string> & fetch_name, - FetchedMap * fetch_result) { - return self.predict(float_feed, float_feed_name, - int_feed, int_feed_name, fetch_name, + const std::vector<std::vector<float>> &float_feed, + const std::vector<std::string> &float_feed_name, + const std::vector<std::vector<int64_t>> &int_feed, + const std::vector<std::string> &int_feed_name, + const std::vector<std::string> &fetch_name, + FetchedMap *fetch_result) { + return self.predict(float_feed, + float_feed_name, + int_feed, + int_feed_name, + fetch_name, fetch_result); }); } diff --git a/tools/cpp_examples/demo-serving/op/bert_service_op.h b/tools/cpp_examples/demo-serving/op/bert_service_op.h index 37875011..b4934d06 100644 --- a/tools/cpp_examples/demo-serving/op/bert_service_op.h +++ b/tools/cpp_examples/demo-serving/op/bert_service_op.h @@ -14,10 +14,10 @@ #pragma once #include -#include "paddle_inference_api.h" // NOLINT #include "examples/demo-serving/bert_service.pb.h" +#include "paddle_inference_api.h" // NOLINT -#include +#include // NOLINT namespace baidu { namespace paddle_serving { diff --git a/tools/cpp_examples/demo-serving/op/classify_op.cpp b/tools/cpp_examples/demo-serving/op/classify_op.cpp index 76370e2c..27ac8ae3 100644 --- a/tools/cpp_examples/demo-serving/op/classify_op.cpp +++ b/tools/cpp_examples/demo-serving/op/classify_op.cpp @@ -13,9 +13,9 @@ // limitations under the License.
#include "examples/demo-serving/op/classify_op.h" -#include "examples/demo-serving/op/reader_op.h" #include "core/predictor/framework/infer.h" #include "core/predictor/framework/memory.h" +#include "examples/demo-serving/op/reader_op.h" namespace baidu { namespace paddle_serving { diff --git a/tools/cpp_examples/demo-serving/op/classify_op.h b/tools/cpp_examples/demo-serving/op/classify_op.h index be72bad6..0d3f1428 100644 --- a/tools/cpp_examples/demo-serving/op/classify_op.h +++ b/tools/cpp_examples/demo-serving/op/classify_op.h @@ -14,8 +14,8 @@ #pragma once #include -#include "paddle_inference_api.h" // NOLINT #include "examples/demo-serving/image_class.pb.h" +#include "paddle_inference_api.h" // NOLINT namespace baidu { namespace paddle_serving { diff --git a/tools/cpp_examples/demo-serving/op/ctr_prediction_op.h b/tools/cpp_examples/demo-serving/op/ctr_prediction_op.h index fbcfc204..7c9e8447 100644 --- a/tools/cpp_examples/demo-serving/op/ctr_prediction_op.h +++ b/tools/cpp_examples/demo-serving/op/ctr_prediction_op.h @@ -14,8 +14,8 @@ #pragma once #include -#include "paddle_inference_api.h" // NOLINT #include "examples/demo-serving/ctr_prediction.pb.h" +#include "paddle_inference_api.h" // NOLINT namespace baidu { namespace paddle_serving { diff --git a/tools/cpp_examples/demo-serving/op/general_model_op.h b/tools/cpp_examples/demo-serving/op/general_model_op.h index 12a59497..eb5acf33 100644 --- a/tools/cpp_examples/demo-serving/op/general_model_op.h +++ b/tools/cpp_examples/demo-serving/op/general_model_op.h @@ -25,7 +25,6 @@ #endif #include "examples/demo-serving/general_model_service.pb.h" - namespace baidu { namespace paddle_serving { namespace serving { @@ -34,7 +33,7 @@ static const char* GENERAL_MODEL_NAME = "general_model"; class GeneralModelOp : public baidu::paddle_serving::predictor::OpWithChannel< - baidu::paddle_serving::predictor::general_model::Response> { + baidu::paddle_serving::predictor::general_model::Response> { public: typedef std::vector TensorVector; diff --git a/tools/cpp_examples/demo-serving/op/kvdb_echo_op.h b/tools/cpp_examples/demo-serving/op/kvdb_echo_op.h index 1a3dd81c..6dbb9386 100644 --- a/tools/cpp_examples/demo-serving/op/kvdb_echo_op.h +++ b/tools/cpp_examples/demo-serving/op/kvdb_echo_op.h @@ -13,13 +13,13 @@ // limitations under the License. 
#pragma once -#include "examples/demo-serving/echo_kvdb_service.pb.h" #include "core/kvdb/include/kvdb/paddle_rocksdb.h" #include "core/predictor/common/inner_common.h" #include "core/predictor/framework/channel.h" #include "core/predictor/framework/op_repository.h" #include "core/predictor/framework/resource.h" #include "core/predictor/op/op.h" +#include "examples/demo-serving/echo_kvdb_service.pb.h" namespace baidu { namespace paddle_serving { diff --git a/tools/cpp_examples/demo-serving/op/load_general_model_conf_op.h b/tools/cpp_examples/demo-serving/op/load_general_model_conf_op.h index 461d14ef..c07bd954 100644 --- a/tools/cpp_examples/demo-serving/op/load_general_model_conf_op.h +++ b/tools/cpp_examples/demo-serving/op/load_general_model_conf_op.h @@ -15,25 +15,23 @@ #pragma once #include -#include "examples/demo-serving/load_general_model_service.pb.h" #include "core/predictor/common/inner_common.h" #include "core/predictor/framework/channel.h" #include "core/predictor/framework/op_repository.h" -#include "core/predictor/op/op.h" #include "core/predictor/framework/resource.h" +#include "core/predictor/op/op.h" +#include "examples/demo-serving/load_general_model_service.pb.h" namespace baidu { namespace paddle_serving { namespace predictor { class LoadGeneralModelConfOp - : public OpWithChannel< - baidu::paddle_serving::predictor:: - load_general_model_service::RequestAndResponse> { + : public OpWithChannel<baidu::paddle_serving::predictor::load_general_model_service::RequestAndResponse> { public: - typedef baidu::paddle_serving::predictor:: - load_general_model_service::RequestAndResponse - RequestAndResponse; + typedef baidu::paddle_serving::predictor::load_general_model_service:: RequestAndResponse RequestAndResponse; DECLARE_OP(LoadGeneralModelConfOp); diff --git a/tools/cpp_examples/demo-serving/op/reader_op.h b/tools/cpp_examples/demo-serving/op/reader_op.h index 6cdb2399..21b1d078 100644 --- a/tools/cpp_examples/demo-serving/op/reader_op.h +++ b/tools/cpp_examples/demo-serving/op/reader_op.h @@ -15,12 +15,12 @@ #pragma once #include #include -#include "examples/demo-serving/image_class.pb.h" #include "core/predictor/builtin_format.pb.h" #include "core/predictor/common/inner_common.h" #include "core/predictor/framework/channel.h" #include "core/predictor/framework/op_repository.h" #include "core/predictor/op/op.h" +#include "examples/demo-serving/image_class.pb.h" // opencv #include "opencv/cv.h" diff --git a/tools/cpp_examples/demo-serving/op/text_classification_op.h b/tools/cpp_examples/demo-serving/op/text_classification_op.h index ab462ee5..f01ab23e 100644 --- a/tools/cpp_examples/demo-serving/op/text_classification_op.h +++ b/tools/cpp_examples/demo-serving/op/text_classification_op.h @@ -14,8 +14,8 @@ #pragma once #include -#include "paddle_inference_api.h" // NOLINT #include "examples/demo-serving/text_classification.pb.h" +#include "paddle_inference_api.h" // NOLINT namespace baidu { namespace paddle_serving { diff --git a/tools/cpp_examples/demo-serving/op/write_json_op.cpp b/tools/cpp_examples/demo-serving/op/write_json_op.cpp index 1b65290d..c49fc4e4 100644 --- a/tools/cpp_examples/demo-serving/op/write_json_op.cpp +++ b/tools/cpp_examples/demo-serving/op/write_json_op.cpp @@ -21,8 +21,8 @@ #include "json2pb/pb_to_json.h" #endif -#include "examples/demo-serving/op/write_json_op.h" #include "core/predictor/framework/memory.h" +#include "examples/demo-serving/op/write_json_op.h" #ifndef BCLOUD using json2pb::ProtoMessageToJson; diff --git a/tools/cpp_examples/demo-serving/op/write_json_op.h b/tools/cpp_examples/demo-serving/op/write_json_op.h
index e292af3d..7a000252 100644 --- a/tools/cpp_examples/demo-serving/op/write_json_op.h +++ b/tools/cpp_examples/demo-serving/op/write_json_op.h @@ -13,11 +13,11 @@ // limitations under the License. #pragma once -#include "examples/demo-serving/image_class.pb.h" #include "core/predictor/common/inner_common.h" #include "core/predictor/framework/channel.h" #include "core/predictor/framework/op_repository.h" #include "core/predictor/op/op.h" +#include "examples/demo-serving/image_class.pb.h" namespace baidu { namespace paddle_serving { diff --git a/tools/cpp_examples/demo-serving/op/write_op.cpp b/tools/cpp_examples/demo-serving/op/write_op.cpp index 3107145a..793493e4 100644 --- a/tools/cpp_examples/demo-serving/op/write_op.cpp +++ b/tools/cpp_examples/demo-serving/op/write_op.cpp @@ -16,13 +16,13 @@ #include #ifdef BCLOUD -#include "pb_to_json.h" +#include "pb_to_json.h" // NOLINT #else #include "json2pb/pb_to_json.h" #endif -#include "examples/demo-serving/op/write_op.h" #include "core/predictor/framework/memory.h" +#include "examples/demo-serving/op/write_op.h" #ifndef BCLOUD using json2pb::ProtoMessageToJson; diff --git a/tools/cpp_examples/demo-serving/op/write_op.h b/tools/cpp_examples/demo-serving/op/write_op.h index d953f80c..51b3da2e 100644 --- a/tools/cpp_examples/demo-serving/op/write_op.h +++ b/tools/cpp_examples/demo-serving/op/write_op.h @@ -13,12 +13,12 @@ // limitations under the License. #pragma once -#include "examples/demo-serving/image_class.pb.h" #include "core/predictor/builtin_format.pb.h" #include "core/predictor/common/inner_common.h" #include "core/predictor/framework/channel.h" #include "core/predictor/framework/op_repository.h" #include "core/predictor/op/op.h" +#include "examples/demo-serving/image_class.pb.h" namespace baidu { namespace paddle_serving { diff --git a/tools/cpp_examples/demo-serving/proto/general_model_service.proto b/tools/cpp_examples/demo-serving/proto/general_model_service.proto index 1b9bfe38..803f7aa0 100644 --- a/tools/cpp_examples/demo-serving/proto/general_model_service.proto +++ b/tools/cpp_examples/demo-serving/proto/general_model_service.proto @@ -25,21 +25,13 @@ message Tensor { repeated int32 shape = 3; }; -message FeedInst { - repeated Tensor tensor_array = 1; -}; +message FeedInst { repeated Tensor tensor_array = 1; }; -message FetchInst { - repeated Tensor tensor_array = 1; -}; +message FetchInst { repeated Tensor tensor_array = 1; }; -message Request { - repeated FeedInst insts = 1; -}; +message Request { repeated FeedInst insts = 1; }; -message Response { - repeated FetchInst insts = 1; -}; +message Response { repeated FetchInst insts = 1; }; service GeneralModelService { rpc inference(Request) returns (Response); diff --git a/tools/cpp_examples/elastic-ctr/client/demo/elastic_ctr.py b/tools/cpp_examples/elastic-ctr/client/demo/elastic_ctr.py index eb36a4ad..b5c95440 100644 --- a/tools/cpp_examples/elastic-ctr/client/demo/elastic_ctr.py +++ b/tools/cpp_examples/elastic-ctr/client/demo/elastic_ctr.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +# pylint: disable=doc-string-missing from __future__ import print_function import json import sys @@ -33,8 +33,8 @@ def str2long(str): return int(str) -def tied_rank(x): - """ +def tied_rank(x): # pylint: disable=doc-string-with-all-args, doc-string-with-returns + """ Computes the tied rank of elements in x. 
This function computes the tied rank of elements in x. Parameters @@ -45,23 +45,23 @@ def tied_rank(x): score : list of numbers The tied rank of each element in x """ - sorted_x = sorted(zip(x,range(len(x)))) + sorted_x = sorted(zip(x, range(len(x)))) r = [0 for k in x] cur_val = sorted_x[0][0] last_rank = 0 for i in range(len(sorted_x)): if cur_val != sorted_x[i][0]: cur_val = sorted_x[i][0] - for j in range(last_rank, i): - r[sorted_x[j][1]] = float(last_rank+1+i)/2.0 + for j in range(last_rank, i): + r[sorted_x[j][1]] = float(last_rank + 1 + i) / 2.0 last_rank = i - if i==len(sorted_x)-1: - for j in range(last_rank, i+1): - r[sorted_x[j][1]] = float(last_rank+i+2)/2.0 + if i == len(sorted_x) - 1: + for j in range(last_rank, i + 1): + r[sorted_x[j][1]] = float(last_rank + i + 2) / 2.0 return r -def auc(actual, posterior): +def auc(actual, posterior): # pylint: disable=doc-string-with-all-args, doc-string-with-returns """ Computes the area under the receiver-operator characteristic (AUC) This function computes the AUC error metric for binary classification. @@ -78,11 +78,11 @@ def auc(actual, posterior): The area under the ROC curve for actual and posterior """ r = tied_rank(posterior) - num_positive = len([0 for x in actual if x==1]) - num_negative = len(actual)-num_positive - sum_positive = sum([r[i] for i in range(len(r)) if actual[i]==1]) - auc = ((sum_positive - num_positive*(num_positive+1)/2.0) / - (num_negative*num_positive)) + num_positive = len([0 for x in actual if x == 1]) + num_negative = len(actual) - num_positive + sum_positive = sum([r[i] for i in range(len(r)) if actual[i] == 1]) + auc = ((sum_positive - num_positive * (num_positive + 1) / 2.0) / + (num_negative * num_positive)) return auc @@ -105,8 +105,8 @@ def data_reader(data_file, samples, labels): for i in range(0, len(features)): if slots[i] in sample: - sample[slots[i]].append(int(features[i]) % - CTR_EMBEDDING_TABLE_SIZE) + sample[slots[i]].append( + int(features[i]) % CTR_EMBEDDING_TABLE_SIZE) else: sample[slots[i]] = [ int(features[i]) % CTR_EMBEDDING_TABLE_SIZE @@ -117,7 +117,7 @@ sample[x] = [0] samples.append(sample) - + if __name__ == "__main__": """ main """ @@ -180,4 +180,4 @@ if __name__ == "__main__": pass idx = idx + 1 - print("auc = ", auc(labels, result_list) ) + print("auc = ", auc(labels, result_list)) diff --git a/tools/cpp_examples/elastic-ctr/serving/op/elastic_ctr_prediction_op.cpp b/tools/cpp_examples/elastic-ctr/serving/op/elastic_ctr_prediction_op.cpp index 78215d7a..481a99d3 100644 --- a/tools/cpp_examples/elastic-ctr/serving/op/elastic_ctr_prediction_op.cpp +++ b/tools/cpp_examples/elastic-ctr/serving/op/elastic_ctr_prediction_op.cpp @@ -14,8 +14,8 @@ #include "elastic-ctr/serving/op/elastic_ctr_prediction_op.h" #include -#include #include +#include #include "cube/cube-api/include/cube_api.h" #include "predictor/framework/infer.h" #include "predictor/framework/kv_manager.h" @@ -87,7 +87,7 @@ int ElasticCTRPredictionOp::inference() { // Verify all request instances have same slots std::vector<int> slot_ids; - for (auto x: samples[0]) { + for (auto x : samples[0]) { slot_ids.push_back(x.first); } std::sort(slot_ids.begin(), slot_ids.end()); @@ -105,9 +105,9 @@ int ElasticCTRPredictionOp::inference() { // // Later we use slot_map to index into lod_tensor array // - std::map<int, int> slot_map; + std::map<int, int> slot_map; // NOLINT int index = 0; - for (auto slot_id: slot_ids) { + for (auto slot_id : slot_ids) { slot_map[slot_id] = index; ++index; } @@ -121,7 +121,7 @@ int
ElasticCTRPredictionOp::inference() { return 0; } - for (auto slot: samples[i]) { + for (auto slot : samples[i]) { int id = slot.first; auto x = std::find(slot_ids.begin(), slot_ids.end(), id); if (x == slot_ids.end()) { @@ -171,7 +171,7 @@ int ElasticCTRPredictionOp::inference() { feature_slot_sizes.resize(slot_ids.size()); // Iterate over each feature slot - for (auto slot_id: slot_ids) { + for (auto slot_id : slot_ids) { feature_slot_lods[slot_map[slot_id]].push_back(0); feature_slot_sizes[slot_map[slot_id]] = 0; @@ -179,8 +179,8 @@ int ElasticCTRPredictionOp::inference() { for (size_t si = 0; si < samples.size(); ++si) { Sample &sample = samples[si]; std::vector<int64_t> &slot = sample[slot_id]; - feature_slot_lods[slot_map[slot_id]].push_back(feature_slot_lods[slot_map[slot_id]].back() + - slot.size()); + feature_slot_lods[slot_map[slot_id]].push_back( + feature_slot_lods[slot_map[slot_id]].back() + slot.size()); feature_slot_sizes[slot_map[slot_id]] += slot.size(); for (size_t j = 0; j < slot.size(); ++j) { @@ -303,14 +303,11 @@ int ElasticCTRPredictionOp::inference() { std::vector<paddle::PaddleTensor> lod_tensors; lod_tensors.resize(slot_ids.size()); - for (auto slot_id: slot_ids) { + for (auto slot_id : slot_ids) { paddle::PaddleTensor &lod_tensor = lod_tensors[slot_map[slot_id]]; char name[VARIABLE_NAME_LEN]; - snprintf(name, - VARIABLE_NAME_LEN, - "embedding_%d.tmp_0", - slot_id); + snprintf(name, VARIABLE_NAME_LEN, "embedding_%d.tmp_0", slot_id); lod_tensor.name = std::string(name); lod_tensor.dtype = paddle::PaddleDType::FLOAT32; @@ -322,7 +319,7 @@ int ElasticCTRPredictionOp::inference() { int base = 0; // Iterate over all slots - for (auto slot_id: slot_ids) { + for (auto slot_id : slot_ids) { paddle::PaddleTensor &lod_tensor = lod_tensors[slot_map[slot_id]]; std::vector<std::vector<size_t>> &lod = lod_tensor.lod; @@ -346,7 +343,7 @@ int ElasticCTRPredictionOp::inference() { res, -1, "Embedding vector size not expected"); return 0; #else - // sizeof(float) * CTR_PREDICTION_EMBEDDING_SIZE = 36 +// sizeof(float) * CTR_PREDICTION_EMBEDDING_SIZE = 36 #if 1 LOG(INFO) << "values[" << idx << "].buff.size != 36"; #endif diff --git a/tools/cpp_examples/elastic-ctr/serving/op/elastic_ctr_prediction_op.h b/tools/cpp_examples/elastic-ctr/serving/op/elastic_ctr_prediction_op.h index 11679c83..39e7d978 100644 --- a/tools/cpp_examples/elastic-ctr/serving/op/elastic_ctr_prediction_op.h +++ b/tools/cpp_examples/elastic-ctr/serving/op/elastic_ctr_prediction_op.h @@ -14,8 +14,8 @@ #pragma once #include -#include "paddle_inference_api.h" // NOLINT #include "elastic-ctr/serving/elastic_ctr_prediction.pb.h" +#include "paddle_inference_api.h" // NOLINT namespace baidu { namespace paddle_serving { @@ -33,7 +33,7 @@ class ElasticCTRPredictionOp baidu::paddle_serving::predictor::elastic_ctr::Response> { public: typedef std::vector<paddle::PaddleTensor> TensorVector; - typedef std::map<int, std::vector<int64_t>> Sample; + typedef std::map<int, std::vector<int64_t>> Sample; // NOLINT typedef std::vector<Sample> Samples; DECLARE_OP(ElasticCTRPredictionOp); diff --git a/tools/serving-build.sh b/tools/serving_build.sh similarity index 100% rename from tools/serving-build.sh rename to tools/serving_build.sh diff --git a/tools/serving_check_style.sh b/tools/serving_check_style.sh new file mode 100644 index 00000000..714f128d --- /dev/null +++ b/tools/serving_check_style.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +function init() { + source /root/.bashrc + set -v + cd Serving +} + +function abort(){ + echo "Your change doesn't follow PaddlePaddle's code style." 1>&2 + echo "Please use pre-commit to check what is wrong."
1>&2 + exit 1 +} + +function check_style() { + trap 'abort' 0 + set -e + + pip install cpplint 'pre-commit==1.10.4' + + export PATH=/usr/bin:$PATH + pre-commit install + clang-format --version + + if ! pre-commit run -a ; then + git diff + exit 1 + fi + + trap : 0 +} + +function main() { + init + check_style +} + +main -- GitLab
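Note: tools/serving_check_style.sh above is the entry point CI calls; the same checks can be reproduced by hand before pushing. Below is a minimal local sketch, assuming you are at the root of a Serving checkout that contains the repo's .pre-commit-config.yaml and that clang-format is already on PATH (the init step's /root/.bashrc and cd Serving are CI-container specifics, so they are skipped here):

    # install the same pinned linters the CI script installs
    pip install cpplint 'pre-commit==1.10.4'
    # register the hooks and confirm the clang-format binary is visible
    pre-commit install
    clang-format --version
    # run every configured hook against all files, as check_style() does;
    # on failure, inspect the formatting fixes pre-commit applied
    if ! pre-commit run -a ; then
        git diff
    fi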