From c90fab43898d2e190de31434d79c696e0be006b4 Mon Sep 17 00:00:00 2001 From: HexToString <506181616@qq.com> Date: Tue, 29 Mar 2022 16:35:16 +0800 Subject: [PATCH] fix_bug --- .../op/general_dist_kv_infer_op.cpp | 8 +++---- doc/C++_Serving/Model_Ensemble_CN.md | 8 +++---- doc/C++_Serving/Model_Ensemble_EN.md | 8 +++---- doc/C++_Serving/OP_CN.md | 23 +++++++++++-------- doc/C++_Serving/OP_EN.md | 23 +++++++++++-------- .../C++/PaddleNLP/bert/bert_gpu_server.py | 6 ++--- examples/C++/PaddleNLP/bert/bert_server.py | 6 ++--- .../C++/PaddleRec/criteo_ctr/test_server.py | 8 +++---- python/pipeline/local_service_handler.py | 23 +++++++++---------- 9 files changed, 59 insertions(+), 54 deletions(-) mode change 100644 => 100755 examples/C++/PaddleNLP/bert/bert_gpu_server.py mode change 100644 => 100755 examples/C++/PaddleNLP/bert/bert_server.py mode change 100644 => 100755 examples/C++/PaddleRec/criteo_ctr/test_server.py mode change 100644 => 100755 python/pipeline/local_service_handler.py diff --git a/core/general-server/op/general_dist_kv_infer_op.cpp b/core/general-server/op/general_dist_kv_infer_op.cpp index 238d4cac..957379b5 100644 --- a/core/general-server/op/general_dist_kv_infer_op.cpp +++ b/core/general-server/op/general_dist_kv_infer_op.cpp @@ -40,7 +40,7 @@ using baidu::paddle_serving::predictor::PaddleGeneralModelConfig; using baidu::paddle_serving::predictor::CubeCache; // DistKV Infer Op: seek cube and then call paddle inference -// op seq: general_reader-> dist_kv_infer -> general_response +// op seq: GeneralReaderOp-> dist_kv_infer -> general_response int GeneralDistKVInferOp::inference() { VLOG(2) << "Going to run inference"; const std::vector pre_node_names = pre_names(); @@ -186,9 +186,9 @@ int GeneralDistKVInferOp::inference() { if (values.size() != keys.size() || values[0].buff.size() == 0) { LOG(ERROR) << "cube value return null"; } - size_t EMBEDDING_SIZE = values[0].buff.size() / sizeof(float); // size_t EMBEDDING_SIZE = (values[0].buff.size() - 10) / sizeof(float); - //size_t EMBEDDING_SIZE = 9; + // size_t EMBEDDING_SIZE = 9; TensorVector sparse_out; sparse_out.resize(sparse_count); TensorVector dense_out; @@ -241,7 +241,7 @@ int GeneralDistKVInferOp::inference() { // The data generated by pslib has 10 bytes of information to be filtered // out - memcpy(data_ptr, cur_val->buff.data(), cur_val->buff.size() ); + memcpy(data_ptr, cur_val->buff.data(), cur_val->buff.size()); // VLOG(3) << keys[cube_val_idx] << ":" << data_ptr[0] << ", " << // data_ptr[1] << ", " <