diff --git a/core/general-server/op/general_dist_kv_infer_op.cpp b/core/general-server/op/general_dist_kv_infer_op.cpp
index 238d4cac3a085ef188f427c8cc3669b7617443d7..957379b594e7dc18516b1a55ec042b2ec9921cc5 100644
--- a/core/general-server/op/general_dist_kv_infer_op.cpp
+++ b/core/general-server/op/general_dist_kv_infer_op.cpp
@@ -40,7 +40,7 @@ using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
 using baidu::paddle_serving::predictor::CubeCache;
 
 // DistKV Infer Op: seek cube and then call paddle inference
-// op seq: general_reader-> dist_kv_infer -> general_response
+// op seq: GeneralReaderOp-> dist_kv_infer -> general_response
 int GeneralDistKVInferOp::inference() {
   VLOG(2) << "Going to run inference";
   const std::vector<std::string> pre_node_names = pre_names();
@@ -186,9 +186,9 @@ int GeneralDistKVInferOp::inference() {
   if (values.size() != keys.size() || values[0].buff.size() == 0) {
     LOG(ERROR) << "cube value return null";
   }
-  size_t EMBEDDING_SIZE = values[0].buff.size() / sizeof(float);
+  size_t EMBEDDING_SIZE = values[0].buff.size() / sizeof(float);
   // size_t EMBEDDING_SIZE = (values[0].buff.size() - 10) / sizeof(float);
-  //size_t EMBEDDING_SIZE = 9;
+  // size_t EMBEDDING_SIZE = 9;
   TensorVector sparse_out;
   sparse_out.resize(sparse_count);
   TensorVector dense_out;
@@ -241,7 +241,7 @@ int GeneralDistKVInferOp::inference() {
       // The data generated by pslib has 10 bytes of information to be filtered
       // out
-      memcpy(data_ptr, cur_val->buff.data(), cur_val->buff.size() );
+      memcpy(data_ptr, cur_val->buff.data(), cur_val->buff.size());
       // VLOG(3) << keys[cube_val_idx] << ":" << data_ptr[0] << ", " <<
      // data_ptr[1] << ", " <
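
For context on the EMBEDDING_SIZE hunk: the patch keeps the formula that treats the whole cube value buffer as floats and leaves the pslib variant commented out, even though the comment near the memcpy says pslib-generated values carry 10 bytes of metadata (the memcpy change itself is whitespace-only, dropping the space before the closing parenthesis). Below is a minimal C++ sketch of the two size interpretations, assuming the cube value's buff field is a std::string as suggested by the buff.size()/buff.data() calls; the helper names and the kPslibHeaderBytes constant are illustrative, not from the source.

#include <cstddef>
#include <string>

// The value 10 comes from the diff's comment about pslib metadata; the
// constant name is hypothetical.
constexpr std::size_t kPslibHeaderBytes = 10;

// Active formula in the patch: the entire cube value buffer is embedding
// floats, so the embedding width is the byte count divided by sizeof(float).
std::size_t EmbeddingSizeRaw(const std::string &buff) {
  return buff.size() / sizeof(float);
}

// Commented-out alternative from the patch: subtract the pslib header bytes
// before counting floats.
std::size_t EmbeddingSizePslib(const std::string &buff) {
  return (buff.size() - kPslibHeaderBytes) / sizeof(float);
}

Note that with the active formula, the unchanged memcpy copies cur_val->buff.size() bytes verbatim, so any pslib header bytes that were present would land in the output tensor as well; the two formulas above only differ when that header actually exists in the buffer.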