diff --git a/core/general-server/op/general_infer_helper.h b/core/general-server/op/general_infer_helper.h
index 415c888f6880553b8dced11d60ede4961e41fc96..b5d6d7f23f6474cd1b77889ce6192d890920f85e 100644
--- a/core/general-server/op/general_infer_helper.h
+++ b/core/general-server/op/general_infer_helper.h
@@ -45,7 +45,7 @@ struct GeneralBlob {
     tensor_vector.clear();
   }
 
-  int GetBatchSize() {
+  int GetBatchSize() const {
     if (tensor_vector.size() > 0) {
       if (tensor_vector[0].lod.size() == 1) {
         return tensor_vector[0].lod[0].size() - 1;
@@ -58,7 +58,7 @@ struct GeneralBlob {
   }
 
   std::string ShortDebugString() const { return "Not implemented!"; }
-}
+};
 
 }  // namespace serving
 }  // namespace paddle_serving
diff --git a/core/general-server/op/general_infer_op.cpp b/core/general-server/op/general_infer_op.cpp
index 163d7269747d099fc21f6eac63c4a3e3cd61d8d6..a4e8fa451ea85da893ef3e00e741baf307522bf4 100644
--- a/core/general-server/op/general_infer_op.cpp
+++ b/core/general-server/op/general_infer_op.cpp
@@ -16,7 +16,6 @@
 #include 
 #include 
 #include 
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/op/general_infer_op.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
@@ -48,7 +47,7 @@ int GeneralInferOp::inference() {
   const TensorVector *in = &input_blob->tensor_vector;
   TensorVector *out = butil::get_object<TensorVector>();
 
-  int batch_size = in->GetBatchSize();
+  int batch_size = input_blob->GetBatchSize();
   VLOG(2) << "infer batch size: " << batch_size;
 
   // infer
diff --git a/core/general-server/op/general_infer_op.h b/core/general-server/op/general_infer_op.h
index 70b13cf3cc13d05cb87191aed8a1018f29cdbcdf..6c8d9fdc415122baf3eb94aaea5167579835737e 100644
--- a/core/general-server/op/general_infer_op.h
+++ b/core/general-server/op/general_infer_op.h
@@ -25,6 +25,7 @@
 #include "paddle_inference_api.h"  // NOLINT
 #endif
 #include "core/general-server/general_model_service.pb.h"
+#include "core/general-server/op/general_infer_helper.h"
 
 namespace baidu {
 namespace paddle_serving {
diff --git a/core/general-server/op/general_reader_op.cpp b/core/general-server/op/general_reader_op.cpp
index a8b0e52df61c15bae197f3368b60ecffb89b398c..3020dac3bde5ed484b3db4870f35d025ae6a2396 100644
--- a/core/general-server/op/general_reader_op.cpp
+++ b/core/general-server/op/general_reader_op.cpp
@@ -100,8 +100,8 @@ int GeneralReaderOp::inference() {
   VLOG(2) << "print general model config done.";
 
   // TODO(guru4elephant): how to do conditional check?
-  res->reader_status = conf_check(req, model_config);
-  if (res->reader_status != 0) {
+  int ret = conf_check(req, model_config);
+  if (ret != 0) {
     LOG(INFO) << "model conf of server:";
     resource.print_general_model_config(model_config);
     return 0;
diff --git a/core/general-server/op/general_response_op.cpp b/core/general-server/op/general_response_op.cpp
index 377a993dd65d0bcd666c6d767407f80c276cd35a..39624dcd7bb9455f1268c5d2d464b02fdeb16be4 100644
--- a/core/general-server/op/general_response_op.cpp
+++ b/core/general-server/op/general_response_op.cpp
@@ -47,8 +47,7 @@ int GeneralResponseOp::inference() {
   }
 
   const TensorVector *in = &input_blob->tensor_vector;
-  int batch_size = in->GetBatchSize();
-  double infer_time = in->infer_time;
+  int batch_size = input_blob->GetBatchSize();
 
   VLOG(2) << "input batch size: " << batch_size;
 
@@ -72,7 +71,7 @@ int GeneralResponseOp::inference() {
   // response inst with only fetch_var_names
   Response *res = mutable_data<Response>();
 
-  res->set_mean_infer_us(infer_time);
+  // res->set_mean_infer_us(infer_time);
 
   for (int i = 0; i < batch_size; ++i) {
     FetchInst *fetch_inst = res->add_insts();
diff --git a/core/general-server/op/general_text_reader_op.cpp b/core/general-server/op/general_text_reader_op.cpp
index 7f13c1a1e27206ba68e81720c8a4d13c9fad6339..bc6d96a7bb507dcc1c74dac28bfe9a908067ffed 100644
--- a/core/general-server/op/general_text_reader_op.cpp
+++ b/core/general-server/op/general_text_reader_op.cpp
@@ -17,7 +17,6 @@
 #include 
 #include 
 #include "core/general-server/op/general_text_reader_op.h"
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
 
@@ -51,8 +50,8 @@ int GeneralTextReaderOp::inference() {
   }
 
   if (batch_size <= 0) {
-    res->reader_status = -1;
-    return 0;
+    LOG(ERROR) << "Batch size < 0";
+    return -1;
   }
 
   int var_num = req->insts(0).tensor_array_size();
diff --git a/core/general-server/op/general_text_reader_op.h b/core/general-server/op/general_text_reader_op.h
index 3f9eb5deb1a48789c6a85674de73c333aa08f1af..80573a15cbbacf0d2682ea1b225ef7732d54e9ad 100644
--- a/core/general-server/op/general_text_reader_op.h
+++ b/core/general-server/op/general_text_reader_op.h
@@ -25,6 +25,7 @@
 #endif
 #include 
 #include "core/predictor/framework/resource.h"
+#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/load_general_model_service.pb.h"
 
diff --git a/core/general-server/op/general_text_response_op.cpp b/core/general-server/op/general_text_response_op.cpp
index 7e67ce86e8f1616e320167a833d9a98f70ad3d99..16f476353965384c0d2b0092f2a42efa9c5289e7 100644
--- a/core/general-server/op/general_text_response_op.cpp
+++ b/core/general-server/op/general_text_response_op.cpp
@@ -17,7 +17,6 @@
 #include 
 #include 
 #include "core/general-server/op/general_text_response_op.h"
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
 #include "core/predictor/framework/resource.h"
@@ -36,18 +35,18 @@ using baidu::paddle_serving::predictor::general_model::FetchInst;
 using baidu::paddle_serving::predictor::InferManager;
 using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
 
-int GeneralTextInferOp::inference() {
-  const GeneralBlob *blob_input =
+int GeneralTextResponseOp::inference() {
+  const GeneralBlob *input_blob =
       get_depend_argument<GeneralBlob>(pre_name());
 
-  if (!blob_input) {
+  if (!input_blob) {
     LOG(ERROR) << "Failed mutable depended argument, op: " << pre_name();
     return -1;
   }
 
-  const TensorVector *in = &blob_input->tensor_vector;
-  int batch_size = in->GetBatchSize();
+  const TensorVector *in = &input_blob->tensor_vector;
+  int batch_size = input_blob->GetBatchSize();
 
   VLOG(2) << "infer batch size: " << batch_size;
 
   // infer
@@ -72,7 +71,7 @@ int GeneralTextInferOp::inference() {
   // response inst with only fetch_var_names
   Response *res = mutable_data<Response>();
 
-  res->set_mean_infer_us(infer_time);
+  // res->set_mean_infer_us(infer_time);
 
   for (int i = 0; i < batch_size; ++i) {
     FetchInst *fetch_inst = res->add_insts();
diff --git a/core/general-server/op/general_text_response_op.h b/core/general-server/op/general_text_response_op.h
index a062ea3f6553bbf3fffa175277b28e3a5c837143..5efefefb77e03f73d321cb1f2c91c7e183cb5a6b 100644
--- a/core/general-server/op/general_text_response_op.h
+++ b/core/general-server/op/general_text_response_op.h
@@ -25,6 +25,7 @@
 #include "paddle_inference_api.h"  // NOLINT
 #endif
 #include "core/general-server/general_model_service.pb.h"
+#include "core/general-server/op/general_infer_helper.h"
 
 namespace baidu {
 namespace paddle_serving {