Commit 744b8419 authored by guru4elephant

add design of general blob as data struct between op on server side

Parent 5bb92dc8
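For orientation, below is a minimal sketch of the GeneralBlob struct that this commit settles on, pieced together from the hunks that follow. Anything not visible in the truncated diff (the dense-shape fallback, the -1 return for an empty blob, the exact member list) is an assumption, not a copy of the real header.

// Hedged sketch of GeneralBlob, assembled from the hunks in this commit.
// Members and fallbacks not shown in the diff are assumptions.
#include <string>
#include <vector>
#include "paddle_inference_api.h"  // paddle::PaddleTensor

namespace baidu {
namespace paddle_serving {
namespace serving {

struct GeneralBlob {
  std::vector<paddle::PaddleTensor> tensor_vector;

  void Clear() { tensor_vector.clear(); }

  // Batch size is read from the first tensor: with a single-level LoD the
  // number of sequences is lod[0].size() - 1; otherwise (assumption) fall
  // back to the leading shape dimension.
  int GetBatchSize() const {
    if (tensor_vector.size() > 0) {
      if (tensor_vector[0].lod.size() == 1) {
        return tensor_vector[0].lod[0].size() - 1;
      }
      return tensor_vector[0].shape[0];
    }
    return -1;  // assumption: an empty blob has no batch
  }

  std::string ShortDebugString() const { return "Not implemented!"; }
};

}  // namespace serving
}  // namespace paddle_serving
}  // namespace baidu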
@@ -45,7 +45,7 @@ struct GeneralBlob {
     tensor_vector.clear();
   }
-  int GetBatchSize() {
+  int GetBatchSize() const {
     if (tensor_vector.size() > 0) {
       if (tensor_vector[0].lod.size() == 1) {
         return tensor_vector[0].lod[0].size() - 1;
@@ -58,7 +58,7 @@ struct GeneralBlob {
   }
   std::string ShortDebugString() const { return "Not implemented!"; }
-}
+};
 } // namespace serving
 } // namespace paddle_serving
......
@@ -16,7 +16,6 @@
 #include <iostream>
 #include <memory>
 #include <sstream>
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/op/general_infer_op.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
@@ -48,7 +47,7 @@ int GeneralInferOp::inference() {
   const TensorVector *in = &input_blob->tensor_vector;
   TensorVector *out = butil::get_object<TensorVector>();
-  int batch_size = in->GetBatchSize();
+  int batch_size = input_blob->GetBatchSize();
   VLOG(2) << "infer batch size: " << batch_size;
   // infer
......
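The change above is part of a wider pattern in this commit: ops stop asking the raw TensorVector for the batch size and ask the GeneralBlob they depend on instead. A condensed sketch of that pattern, assuming the same includes and typedefs as the surrounding file; the actual inference call is elided:

// Sketch of the op-side pattern after this commit: the dependent GeneralBlob,
// not the TensorVector, answers GetBatchSize().
int GeneralInferOp::inference() {
  const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name());
  if (!input_blob) {
    LOG(ERROR) << "Failed mutable depended argument, op: " << pre_name();
    return -1;
  }
  const TensorVector *in = &input_blob->tensor_vector;
  TensorVector *out = butil::get_object<TensorVector>();
  int batch_size = input_blob->GetBatchSize();  // was: in->GetBatchSize()
  VLOG(2) << "infer batch size: " << batch_size;
  // ... run the model on *in and fill *out (truncated in the hunk above) ...
  return 0;  // assumption: success path
}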
@@ -25,6 +25,7 @@
 #include "paddle_inference_api.h" // NOLINT
 #endif
 #include "core/general-server/general_model_service.pb.h"
+#include "core/general-server/op/general_infer_helper.h"
 namespace baidu {
 namespace paddle_serving {
......
@@ -100,8 +100,8 @@ int GeneralReaderOp::inference() {
   VLOG(2) << "print general model config done.";
   // TODO(guru4elephant): how to do conditional check?
-  res->reader_status = conf_check(req, model_config);
-  if (res->reader_status != 0) {
+  int ret = conf_check(req, model_config);
+  if (ret != 0) {
     LOG(INFO) << "model conf of server:";
     resource.print_general_model_config(model_config);
     return 0;
......
@@ -47,8 +47,7 @@ int GeneralResponseOp::inference() {
   }
   const TensorVector *in = &input_blob->tensor_vector;
-  int batch_size = in->GetBatchSize();
-  double infer_time = in->infer_time;
+  int batch_size = input_blob->GetBatchSize();
   VLOG(2) << "input batch size: " << batch_size;
@@ -72,7 +71,7 @@ int GeneralResponseOp::inference() {
   // response inst with only fetch_var_names
   Response *res = mutable_data<Response>();
-  res->set_mean_infer_us(infer_time);
+  // res->set_mean_infer_us(infer_time);
   for (int i = 0; i < batch_size; ++i) {
     FetchInst *fetch_inst = res->add_insts();
......
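The response op (and its text twin further down) now takes the batch size from the blob, and mean_infer_us is commented out because infer_time no longer lives on GeneralBlob. A condensed sketch of the response path under those assumptions, with the per-variable tensor copy elided:

// Sketch of the response path after this commit. infer_time is gone from
// GeneralBlob, so the Response's mean_infer_us field is left unset.
const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name());
const TensorVector *in = &input_blob->tensor_vector;
int batch_size = input_blob->GetBatchSize();
VLOG(2) << "input batch size: " << batch_size;

Response *res = mutable_data<Response>();
// res->set_mean_infer_us(infer_time);  // disabled: the blob no longer carries timing
for (int i = 0; i < batch_size; ++i) {
  FetchInst *fetch_inst = res->add_insts();
  // ... copy the i-th row of each fetched tensor in *in into fetch_inst ...
}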
@@ -17,7 +17,6 @@
 #include <memory>
 #include <sstream>
 #include "core/general-server/op/general_text_reader_op.h"
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
@@ -51,8 +50,8 @@ int GeneralTextReaderOp::inference() {
   }
   if (batch_size <= 0) {
-    res->reader_status = -1;
-    return 0;
+    LOG(ERROR) << "Batch size < 0";
+    return -1;
   }
   int var_num = req->insts(0).tensor_array_size();
......
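In the text reader, an invalid batch size is no longer recorded as reader_status on the blob; the op logs and fails outright, so downstream ops never see a half-initialized blob. A short sketch of the new guard, matching the hunk above:

// Sketch: with reader_status removed from GeneralBlob, a non-positive batch
// size is a hard failure of the op rather than a status flag for later ops.
if (batch_size <= 0) {
  LOG(ERROR) << "Batch size < 0";
  return -1;
}
int var_num = req->insts(0).tensor_array_size();  // continue with per-variable parsing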
@@ -25,6 +25,7 @@
 #endif
 #include <string>
 #include "core/predictor/framework/resource.h"
+#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/load_general_model_service.pb.h"
......
@@ -17,7 +17,6 @@
 #include <memory>
 #include <sstream>
 #include "core/general-server/op/general_text_response_op.h"
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
 #include "core/predictor/framework/resource.h"
@@ -36,18 +35,18 @@ using baidu::paddle_serving::predictor::general_model::FetchInst;
 using baidu::paddle_serving::predictor::InferManager;
 using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
-int GeneralTextInferOp::inference() {
-  const GeneralBlob *blob_input =
+int GeneralTextResponseOp::inference() {
+  const GeneralBlob *input_blob =
       get_depend_argument<GeneralBlob>(pre_name());
-  if (!blob_input) {
+  if (!input_blob) {
     LOG(ERROR) << "Failed mutable depended argument, op: "
                << pre_name();
     return -1;
   }
-  const TensorVector *in = &blob_input->tensor_vector;
-  int batch_size = in->GetBatchSize();
+  const TensorVector *in = &input_blob->tensor_vector;
+  int batch_size = input_blob->GetBatchSize();
   VLOG(2) << "infer batch size: " << batch_size;
   // infer
@@ -72,7 +71,7 @@ int GeneralTextInferOp::inference() {
   // response inst with only fetch_var_names
   Response *res = mutable_data<Response>();
-  res->set_mean_infer_us(infer_time);
+  // res->set_mean_infer_us(infer_time);
   for (int i = 0; i < batch_size; ++i) {
     FetchInst *fetch_inst = res->add_insts();
......
@@ -25,6 +25,7 @@
 #include "paddle_inference_api.h" // NOLINT
 #endif
 #include "core/general-server/general_model_service.pb.h"
+#include "core/general-server/op/general_infer_helper.h"
 namespace baidu {
 namespace paddle_serving {
......