Commit 13173ea6 authored by guru4elephant

remove unnecessary comments

Parent 4e6438aa
@@ -52,17 +52,14 @@ int GeneralInferOp::inference() {
int batch_size = input_blob->GetBatchSize();
VLOG(2) << "infer batch size: " << batch_size;
// infer
Timer timeline;
// double infer_time = 0.0;
int64_t start = timeline.TimeStampUS();
timeline.Start();
if (InferManager::instance().infer(GENERAL_MODEL_NAME, in, out, batch_size)) {
LOG(ERROR) << "Failed do infer in fluid model: " << GENERAL_MODEL_NAME;
return -1;
}
// timeline.Pause();
// infer_time = timeline.ElapsedUS();
int64_t end = timeline.TimeStampUS();
CopyBlobInfo(input_blob, output_blob);
AddBlobInfo(output_blob, start);
......
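For readers following the diff: the timing that survives this cleanup brackets the inference call with two microsecond timestamps from Timer::TimeStampUS() and attaches both to the output blob via AddBlobInfo, rather than the Start()/Pause()/ElapsedUS() pattern the deleted comments referenced. Below is a minimal, self-contained sketch of that bracketing pattern; the Timer here is a chrono-based stand-in for the framework's timer, and Blob/AddBlobInfo are simplified hypothetical versions of the op's blob bookkeeping, not the real Paddle Serving types.

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for the framework Timer: TimeStampUS() returns the current
// wall-clock time in microseconds, mirroring how the ops bracket infer().
struct Timer {
  int64_t TimeStampUS() const {
    return std::chrono::duration_cast<std::chrono::microseconds>(
               std::chrono::system_clock::now().time_since_epoch())
        .count();
  }
};

// Hypothetical blob holding the profile stamps that AddBlobInfo appends.
struct Blob {
  std::vector<int64_t> time_stamp;  // start/end pairs, one pair per op
};

// Mirrors the AddBlobInfo(blob, stamp) calls in the diff: push one
// timestamp onto the blob's profile record.
void AddBlobInfo(Blob* blob, int64_t stamp) {
  blob->time_stamp.push_back(stamp);
}

int main() {
  Timer timeline;
  Blob output_blob;

  int64_t start = timeline.TimeStampUS();
  // ... the real op calls InferManager::instance().infer(...) here ...
  int64_t end = timeline.TimeStampUS();

  AddBlobInfo(&output_blob, start);
  AddBlobInfo(&output_blob, end);
  std::cout << "op latency (us): " << (end - start) << std::endl;
  return 0;
}
```

In the real op, CopyBlobInfo(input_blob, output_blob) appears to carry stamps accumulated by upstream ops forward, so the response op can see per-op timings for the whole chain.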
@@ -88,12 +88,9 @@ int GeneralReaderOp::inference() {
}
Timer timeline;
// double read_time = 0.0;
// timeline.Start();
int64_t start = timeline.TimeStampUS();
int var_num = req->insts(0).tensor_array_size();
VLOG(2) << "var num: " << var_num;
// read config
VLOG(2) << "start to call load general model_conf op";
baidu::paddle_serving::predictor::Resource &resource =
@@ -203,7 +200,6 @@ int GeneralReaderOp::inference() {
}
timeline.Pause();
// read_time = timeline.ElapsedUS();
int64_t end = timeline.TimeStampUS();
res->p_size = 0;
AddBlobInfo(res, start);
......
@@ -73,11 +73,8 @@ int GeneralResponseOp::inference() {
model_config->_fetch_alias_name_to_index[req->fetch_var_names(i)];
}
// response inst with only fetch_var_names
Response *res = mutable_data<Response>();
// res->set_mean_infer_us(infer_time);
for (int i = 0; i < batch_size; ++i) {
FetchInst *fetch_inst = res->add_insts();
for (auto & idx : fetch_index) {
@@ -124,9 +121,6 @@ int GeneralResponseOp::inference() {
var_idx++;
}
// timeline.Pause();
// response_time = timeline.ElapsedUS();
if (req->profile_server()) {
int64_t end = timeline.TimeStampUS();
VLOG(2) << "p size for input blob: " << input_blob->p_size;
......
@@ -57,13 +57,10 @@ int GeneralTextReaderOp::inference() {
}
Timer timeline;
// double read_time = 0.0;
// timeline.Start();
int64_t start = timeline.TimeStampUS();
int var_num = req->insts(0).tensor_array_size();
VLOG(2) << "var num: " << var_num;
// read config
VLOG(2) << "start to call load general model_conf op";
baidu::paddle_serving::predictor::Resource &resource =
@@ -164,8 +161,6 @@ int GeneralTextReaderOp::inference() {
}
}
// timeline.Pause();
// read_time = timeline.ElapsedUS();
int64_t end = timeline.TimeStampUS();
AddBlobInfo(res, start);
AddBlobInfo(res, end);
......
@@ -49,13 +49,9 @@ int GeneralTextResponseOp::inference() {
int batch_size = input_blob->GetBatchSize();
VLOG(2) << "infer batch size: " << batch_size;
// infer
const Request *req = dynamic_cast<const Request *>(get_request_message());
Timer timeline;
// double response_time = 0.0;
// timeline.Start();
int64_t start = timeline.TimeStampUS();
VLOG(2) << "start to call load general model_conf op";
@@ -76,8 +72,6 @@ int GeneralTextResponseOp::inference() {
// response inst with only fetch_var_names
Response *res = mutable_data<Response>();
// res->set_mean_infer_us(infer_time);
for (int i = 0; i < batch_size; ++i) {
FetchInst *fetch_inst = res->add_insts();
for (auto & idx : fetch_index) {
......
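Across the response ops above, the final end timestamp is only taken and reported when the client asks for it via req->profile_server(). A rough sketch of that opt-in gating follows, with Request and Response reduced to hypothetical plain structs (in Paddle Serving they are protobuf messages) and finalize as an illustrative helper, not a real framework function.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-ins for the protobuf Request/Response messages.
struct Request {
  bool profile_server = false;  // client opts in to server-side profiling
};

struct Response {
  std::vector<int64_t> profile_time;  // filled only when profiling is on
};

// Sketch of the tail of a response op: emit timing only on request.
void finalize(const Request& req, const std::vector<int64_t>& op_stamps,
              int64_t end_us, Response* res) {
  if (!req.profile_server) {
    return;  // default path: no profiling data in the response
  }
  // Forward the per-op start/end stamps gathered via AddBlobInfo,
  // then append this op's own end timestamp.
  res->profile_time = op_stamps;
  res->profile_time.push_back(end_us);
}

int main() {
  Request req;
  req.profile_server = true;
  Response res;
  finalize(req, {100, 250, 260, 400}, 410, &res);
  std::cout << "stamps in response: " << res.profile_time.size() << std::endl;
  return 0;
}
```

This keeps profiling work off the default request path: unless the client explicitly opted in, the accumulated stamps are simply dropped.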