From 13173ea62ffd78edc532526eb52a44856a978bb6 Mon Sep 17 00:00:00 2001
From: guru4elephant
Date: Tue, 18 Feb 2020 21:13:08 +0800
Subject: [PATCH] remove unnecessary comments

---
 core/general-server/op/general_infer_op.cpp         | 5 +----
 core/general-server/op/general_reader_op.cpp        | 4 ----
 core/general-server/op/general_response_op.cpp      | 6 ------
 core/general-server/op/general_text_reader_op.cpp   | 5 -----
 core/general-server/op/general_text_response_op.cpp | 6 ------
 5 files changed, 1 insertion(+), 25 deletions(-)

diff --git a/core/general-server/op/general_infer_op.cpp b/core/general-server/op/general_infer_op.cpp
index e298ef4b..3ae84b20 100644
--- a/core/general-server/op/general_infer_op.cpp
+++ b/core/general-server/op/general_infer_op.cpp
@@ -52,17 +52,14 @@ int GeneralInferOp::inference() {
   int batch_size = input_blob->GetBatchSize();
   VLOG(2) << "infer batch size: " << batch_size;
 
-  // infer
   Timer timeline;
-  // double infer_time = 0.0;
   int64_t start = timeline.TimeStampUS();
   timeline.Start();
   if (InferManager::instance().infer(GENERAL_MODEL_NAME, in, out, batch_size)) {
     LOG(ERROR) << "Failed do infer in fluid model: " << GENERAL_MODEL_NAME;
     return -1;
   }
-  // timeline.Pause();
-  // infer_time = timeline.ElapsedUS();
+
   int64_t end = timeline.TimeStampUS();
   CopyBlobInfo(input_blob, output_blob);
   AddBlobInfo(output_blob, start);
diff --git a/core/general-server/op/general_reader_op.cpp b/core/general-server/op/general_reader_op.cpp
index afb593d7..8db186ea 100644
--- a/core/general-server/op/general_reader_op.cpp
+++ b/core/general-server/op/general_reader_op.cpp
@@ -88,12 +88,9 @@ int GeneralReaderOp::inference() {
   }
 
   Timer timeline;
-  // double read_time = 0.0;
-  // timeline.Start();
   int64_t start = timeline.TimeStampUS();
   int var_num = req->insts(0).tensor_array_size();
   VLOG(2) << "var num: " << var_num;
-  // read config
 
   VLOG(2) << "start to call load general model_conf op";
   baidu::paddle_serving::predictor::Resource &resource =
@@ -203,7 +200,6 @@ int GeneralReaderOp::inference() {
   }
 
   timeline.Pause();
-  // read_time = timeline.ElapsedUS();
   int64_t end = timeline.TimeStampUS();
   res->p_size = 0;
   AddBlobInfo(res, start);
diff --git a/core/general-server/op/general_response_op.cpp b/core/general-server/op/general_response_op.cpp
index 9822d541..caa0185e 100644
--- a/core/general-server/op/general_response_op.cpp
+++ b/core/general-server/op/general_response_op.cpp
@@ -73,11 +73,8 @@ int GeneralResponseOp::inference() {
         model_config->_fetch_alias_name_to_index[req->fetch_var_names(i)];
   }
 
-  // response inst with only fetch_var_names
   Response *res = mutable_data<Response>();
 
-  // res->set_mean_infer_us(infer_time);
-
   for (int i = 0; i < batch_size; ++i) {
     FetchInst *fetch_inst = res->add_insts();
     for (auto & idx : fetch_index) {
@@ -124,9 +121,6 @@ int GeneralResponseOp::inference() {
     var_idx++;
   }
 
-  // timeline.Pause();
-  // response_time = timeline.ElapsedUS();
-
   if (req->profile_server()) {
     int64_t end = timeline.TimeStampUS();
     VLOG(2) << "p size for input blob: " << input_blob->p_size;
diff --git a/core/general-server/op/general_text_reader_op.cpp b/core/general-server/op/general_text_reader_op.cpp
index d927dd0f..77dd299c 100644
--- a/core/general-server/op/general_text_reader_op.cpp
+++ b/core/general-server/op/general_text_reader_op.cpp
@@ -57,13 +57,10 @@ int GeneralTextReaderOp::inference() {
   }
 
   Timer timeline;
-  // double read_time = 0.0;
-  // timeline.Start();
   int64_t start = timeline.TimeStampUS();
 
   int var_num = req->insts(0).tensor_array_size();
   VLOG(2) << "var num: " << var_num;
-  // read config
 
   VLOG(2) << "start to call load general model_conf op";
   baidu::paddle_serving::predictor::Resource &resource =
@@ -164,8 +161,6 @@ int GeneralTextReaderOp::inference() {
     }
   }
 
-  // timeline.Pause();
-  // read_time = timeline.ElapsedUS();
   int64_t end = timeline.TimeStampUS();
   AddBlobInfo(res, start);
   AddBlobInfo(res, end);
diff --git a/core/general-server/op/general_text_response_op.cpp b/core/general-server/op/general_text_response_op.cpp
index d44e9da1..a60aba14 100644
--- a/core/general-server/op/general_text_response_op.cpp
+++ b/core/general-server/op/general_text_response_op.cpp
@@ -49,13 +49,9 @@ int GeneralTextResponseOp::inference() {
   int batch_size = input_blob->GetBatchSize();
   VLOG(2) << "infer batch size: " << batch_size;
 
-  // infer
-
   const Request *req = dynamic_cast<const Request *>(get_request_message());
 
   Timer timeline;
-  // double response_time = 0.0;
-  // timeline.Start();
   int64_t start = timeline.TimeStampUS();
 
   VLOG(2) << "start to call load general model_conf op";
@@ -76,8 +72,6 @@ int GeneralTextResponseOp::inference() {
   // response inst with only fetch_var_names
   Response *res = mutable_data<Response>();
 
-  // res->set_mean_infer_us(infer_time);
-
   for (int i = 0; i < batch_size; ++i) {
     FetchInst *fetch_inst = res->add_insts();
     for (auto & idx : fetch_index) {
-- 
GitLab
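
Reviewer note: the patch deletes only commented-out instrumentation; the live profiling path in each op (take a microsecond timestamp before and after the work, then attach both to the blob via AddBlobInfo) is unchanged. Below is a minimal standalone sketch of that pattern. It uses std::chrono as a stand-in for the Serving Timer utility; the TimeStampUS free function and the Blob struct here are illustrative stand-ins, not the real Serving types.

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for the Timer::TimeStampUS() helper used in the ops above:
// a monotonic timestamp in microseconds.
static int64_t TimeStampUS() {
  using namespace std::chrono;
  return duration_cast<microseconds>(steady_clock::now().time_since_epoch())
      .count();
}

// Illustrative stand-in for the served blob; the real blob type carries the
// tensors plus a list of profiling timestamps (what AddBlobInfo appends).
struct Blob {
  std::vector<int64_t> time_stamp;
};

int main() {
  Blob output_blob;

  int64_t start = TimeStampUS();  // taken before the infer call
  // ... InferManager::instance().infer(...) would run here ...
  int64_t end = TimeStampUS();    // taken after the infer call

  // Equivalent of AddBlobInfo(blob, start) and AddBlobInfo(blob, end):
  output_blob.time_stamp.push_back(start);
  output_blob.time_stamp.push_back(end);

  std::cout << "infer span: " << (end - start) << " us\n";
  return 0;
}
```

Because the timestamps ride along on the blob, a downstream op (or the response op, when the client sets profile_server) can report per-stage latency without any of the deleted ElapsedUS bookkeeping, which is why the removed lines were dead code.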