From a7b94efded6f8ec435810ec895873ccbe4108c0f Mon Sep 17 00:00:00 2001
From: guru4elephant
Date: Fri, 7 Feb 2020 13:45:29 +0800
Subject: [PATCH] make all general server log level 2

---
 core/general-client/src/general_model.cpp   | 50 +++++++++++++++------
 core/general-server/op/general_infer_op.cpp | 14 ------
 2 files changed, 36 insertions(+), 28 deletions(-)

diff --git a/core/general-client/src/general_model.cpp b/core/general-client/src/general_model.cpp
index e8747125..b37d2dc7 100644
--- a/core/general-client/src/general_model.cpp
+++ b/core/general-client/src/general_model.cpp
@@ -38,23 +38,35 @@ int PredictorClient::init(const std::string &conf_file) {
             << ", file path: " << conf_file;
     return -1;
   }
+
   _feed_name_to_idx.clear();
   _fetch_name_to_idx.clear();
   _shape.clear();
   int feed_var_num = model_config.feed_var_size();
   int fetch_var_num = model_config.fetch_var_size();
+  VLOG(2) << "feed var num: " << feed_var_num
+          << ", fetch_var_num: " << fetch_var_num;
   for (int i = 0; i < feed_var_num; ++i) {
     _feed_name_to_idx[model_config.feed_var(i).alias_name()] = i;
+    VLOG(2) << "feed alias name: " << model_config.feed_var(i).alias_name()
+            << " index: " << i;
     std::vector<int> tmp_feed_shape;
+    VLOG(2) << "feed[" << i << "] shape:";
     for (int j = 0; j < model_config.feed_var(i).shape_size(); ++j) {
       tmp_feed_shape.push_back(model_config.feed_var(i).shape(j));
+      VLOG(2) << "shape[" << j << "]: "
+              << model_config.feed_var(i).shape(j);
     }
     _type.push_back(model_config.feed_var(i).feed_type());
+    VLOG(2) << "feed[" << i << "] feed type: "
+            << model_config.feed_var(i).feed_type();
     _shape.push_back(tmp_feed_shape);
   }
 
   for (int i = 0; i < fetch_var_num; ++i) {
     _fetch_name_to_idx[model_config.fetch_var(i).alias_name()] = i;
+    VLOG(2) << "fetch [" << i << "] alias name: "
+            << model_config.fetch_var(i).alias_name();
     _fetch_name_to_var_name[model_config.fetch_var(i).alias_name()] =
         model_config.fetch_var(i).name();
   }
@@ -93,6 +105,9 @@ std::vector<std::vector<float>> PredictorClient::predict(
 
   _api.thrd_clear();
   _predictor = _api.fetch_predictor("general_model");
+  VLOG(2) << "fetch general model predictor done.";
+  VLOG(2) << "float feed name size: " << float_feed_name.size();
+  VLOG(2) << "int feed name size: " << int_feed_name.size();
   Request req;
   std::vector<Tensor *> tensor_vec;
   FeedInst *inst = req.add_insts();
@@ -103,6 +118,7 @@ std::vector<std::vector<float>> PredictorClient::predict(
   for (auto &name : int_feed_name) {
     tensor_vec.push_back(inst->add_tensor_array());
   }
+  VLOG(2) << "prepare tensor vec done.";
 
   int vec_idx = 0;
   for (auto &name : float_feed_name) {
@@ -120,6 +136,8 @@ std::vector<std::vector<float>> PredictorClient::predict(
     vec_idx++;
   }
 
+  VLOG(2) << "feed float feed var done.";
+
   vec_idx = 0;
   for (auto &name : int_feed_name) {
     int idx = _feed_name_to_idx[name];
@@ -136,6 +154,8 @@ std::vector<std::vector<float>> PredictorClient::predict(
     vec_idx++;
   }
 
+  VLOG(2) << "feed int feed var done.";
+
   // std::map<std::string, std::vector<float>> result;
   Response res;
 
@@ -147,18 +167,10 @@ std::vector<std::vector<float>> PredictorClient::predict(
     for (auto &name : fetch_name) {
       int idx = _fetch_name_to_idx[name];
       int len = res.insts(0).tensor_array(idx).data_size();
-      VLOG(3) << "fetch name: " << name;
-      VLOG(3) << "tensor data size: " << len;
+      VLOG(2) << "fetch name: " << name;
+      VLOG(2) << "tensor data size: " << len;
       fetch_result[idx].resize(len);
       for (int i = 0; i < len; ++i) {
-        /*
-        (*fetch_result)[name][i] = *(const float *)
-        res.insts(0).tensor_array(idx).data(i).c_str();
-        VLOG(3) << *(const float *)
-        res.insts(0).tensor_array(idx).data(i).c_str();
-        fetch_result[name][i] = *(const float *)
-        res.insts(0).tensor_array(idx).data(i).c_str();
-        */
         fetch_result[idx][i] =
             *(const float *)res.insts(0).tensor_array(idx).data(i).c_str();
       }
@@ -187,9 +199,13 @@ std::vector<std::vector<std::vector<float>>> PredictorClient::batch_predict(
 
   _api.thrd_clear();
   _predictor = _api.fetch_predictor("general_model");
+  VLOG(2) << "fetch general model predictor done.";
+  VLOG(2) << "float feed name size: " << float_feed_name.size();
+  VLOG(2) << "int feed name size: " << int_feed_name.size();
   Request req;
   //
   for (int bi = 0; bi < batch_size; bi++) {
+    VLOG(2) << "prepare batch " << bi;
     std::vector<Tensor *> tensor_vec;
     FeedInst *inst = req.add_insts();
     std::vector<std::vector<float>> float_feed = float_feed_batch[bi];
@@ -201,7 +217,9 @@ std::vector<std::vector<std::vector<float>>> PredictorClient::batch_predict(
     for (auto &name : int_feed_name) {
       tensor_vec.push_back(inst->add_tensor_array());
     }
-
+
+    VLOG(2) << "batch [" << bi << "] int_feed_name and float_feed_name "
+            << "prepared";
     int vec_idx = 0;
     for (auto &name : float_feed_name) {
       int idx = _feed_name_to_idx[name];
@@ -218,6 +236,8 @@ std::vector<std::vector<std::vector<float>>> PredictorClient::batch_predict(
       vec_idx++;
     }
 
+    VLOG(2) << "batch [" << bi << "] float feed value prepared";
+
     vec_idx = 0;
     for (auto &name : int_feed_name) {
       int idx = _feed_name_to_idx[name];
@@ -235,6 +255,8 @@ std::vector<std::vector<std::vector<float>>> PredictorClient::batch_predict(
       }
       vec_idx++;
     }
+
+    VLOG(2) << "batch [" << bi << "] int feed value prepared";
   }
 
   Response res;
@@ -248,10 +270,10 @@ std::vector<std::vector<std::vector<float>>> PredictorClient::batch_predict(
       for (auto &name : fetch_name) {
         int idx = _fetch_name_to_idx[name];
         int len = res.insts(bi).tensor_array(idx).data_size();
-        VLOG(3) << "fetch name: " << name;
-        VLOG(3) << "tensor data size: " << len;
+        VLOG(2) << "fetch name: " << name;
+        VLOG(2) << "tensor data size: " << len;
         fetch_result_batch[bi][idx].resize(len);
-        VLOG(3)
+        VLOG(2)
            << "fetch name " << name << " index " << idx << " first data "
            << *(const float *)res.insts(bi).tensor_array(idx).data(0).c_str();
        for (int i = 0; i < len; ++i) {
diff --git a/core/general-server/op/general_infer_op.cpp b/core/general-server/op/general_infer_op.cpp
index 6f5dab9b..cea48dee 100644
--- a/core/general-server/op/general_infer_op.cpp
+++ b/core/general-server/op/general_infer_op.cpp
@@ -96,20 +96,6 @@ int GeneralInferOp::inference() {
       }
     }
   }
-  /*
-  for (size_t i = 0; i < in->size(); ++i) {
-    (*in)[i].shape.clear();
-  }
-  in->clear();
-  butil::return_object(in);
-
-  for (size_t i = 0; i < out->size(); ++i) {
-    (*out)[i].shape.clear();
-  }
-  out->clear();
-  butil::return_object(out);
-  }
-  */
   return 0;
 }
 DEFINE_OP(GeneralInferOp);
--
GitLab
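For context, the gating this patch relies on: a VLOG(n) message is emitted only when the process's active verbosity is at least n, so moving the fetch-path messages from VLOG(3) down to VLOG(2) makes the whole request lifecycle visible in a single verbosity-2 run. Below is a minimal standalone sketch of that behavior, assuming glog-style VLOG semantics (glog is an illustrative stand-in here, not necessarily the exact logging backend the serving binaries link against):

  #include <glog/logging.h>

  int main(int argc, char *argv[]) {
    google::InitGoogleLogging(argv[0]);
    FLAGS_logtostderr = 1;  // print to stderr instead of per-severity log files
    FLAGS_v = 2;            // active verbosity; same effect as GLOG_v=2 or --v=2

    VLOG(2) << "emitted: message level 2 <= active verbosity 2";    // shown
    VLOG(3) << "suppressed: message level 3 > active verbosity 2";  // hidden
    return 0;
  }

Under these semantics, running the client or server with GLOG_v=2 in the environment surfaces every message this patch adds or downgrades, while anything still at VLOG(3) stays hidden until verbosity is raised further.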