From 8f254ed98596f7a08bf21aa538ffd62531462e03 Mon Sep 17 00:00:00 2001
From: HexToString <506181616@qq.com>
Date: Tue, 16 Mar 2021 04:11:09 +0000
Subject: [PATCH] fix code style

---
 core/general-server/op/general_infer_op.cpp    |  4 +-
 core/general-server/op/general_reader_op.cpp   | 41 +++++++++----------
 .../general-server/op/general_response_op.cpp  |  8 ++--
 core/predictor/framework/infer.h               | 14 +++----
 core/predictor/framework/infer_data.h          |  4 +-
 5 files changed, 35 insertions(+), 36 deletions(-)

diff --git a/core/general-server/op/general_infer_op.cpp b/core/general-server/op/general_infer_op.cpp
index c3ca099b..81269127 100755
--- a/core/general-server/op/general_infer_op.cpp
+++ b/core/general-server/op/general_infer_op.cpp
@@ -47,7 +47,7 @@ int GeneralInferOp::inference() {
   const std::string pre_name = pre_node_names[0];
 
   const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name);
-  if(!input_blob){
+  if (!input_blob) {
     LOG(ERROR) << "input_blob is nullptr,error";
     return -1;
   }
@@ -55,7 +55,7 @@ int GeneralInferOp::inference() {
   VLOG(2) << "(logid=" << log_id << ") Get precedent op name: " << pre_name;
 
   GeneralBlob *output_blob = mutable_data<GeneralBlob>();
-  if(!output_blob){
+  if (!output_blob) {
     LOG(ERROR) << "output_blob is nullptr,error";
     return -1;
   }
diff --git a/core/general-server/op/general_reader_op.cpp b/core/general-server/op/general_reader_op.cpp
index 135ae630..09adf2e2 100755
--- a/core/general-server/op/general_reader_op.cpp
+++ b/core/general-server/op/general_reader_op.cpp
@@ -32,7 +32,7 @@ using baidu::paddle_serving::predictor::general_model::Tensor;
 using baidu::paddle_serving::predictor::general_model::Request;
 using baidu::paddle_serving::predictor::general_model::FeedInst;
 using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
-enum ProtoDataType { P_INT64,P_FLOAT32,P_INT32 };
+enum ProtoDataType { P_INT64, P_FLOAT32, P_INT32 };
 int conf_check(const Request *req,
                const std::shared_ptr<PaddleGeneralModelConfig> &model_config) {
   int var_num = req->insts(0).tensor_array_size();
@@ -79,13 +79,9 @@ int GeneralReaderOp::inference() {
   std::vector<int64_t> capacity;
 
   GeneralBlob *res = mutable_data<GeneralBlob>();
-  TensorVector *out = &res->tensor_vector;
-  if(!res){
-    LOG(ERROR) << "res is nullptr,error";
-    return -1;
-  }
+  TensorVector *out = &(res->tensor_vector);
+
   res->SetLogId(log_id);
-
   if (!res) {
     LOG(ERROR) << "(logid=" << log_id
                << ") Failed get op tls reader object output";
@@ -94,9 +90,8 @@ int GeneralReaderOp::inference() {
   Timer timeline;
   int64_t start = timeline.TimeStampUS();
   int var_num = req->insts(0).tensor_array_size();
-  VLOG(2) << "(logid=" << log_id << ") var num: " << var_num;
-  VLOG(2) << "(logid=" << log_id
+  VLOG(2) << "(logid=" << log_id << ") var num: " << var_num
           << ") start to call load general model_conf op";
 
   baidu::paddle_serving::predictor::Resource &resource =
@@ -106,8 +101,6 @@ int GeneralReaderOp::inference() {
 
   std::shared_ptr<PaddleGeneralModelConfig> model_config =
       resource.get_general_model_config();
-  VLOG(2) << "(logid=" << log_id << ") print general model config done.";
-
   // TODO(guru4elephant): how to do conditional check?
   /*
   int ret = conf_check(req, model_config);
@@ -170,11 +163,13 @@ int GeneralReaderOp::inference() {
     out->push_back(lod_tensor);
   }
   // specify the memory needed for output tensor_vector
+  int tensor_size = 0;
+  int data_len = 0;
   for (int i = 0; i < var_num; ++i) {
     if (out->at(i).lod.size() == 1) {
-      int tensor_size = 0;
+      tensor_size = 0;
       const Tensor &tensor = req->insts(0).tensor_array(i);
-      int data_len = 0;
+      data_len = 0;
       if (tensor.int64_data_size() > 0) {
         data_len = tensor.int64_data_size();
       } else if (tensor.float_data_size() > 0) {
@@ -207,14 +202,16 @@ int GeneralReaderOp::inference() {
   }
 
   // fill the data into output general_blob
+  int offset = 0;
+  int elem_num = 0;
   for (int i = 0; i < var_num; ++i) {
     if (elem_type[i] == P_INT64) {
       int64_t *dst_ptr = static_cast<int64_t *>(out->at(i).data.data());
       VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i
               << "] is " << req->insts(0).tensor_array(i).int64_data(0);
-      int offset = 0;
-      int elem_num = req->insts(0).tensor_array(i).int64_data_size();
-      if(!dst_ptr){
+      offset = 0;
+      elem_num = req->insts(0).tensor_array(i).int64_data_size();
+      if (!dst_ptr) {
         LOG(ERROR) << "dst_ptr is nullptr";
         return -1;
       }
@@ -225,9 +222,9 @@ int GeneralReaderOp::inference() {
       float *dst_ptr = static_cast<float *>(out->at(i).data.data());
       VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i
               << "] is " << req->insts(0).tensor_array(i).float_data(0);
-      int offset = 0;
-      int elem_num = req->insts(0).tensor_array(i).float_data_size();
-      if(!dst_ptr){
+      offset = 0;
+      elem_num = req->insts(0).tensor_array(i).float_data_size();
+      if (!dst_ptr) {
         LOG(ERROR) << "dst_ptr is nullptr";
         return -1;
       }
@@ -238,9 +235,9 @@ int GeneralReaderOp::inference() {
       int32_t *dst_ptr = static_cast<int32_t *>(out->at(i).data.data());
       VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i
               << "] is " << req->insts(0).tensor_array(i).int_data(0);
-      int offset = 0;
-      int elem_num = req->insts(0).tensor_array(i).int_data_size();
-      if(!dst_ptr){
+      offset = 0;
+      elem_num = req->insts(0).tensor_array(i).int_data_size();
+      if (!dst_ptr) {
         LOG(ERROR) << "dst_ptr is nullptr";
         return -1;
       }
diff --git a/core/general-server/op/general_response_op.cpp b/core/general-server/op/general_response_op.cpp
index 7476963c..d4d737fb 100755
--- a/core/general-server/op/general_response_op.cpp
+++ b/core/general-server/op/general_response_op.cpp
@@ -42,7 +42,9 @@ using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
 
 int GeneralResponseOp::inference() {
   const std::vector<std::string> pre_node_names = pre_names();
   VLOG(2) << "pre node names size: " << pre_node_names.size();
-  const GeneralBlob *input_blob;
+  const GeneralBlob *input_blob = nullptr;
+  int var_idx = 0;
+  int cap = 1;
   uint64_t log_id =
       get_depend_argument<GeneralBlob>(pre_node_names[0])->GetLogId();
@@ -116,9 +118,9 @@ int GeneralResponseOp::inference() {
     }
   }
 
-  int var_idx = 0;
+  var_idx = 0;
   for (auto &idx : fetch_index) {
-    int cap = 1;
+    cap = 1;
     for (int j = 0; j < in->at(idx).shape.size(); ++j) {
       cap *= in->at(idx).shape[j];
     }
diff --git a/core/predictor/framework/infer.h b/core/predictor/framework/infer.h
index 14a1dfd1..4e4fdeaa 100755
--- a/core/predictor/framework/infer.h
+++ b/core/predictor/framework/infer.h
@@ -612,13 +612,13 @@ class FluidInferEngine : public CloneDBReloadableInferEngine {
       void* origin_data = (*tensorVector_in_pointer)[i].data.data();
       //Because the core needs to determine the size of memory space according to the data type passed in.
       //The pointer type of data must be one of float *,int64_t*,int32_t* instead void*.
-      if((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::FLOAT32){
+      if ((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::FLOAT32) {
         float* data = static_cast<float*>(origin_data);
         lod_tensor_in->CopyFromCpu(data);
-      }else if((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::INT64){
+      }else if ((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::INT64) {
         int64_t* data = static_cast<int64_t*>(origin_data);
         lod_tensor_in->CopyFromCpu(data);
-      }else if((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::INT32){
+      }else if ((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::INT32) {
         int32_t* data = static_cast<int32_t*>(origin_data);
         lod_tensor_in->CopyFromCpu(data);
       }
@@ -639,7 +639,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine {
     char* databuf_char = NULL;
     size_t databuf_size = 0;
     TensorVector* tensorVector_out_pointer = reinterpret_cast<TensorVector*>(out);
-    if(!tensorVector_out_pointer){
+    if (!tensorVector_out_pointer) {
       LOG(ERROR) << "tensorVector_out_pointer is nullptr,error";
       return -1;
     }
@@ -650,7 +650,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine {
       output_shape = lod_tensor_out->shape();
       out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
       dataType = lod_tensor_out->type();
-      if(dataType == paddle::PaddleDType::FLOAT32){
+      if (dataType == paddle::PaddleDType::FLOAT32) {
         databuf_size = out_num*sizeof(float);
         databuf_data = MempoolWrapper::instance().malloc(databuf_size);
         if (!databuf_data) {
@@ -660,7 +660,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine {
         float* data_out = reinterpret_cast<float*>(databuf_data);
         lod_tensor_out->CopyToCpu(data_out);
         databuf_char = reinterpret_cast<char*>(data_out);
-      }else if(dataType == paddle::PaddleDType::INT64){
+      }else if (dataType == paddle::PaddleDType::INT64) {
         databuf_size = out_num*sizeof(int64_t);
         databuf_data = MempoolWrapper::instance().malloc(databuf_size);
         if (!databuf_data) {
@@ -670,7 +670,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine {
         int64_t* data_out = reinterpret_cast<int64_t*>(databuf_data);
         lod_tensor_out->CopyToCpu(data_out);
         databuf_char = reinterpret_cast<char*>(data_out);
-      }else if(dataType == paddle::PaddleDType::INT32){
+      }else if (dataType == paddle::PaddleDType::INT32) {
         databuf_size = out_num*sizeof(int32_t);
         databuf_data = MempoolWrapper::instance().malloc(databuf_size);
         if (!databuf_data) {
diff --git a/core/predictor/framework/infer_data.h b/core/predictor/framework/infer_data.h
index 07de10ce..cea6508a 100755
--- a/core/predictor/framework/infer_data.h
+++ b/core/predictor/framework/infer_data.h
@@ -80,9 +80,9 @@ struct Tensor {
   size_t ele_byte() const {
     if (type == INT64) {
       return sizeof(int64_t);
-    } else if(type == FLOAT32){
+    } else if (type == FLOAT32) {
       return sizeof(float);
-    }else{
+    } else {
       return sizeof(int32_t);
     }
   }
-- 
GitLab