Commit 8f254ed9 authored by HexToString

fix code style

Parent 612431fe
......@@ -47,7 +47,7 @@ int GeneralInferOp::inference() {
const std::string pre_name = pre_node_names[0];
const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name);
-if(!input_blob){
+if (!input_blob) {
LOG(ERROR) << "input_blob is nullptr,error";
return -1;
}
......@@ -55,7 +55,7 @@ int GeneralInferOp::inference() {
VLOG(2) << "(logid=" << log_id << ") Get precedent op name: " << pre_name;
GeneralBlob *output_blob = mutable_data<GeneralBlob>();
-if(!output_blob){
+if (!output_blob) {
LOG(ERROR) << "output_blob is nullptr,error";
return -1;
}
......
......@@ -32,7 +32,7 @@ using baidu::paddle_serving::predictor::general_model::Tensor;
using baidu::paddle_serving::predictor::general_model::Request;
using baidu::paddle_serving::predictor::general_model::FeedInst;
using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
-enum ProtoDataType { P_INT64,P_FLOAT32,P_INT32 };
+enum ProtoDataType { P_INT64, P_FLOAT32, P_INT32 };
int conf_check(const Request *req,
const std::shared_ptr<PaddleGeneralModelConfig> &model_config) {
int var_num = req->insts(0).tensor_array_size();
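Beyond the spacing fix, note that the enumerator order is what fixes the wire values (P_INT64 = 0, P_FLOAT32 = 1, P_INT32 = 2). A minimal sketch, not part of this commit, of deriving per-element byte widths from the enum (`elem_bytes` is a hypothetical helper):

```cpp
#include <cstddef>
#include <cstdint>

// Same declaration order as in the hunk above, so the implicit
// enumerator values match the protocol.
enum ProtoDataType { P_INT64, P_FLOAT32, P_INT32 };

// Hypothetical helper: width in bytes of one element of each wire type.
inline std::size_t elem_bytes(ProtoDataType t) {
  switch (t) {
    case P_INT64:
      return sizeof(int64_t);
    case P_FLOAT32:
      return sizeof(float);
    case P_INT32:
      return sizeof(int32_t);
  }
  return 0;
}
```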
......@@ -79,13 +79,9 @@ int GeneralReaderOp::inference() {
std::vector<int64_t> capacity;
GeneralBlob *res = mutable_data<GeneralBlob>();
-TensorVector *out = &res->tensor_vector;
-if(!res){
-LOG(ERROR) << "res is nullptr,error";
-return -1;
-}
+TensorVector *out = &(res->tensor_vector);
res->SetLogId(log_id);
if (!res) {
LOG(ERROR) << "(logid=" << log_id
<< ") Failed get op tls reader object output";
......@@ -94,9 +90,8 @@ int GeneralReaderOp::inference() {
Timer timeline;
int64_t start = timeline.TimeStampUS();
int var_num = req->insts(0).tensor_array_size();
-VLOG(2) << "(logid=" << log_id << ") var num: " << var_num;
-VLOG(2) << "(logid=" << log_id
+VLOG(2) << "(logid=" << log_id << ") var num: " << var_num
<< ") start to call load general model_conf op";
baidu::paddle_serving::predictor::Resource &resource =
......@@ -106,8 +101,6 @@ int GeneralReaderOp::inference() {
std::shared_ptr<PaddleGeneralModelConfig> model_config =
resource.get_general_model_config();
VLOG(2) << "(logid=" << log_id << ") print general model config done.";
// TODO(guru4elephant): how to do conditional check?
/*
int ret = conf_check(req, model_config);
......@@ -170,11 +163,13 @@ int GeneralReaderOp::inference() {
out->push_back(lod_tensor);
}
// specify the memory needed for output tensor_vector
+int tensor_size = 0;
+int data_len = 0;
for (int i = 0; i < var_num; ++i) {
if (out->at(i).lod.size() == 1) {
-int tensor_size = 0;
+tensor_size = 0;
const Tensor &tensor = req->insts(0).tensor_array(i);
-int data_len = 0;
+data_len = 0;
if (tensor.int64_data_size() > 0) {
data_len = tensor.int64_data_size();
} else if (tensor.float_data_size() > 0) {
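The pattern in this hunk declares `tensor_size` and `data_len` once before the loop and resets them at the top of each iteration, instead of re-declaring them per iteration. A self-contained sketch of the same pattern, with toy sizing logic in place of the op's real branches:

```cpp
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> data_lens = {3, 0, 5};  // stand-in per-variable sizes
  int tensor_size = 0;  // hoisted: one declaration for the whole loop
  int data_len = 0;
  for (std::size_t i = 0; i < data_lens.size(); ++i) {
    tensor_size = 0;  // reset, as the new code does at the top of the body
    data_len = data_lens[i];
    tensor_size = data_len;  // simplified stand-in for the sizing branches
    std::printf("var[%zu] tensor_size=%d\n", i, tensor_size);
  }
  return 0;
}
```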
......@@ -207,14 +202,16 @@ int GeneralReaderOp::inference() {
}
// fill the data into output general_blob
+int offset = 0;
+int elem_num = 0;
for (int i = 0; i < var_num; ++i) {
if (elem_type[i] == P_INT64) {
int64_t *dst_ptr = static_cast<int64_t *>(out->at(i).data.data());
VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i
<< "] is " << req->insts(0).tensor_array(i).int64_data(0);
-int offset = 0;
-int elem_num = req->insts(0).tensor_array(i).int64_data_size();
-if(!dst_ptr){
+offset = 0;
+elem_num = req->insts(0).tensor_array(i).int64_data_size();
+if (!dst_ptr) {
LOG(ERROR) << "dst_ptr is nullptr";
return -1;
}
......@@ -225,9 +222,9 @@ int GeneralReaderOp::inference() {
float *dst_ptr = static_cast<float *>(out->at(i).data.data());
VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i
<< "] is " << req->insts(0).tensor_array(i).float_data(0);
-int offset = 0;
-int elem_num = req->insts(0).tensor_array(i).float_data_size();
-if(!dst_ptr){
+offset = 0;
+elem_num = req->insts(0).tensor_array(i).float_data_size();
+if (!dst_ptr) {
LOG(ERROR) << "dst_ptr is nullptr";
return -1;
}
......@@ -238,9 +235,9 @@ int GeneralReaderOp::inference() {
int32_t *dst_ptr = static_cast<int32_t *>(out->at(i).data.data());
VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i
<< "] is " << req->insts(0).tensor_array(i).int_data(0);
-int offset = 0;
-int elem_num = req->insts(0).tensor_array(i).int_data_size();
-if(!dst_ptr){
+offset = 0;
+elem_num = req->insts(0).tensor_array(i).int_data_size();
+if (!dst_ptr) {
LOG(ERROR) << "dst_ptr is nullptr";
return -1;
}
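All three dtype branches gain the same guard: the destination pointer obtained from the blob is validated before anything is written through it. A sketch of the shape of that guard, using `std::vector<char>` as a stand-in for the blob's data buffer:

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Returns -1 when the destination buffer is missing, mirroring the
// if (!dst_ptr) { LOG(ERROR) ...; return -1; } added in each branch.
// The caller is assumed to have sized buf to src.size() * sizeof(int64_t).
int fill_int64(std::vector<char> *buf, const std::vector<int64_t> &src) {
  int64_t *dst_ptr = reinterpret_cast<int64_t *>(buf->data());
  if (!dst_ptr) {
    return -1;
  }
  int elem_num = static_cast<int>(src.size());
  std::memcpy(dst_ptr, src.data(), elem_num * sizeof(int64_t));
  return 0;
}
```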
......
......@@ -42,7 +42,9 @@ using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
int GeneralResponseOp::inference() {
const std::vector<std::string> pre_node_names = pre_names();
VLOG(2) << "pre node names size: " << pre_node_names.size();
-const GeneralBlob *input_blob;
+const GeneralBlob *input_blob = nullptr;
+int var_idx = 0;
+int cap = 1;
uint64_t log_id =
get_depend_argument<GeneralBlob>(pre_node_names[0])->GetLogId();
......@@ -116,9 +118,9 @@ int GeneralResponseOp::inference() {
}
}
-int var_idx = 0;
+var_idx = 0;
for (auto &idx : fetch_index) {
-int cap = 1;
+cap = 1;
for (int j = 0; j < in->at(idx).shape.size(); ++j) {
cap *= in->at(idx).shape[j];
}
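Two fixes share this file: `input_blob` is now value-initialized instead of left indeterminate, and `var_idx`/`cap` are hoisted out of their loops. A small sketch of why the initialization matters, with an opaque stand-in type:

```cpp
struct GeneralBlob;  // opaque, as this sketch only passes the pointer around

int consume(const GeneralBlob *blob) { return blob ? -1 : 0; }

int main() {
  // was: const GeneralBlob *input_blob;
  const GeneralBlob *input_blob = nullptr;
  // Reading an uninitialized pointer before its first assignment is
  // undefined behavior; with = nullptr every early-return path sees a
  // well-defined value.
  return consume(input_blob);
}
```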
......
......@@ -612,13 +612,13 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<FluidFamilyCore> {
void* origin_data = (*tensorVector_in_pointer)[i].data.data();
//Because the core needs to determine the size of the memory space according to the data type passed in,
//the pointer type of data must be one of float*, int64_t*, or int32_t* instead of void*.
-if((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::FLOAT32){
+if ((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::FLOAT32) {
float* data = static_cast<float*>(origin_data);
lod_tensor_in->CopyFromCpu(data);
-}else if((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::INT64){
+}else if ((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::INT64) {
int64_t* data = static_cast<int64_t*>(origin_data);
lod_tensor_in->CopyFromCpu(data);
-}else if((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::INT32){
+}else if ((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::INT32) {
int32_t* data = static_cast<int32_t*>(origin_data);
lod_tensor_in->CopyFromCpu(data);
}
......@@ -639,7 +639,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<FluidFamilyCore> {
char* databuf_char = NULL;
size_t databuf_size = 0;
TensorVector* tensorVector_out_pointer = reinterpret_cast<TensorVector*>(out);
-if(!tensorVector_out_pointer){
+if (!tensorVector_out_pointer) {
LOG(ERROR) << "tensorVector_out_pointer is nullptr,error";
return -1;
}
......@@ -650,7 +650,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<FluidFamilyCore> {
output_shape = lod_tensor_out->shape();
out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
dataType = lod_tensor_out->type();
-if(dataType == paddle::PaddleDType::FLOAT32){
+if (dataType == paddle::PaddleDType::FLOAT32) {
databuf_size = out_num*sizeof(float);
databuf_data = MempoolWrapper::instance().malloc(databuf_size);
if (!databuf_data) {
......@@ -660,7 +660,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<FluidFamilyCore> {
float* data_out = reinterpret_cast<float*>(databuf_data);
lod_tensor_out->CopyToCpu(data_out);
databuf_char = reinterpret_cast<char*>(data_out);
-}else if(dataType == paddle::PaddleDType::INT64){
+}else if (dataType == paddle::PaddleDType::INT64) {
databuf_size = out_num*sizeof(int64_t);
databuf_data = MempoolWrapper::instance().malloc(databuf_size);
if (!databuf_data) {
......@@ -670,7 +670,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<FluidFamilyCore> {
int64_t* data_out = reinterpret_cast<int64_t*>(databuf_data);
lod_tensor_out->CopyToCpu(data_out);
databuf_char = reinterpret_cast<char*>(data_out);
-}else if(dataType == paddle::PaddleDType::INT32){
+}else if (dataType == paddle::PaddleDType::INT32) {
databuf_size = out_num*sizeof(int32_t);
databuf_data = MempoolWrapper::instance().malloc(databuf_size);
if (!databuf_data) {
......
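Each output branch sizes its staging buffer as the product of the output shape times `sizeof(T)` for its dtype, then null-checks the allocation. A runnable sketch of the FLOAT32 branch's arithmetic, with `std::malloc` standing in for `MempoolWrapper::instance().malloc`:

```cpp
#include <cstdlib>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  std::vector<int> output_shape = {2, 3, 4};  // toy shape
  // Element count = product over all dims, as in the hunk above.
  int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
                                std::multiplies<int>());
  size_t databuf_size = out_num * sizeof(float);  // FLOAT32 branch
  void *databuf_data = std::malloc(databuf_size);
  if (!databuf_data) {
    return -1;  // mirrors the LOG(ERROR) + early return in the hunk
  }
  // ... CopyToCpu(...) would fill this buffer in the real engine ...
  std::free(databuf_data);
  return 0;
}
```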
......@@ -80,9 +80,9 @@ struct Tensor {
size_t ele_byte() const {
if (type == INT64) {
return sizeof(int64_t);
-} else if(type == FLOAT32){
+} else if (type == FLOAT32) {
return sizeof(float);
-}else{
+} else {
return sizeof(int32_t);
}
}
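A short usage sketch for `ele_byte()`, with a pared-down `Tensor` stand-in that keeps only what this snippet needs:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

enum TensorType { INT64, FLOAT32, INT32 };  // stand-in for the real enum

struct Tensor {
  TensorType type;
  // Same branch structure as the hunk above: int32 is the fallback width.
  size_t ele_byte() const {
    if (type == INT64) {
      return sizeof(int64_t);
    } else if (type == FLOAT32) {
      return sizeof(float);
    } else {
      return sizeof(int32_t);
    }
  }
};

int main() {
  Tensor t{FLOAT32};
  size_t elem_count = 100;
  std::printf("buffer bytes: %zu\n", elem_count * t.ele_byte());
  return 0;
}
```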
......