Commit 0b92634d authored by HexToString

fix code style

Parent 028b6a7f
@@ -37,6 +37,8 @@ if(NOT CMAKE_BUILD_TYPE)
"Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel"
FORCE)
endif()
+ SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g2 -ggdb")
+ SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")
set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
"A path setting third party libraries download & build directories.")
@@ -57,13 +59,11 @@ option(PACK "Compile for whl"
option(WITH_TRT "Compile Paddle Serving with TRT" OFF)
option(PADDLE_ON_INFERENCE "Compile for encryption" ON)
option(WITH_OPENCV "Compile Paddle Serving with OPENCV" OFF)
option(WITH_GDB "Compile Paddle Serving with GDB" OFF)
if (WITH_GDB)
SET(CMAKE_BUILD_TYPE "Debug")
- SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g2 -ggdb")
- SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")
endif()
if (WITH_OPENCV)
SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
......
@@ -54,8 +54,7 @@ ELSE(WIN32)
SET(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/libz.a" CACHE FILEPATH "zlib library." FORCE)
ENDIF(WIN32)
- IF(WITH_OPENCV)
- ELSE()
+ IF(NOT WITH_OPENCV)
ADD_LIBRARY(zlib STATIC IMPORTED GLOBAL)
ENDIF()
......
@@ -88,7 +88,7 @@ int PredictorClient::init(const std::vector<std::string> &conf_file) {
_shape.push_back(tmp_feed_shape);
}
- if (conf_file.size()>1) {
+ if (conf_file.size() > 1) {
model_config.Clear();
if (configure::read_proto_conf(conf_file[conf_file.size()-1].c_str(), &model_config) != 0) {
LOG(ERROR) << "Failed to load general model config"
@@ -162,8 +162,8 @@ int PredictorClient::numpy_predict(
PredictorRes &predict_res_batch,
const int &pid,
const uint64_t log_id) {
- int batch_size = std::max( float_feed_batch.size(), int_feed_batch.size() );
- batch_size = batch_size>string_feed_batch.size()? batch_size : string_feed_batch.size();
+ int batch_size = std::max(float_feed_batch.size(), int_feed_batch.size());
+ batch_size = batch_size > string_feed_batch.size() ? batch_size : string_feed_batch.size();
VLOG(2) << "batch size: " << batch_size;
predict_res_batch.clear();
Timer timeline;
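A side note on the two rewritten lines above: the code folds the third feed size in with a ternary after a pairwise `std::max`. Since C++11, `std::max` also accepts an initializer list, which expresses the same three-way reduction in one call. A minimal, self-contained sketch under that assumption (the container names are hypothetical stand-ins for `float_feed_batch`, `int_feed_batch`, and `string_feed_batch`):

```cpp
#include <algorithm>  // std::max
#include <cstdint>
#include <string>
#include <vector>

int main() {
  // Hypothetical stand-ins for the three per-type feed batches.
  std::vector<std::vector<float>> float_feeds(4);
  std::vector<std::vector<int64_t>> int_feeds(2);
  std::vector<std::vector<std::string>> string_feeds(7);

  // The initializer-list overload folds all three sizes in a single call,
  // replacing the max-then-ternary sequence shown in the diff.
  size_t batch_size = std::max(
      {float_feeds.size(), int_feeds.size(), string_feeds.size()});

  return batch_size == 7 ? 0 : 1;  // the string batch is largest here
}
```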
@@ -186,6 +186,8 @@ int PredictorClient::numpy_predict(
req.add_fetch_var_names(name);
}
+ int vec_idx = 0;
for (int bi = 0; bi < batch_size; bi++) {
VLOG(2) << "prepare batch " << bi;
std::vector<Tensor *> tensor_vec;
@@ -207,9 +209,13 @@ int PredictorClient::numpy_predict(
VLOG(2) << "batch [" << bi << "] " << "prepared";
- int vec_idx = 0;
+ vec_idx = 0;
for (auto &name : float_feed_name) {
int idx = _feed_name_to_idx[name];
+ if (idx >= tensor_vec.size()) {
+ LOG(ERROR) << "idx > tensor_vec.size()";
+ return -1;
+ }
Tensor *tensor = tensor_vec[idx];
VLOG(2) << "prepare float feed " << name << " shape size "
<< float_shape[vec_idx].size();
@@ -272,6 +278,10 @@ int PredictorClient::numpy_predict(
vec_idx = 0;
for (auto &name : int_feed_name) {
int idx = _feed_name_to_idx[name];
+ if (idx >= tensor_vec.size()) {
+ LOG(ERROR) << "idx > tensor_vec.size()";
+ return -1;
+ }
Tensor *tensor = tensor_vec[idx];
for (uint32_t j = 0; j < int_shape[vec_idx].size(); ++j) {
@@ -358,6 +368,10 @@ int PredictorClient::numpy_predict(
vec_idx = 0;
for (auto &name : string_feed_name) {
int idx = _feed_name_to_idx[name];
+ if (idx >= tensor_vec.size()) {
+ LOG(ERROR) << "idx > tensor_vec.size()";
+ return -1;
+ }
Tensor *tensor = tensor_vec[idx];
for (uint32_t j = 0; j < string_shape[vec_idx].size(); ++j) {
@@ -371,7 +385,7 @@ int PredictorClient::numpy_predict(
const int string_shape_size = string_shape[vec_idx].size();
//string_shape[vec_idx] = [1];cause numpy has no datatype of string.
//we pass string via vector<vector<string> >.
- if( string_shape_size!= 1 ){
+ if (string_shape_size != 1) {
LOG(ERROR) << "string_shape_size should be 1-D, but received is : " << string_shape_size;
return -1;
}
......
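The three `idx >= tensor_vec.size()` guards added in the hunks above follow one pattern: an index obtained from `_feed_name_to_idx` is validated before it is used to address `tensor_vec`, and the call fails with `-1` rather than reading past the end. A minimal standalone sketch of that guard, with hypothetical types and names (note the `size_t` cast, which keeps the signed `int` index from being compared directly against an unsigned container size):

```cpp
#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Hypothetical stand-in for the feed-name -> slot-index lookup plus the
// bounds check this commit adds; returns -1 on error, as the diff does.
int guarded_feed_index(const std::map<std::string, int> &feed_name_to_idx,
                       const std::vector<float> &tensor_vec,
                       const std::string &name) {
  auto it = feed_name_to_idx.find(name);
  if (it == feed_name_to_idx.end()) {
    std::cerr << "unknown feed name: " << name << "\n";
    return -1;
  }
  int idx = it->second;
  // Validate before indexing instead of trusting the config blindly.
  if (idx < 0 || static_cast<size_t>(idx) >= tensor_vec.size()) {
    std::cerr << "idx out of range: " << idx << "\n";
    return -1;
  }
  std::cout << name << " -> slot " << idx << ", value " << tensor_vec[idx]
            << "\n";
  return idx;
}

int main() {
  std::map<std::string, int> name_to_idx{{"image", 0}, {"label", 1}};
  std::vector<float> tensors{0.5f};  // only one slot actually present
  guarded_feed_index(name_to_idx, tensors, "image");               // ok
  return guarded_feed_index(name_to_idx, tensors, "label") == -1 ? 0 : 1;
}
```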
@@ -71,10 +71,7 @@ int GeneralInferOp::inference() {
TensorVector *out = &output_blob->tensor_vector;
int batch_size = input_blob->_batch_size;
VLOG(2) << "(logid=" << log_id << ") input batch size: " << batch_size;
- output_blob->_batch_size = batch_size;
- VLOG(2) << "(logid=" << log_id << ") infer batch size: " << batch_size;
Timer timeline;
......
@@ -46,17 +46,18 @@ int conf_check(const Request *req,
VLOG(2) << "fetch var num in reader op: " << req->fetch_var_names_size();
for (int i = 0; i < var_num; ++i) {
+ const Tensor &tensor = req->insts(0).tensor_array(i);
if (model_config->_feed_type[i] !=
- req->insts(0).tensor_array(i).elem_type()) {
+ tensor.elem_type()) {
LOG(ERROR) << "feed type not match.";
return -1;
}
if (model_config->_feed_shape[i].size() ==
- req->insts(0).tensor_array(i).shape_size()) {
+ tensor.shape_size()) {
for (int j = 0; j < model_config->_feed_shape[i].size(); ++j) {
- req->insts(0).tensor_array(i).shape(j);
+ tensor.shape(j);
if (model_config->_feed_shape[i][j] !=
- req->insts(0).tensor_array(i).shape(j)) {
+ tensor.shape(j)) {
LOG(ERROR) << "feed shape not match.";
return -1;
}
@@ -124,7 +125,7 @@ int GeneralReaderOp::inference() {
paddle::PaddleTensor lod_tensor;
const Tensor &tensor = req->insts(0).tensor_array(i);
data_len = 0;
- elem_type[i] = req->insts(0).tensor_array(i).elem_type();
+ elem_type[i] = tensor.elem_type();
VLOG(2) << "var[" << i << "] has elem type: " << elem_type[i];
if (elem_type[i] == P_INT64) { // int64
elem_size[i] = sizeof(int64_t);
@@ -150,16 +151,16 @@ int GeneralReaderOp::inference() {
// implement lod tensor here
// only support 1-D lod
// TODO:support 2-D lod
- if (req->insts(0).tensor_array(i).lod_size() > 0) {
+ if (tensor.lod_size() > 0) {
VLOG(2) << "(logid=" << log_id << ") var[" << i << "] is lod_tensor";
lod_tensor.lod.resize(1);
- for (int k = 0; k < req->insts(0).tensor_array(i).lod_size(); ++k) {
- lod_tensor.lod[0].push_back(req->insts(0).tensor_array(i).lod(k));
+ for (int k = 0; k < tensor.lod_size(); ++k) {
+ lod_tensor.lod[0].push_back(tensor.lod(k));
}
}
- for (int k = 0; k < req->insts(0).tensor_array(i).shape_size(); ++k) {
- int dim = req->insts(0).tensor_array(i).shape(k);
+ for (int k = 0; k < tensor.shape_size(); ++k) {
+ int dim = tensor.shape(k);
VLOG(2) << "(logid=" << log_id << ") shape for var[" << i
<< "]: " << dim;
lod_tensor.shape.push_back(dim);
@@ -178,57 +179,57 @@ int GeneralReaderOp::inference() {
if (elem_type[i] == P_INT64) {
int64_t *dst_ptr = static_cast<int64_t *>(out->at(i).data.data());
VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i
- << "] is " << req->insts(0).tensor_array(i).int64_data(0);
+ << "] is " << tensor.int64_data(0);
if (!dst_ptr) {
LOG(ERROR) << "dst_ptr is nullptr";
return -1;
}
- memcpy(dst_ptr, req->insts(0).tensor_array(i).int64_data().data(),databuf_size[i]);
+ memcpy(dst_ptr, tensor.int64_data().data(),databuf_size[i]);
/*
- int elem_num = req->insts(0).tensor_array(i).int64_data_size();
+ int elem_num = tensor.int64_data_size();
for (int k = 0; k < elem_num; ++k) {
- dst_ptr[k] = req->insts(0).tensor_array(i).int64_data(k);
+ dst_ptr[k] = tensor.int64_data(k);
}
*/
} else if (elem_type[i] == P_FLOAT32) {
float *dst_ptr = static_cast<float *>(out->at(i).data.data());
VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i
- << "] is " << req->insts(0).tensor_array(i).float_data(0);
+ << "] is " << tensor.float_data(0);
if (!dst_ptr) {
LOG(ERROR) << "dst_ptr is nullptr";
return -1;
}
- memcpy(dst_ptr, req->insts(0).tensor_array(i).float_data().data(),databuf_size[i]);
- /*int elem_num = req->insts(0).tensor_array(i).float_data_size();
+ memcpy(dst_ptr, tensor.float_data().data(),databuf_size[i]);
+ /*int elem_num = tensor.float_data_size();
for (int k = 0; k < elem_num; ++k) {
- dst_ptr[k] = req->insts(0).tensor_array(i).float_data(k);
+ dst_ptr[k] = tensor.float_data(k);
}*/
} else if (elem_type[i] == P_INT32) {
int32_t *dst_ptr = static_cast<int32_t *>(out->at(i).data.data());
VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i
- << "] is " << req->insts(0).tensor_array(i).int_data(0);
+ << "] is " << tensor.int_data(0);
if (!dst_ptr) {
LOG(ERROR) << "dst_ptr is nullptr";
return -1;
}
- memcpy(dst_ptr, req->insts(0).tensor_array(i).int_data().data(),databuf_size[i]);
+ memcpy(dst_ptr, tensor.int_data().data(),databuf_size[i]);
/*
- int elem_num = req->insts(0).tensor_array(i).int_data_size();
+ int elem_num = tensor.int_data_size();
for (int k = 0; k < elem_num; ++k) {
- dst_ptr[k] = req->insts(0).tensor_array(i).int_data(k);
+ dst_ptr[k] = tensor.int_data(k);
}
*/
} else if (elem_type[i] == P_STRING) {
std::string *dst_ptr = static_cast<std::string *>(out->at(i).data.data());
VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i
- << "] is " << req->insts(0).tensor_array(i).data(0);
+ << "] is " << tensor.data(0);
if (!dst_ptr) {
LOG(ERROR) << "dst_ptr is nullptr";
return -1;
}
- int elem_num = req->insts(0).tensor_array(i).data_size();
+ int elem_num = tensor.data_size();
for (int k = 0; k < elem_num; ++k) {
- dst_ptr[k] = req->insts(0).tensor_array(i).data(k);
+ dst_ptr[k] = tensor.data(k);
}
}
}
......
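Nearly every change in this file is one refactor: the repeated accessor chain `req->insts(0).tensor_array(i)` is bound once to a local `const Tensor &tensor`, and all later reads go through that reference, which is shorter and avoids re-evaluating the chained protobuf getters at each use. A minimal sketch of the same pattern with plain structs standing in for the generated protobuf types (all names here are hypothetical):

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-ins for the protobuf-generated Tensor/Instance types.
struct Tensor {
  int elem_type = 0;
  std::vector<int> shape;
  std::vector<int64_t> int64_data;
};
struct Instance {
  std::vector<Tensor> tensor_array;
};

void read_vars(const Instance &inst) {
  for (size_t i = 0; i < inst.tensor_array.size(); ++i) {
    // Bind once, then reuse -- mirroring
    // `const Tensor &tensor = req->insts(0).tensor_array(i);` in the diff.
    const Tensor &tensor = inst.tensor_array[i];
    std::cout << "var[" << i << "] elem_type: " << tensor.elem_type << "\n";
    for (size_t k = 0; k < tensor.shape.size(); ++k) {
      std::cout << "  shape[" << k << "]: " << tensor.shape[k] << "\n";
    }
  }
}

int main() {
  Instance inst;
  inst.tensor_array.push_back({0 /*int64*/, {1, 4}, {1, 2, 3, 4}});
  read_vars(inst);
  return 0;
}
```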
@@ -512,7 +512,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<PaddleInferenceCore
//Inside each for loop, use the in[i]->name as inputName and call 'core->GetInputHandle(inputName)' to get the pointer of InputData.
//Set the lod and shape information of InputData first. then copy data from cpu to the core.
const TensorVector* tensorVector_in_pointer = reinterpret_cast<const TensorVector*>(in);
- for(int i =0; i< tensorVector_in_pointer->size();++i){
+ for (int i=0; i < tensorVector_in_pointer->size(); ++i) {
auto lod_tensor_in = core->GetInputHandle((*tensorVector_in_pointer)[i].name);
lod_tensor_in->SetLoD((*tensorVector_in_pointer)[i].lod);
lod_tensor_in->Reshape((*tensorVector_in_pointer)[i].shape);
@@ -552,7 +552,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<PaddleInferenceCore
}
//Get the type and shape information of OutputData first. then copy data to cpu from the core.
//The pointer type of data_out must be one of float *,int64_t*,int32_t* instead void*.
- for (int i = 0; i < outnames.size(); ++i){
+ for (int i=0; i < outnames.size(); ++i) {
auto lod_tensor_out = core->GetOutputHandle(outnames[i]);
output_shape = lod_tensor_out->shape();
out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
@@ -596,7 +596,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<PaddleInferenceCore
tensor_out.dtype = paddle::PaddleDType(dataType);
tensor_out.shape.assign(output_shape.begin(), output_shape.end());
std::vector<std::vector<size_t>> out_lod = lod_tensor_out->lod();
- for (int li = 0; li < out_lod.size(); ++li) {
+ for (int li=0; li < out_lod.size(); ++li) {
std::vector<size_t> lod_element;
lod_element.assign(out_lod[li].begin(), out_lod[li].end());
tensor_out.lod.push_back(lod_element);
......
@@ -150,10 +150,8 @@ int Resource::initialize(const std::string& path, const std::string& file) {
if (FLAGS_enable_model_toolkit) {
size_t model_toolkit_num = resource_conf.model_toolkit_path_size();
- for (size_t mi = 0; mi < model_toolkit_num; ++mi) {
+ for (size_t mi=0; mi < model_toolkit_num; ++mi) {
std::string model_toolkit_path = resource_conf.model_toolkit_path(mi);
std::string model_toolkit_file = resource_conf.model_toolkit_file(mi);
if (InferManager::instance().proc_initialize(
@@ -227,7 +225,7 @@ int Resource::general_model_initialize(const std::string& path,
return -1;
}
size_t general_model_num = resource_conf.general_model_path_size();
- for (size_t gi = 0; gi < general_model_num; ++gi) {
+ for (size_t gi=0; gi < general_model_num; ++gi) {
std::string general_model_path = resource_conf.general_model_path(gi);
@@ -251,7 +249,7 @@ int Resource::general_model_initialize(const std::string& path,
_config->_is_lod_feed.resize(feed_var_num);
_config->_capacity.resize(feed_var_num);
_config->_feed_shape.resize(feed_var_num);
- for (int i = 0; i < feed_var_num; ++i) {
+ for (int i=0; i < feed_var_num; ++i) {
_config->_feed_name[i] = model_config.feed_var(i).name();
_config->_feed_alias_name[i] = model_config.feed_var(i).alias_name();
VLOG(2) << "feed var[" << i << "]: " << _config->_feed_name[i];
@@ -267,7 +265,7 @@ int Resource::general_model_initialize(const std::string& path,
VLOG(2) << "var[" << i << "] is tensor";
_config->_capacity[i] = 1;
_config->_is_lod_feed[i] = false;
- for (int j = 0; j < model_config.feed_var(i).shape_size(); ++j) {
+ for (int j=0; j < model_config.feed_var(i).shape_size(); ++j) {
int32_t dim = model_config.feed_var(i).shape(j);
VLOG(2) << "var[" << i << "].shape[" << i << "]: " << dim;
_config->_feed_shape[i].push_back(dim);
@@ -281,7 +279,7 @@ int Resource::general_model_initialize(const std::string& path,
_config->_fetch_name.resize(fetch_var_num);
_config->_fetch_alias_name.resize(fetch_var_num);
_config->_fetch_shape.resize(fetch_var_num);
- for (int i = 0; i < fetch_var_num; ++i) {
+ for (int i=0; i < fetch_var_num; ++i) {
_config->_fetch_name[i] = model_config.fetch_var(i).name();
_config->_fetch_alias_name[i] = model_config.fetch_var(i).alias_name();
_config->_fetch_name_to_index[_config->_fetch_name[i]] = i;
@@ -292,7 +290,7 @@ int Resource::general_model_initialize(const std::string& path,
_config->_is_lod_fetch[i] = true;
} else {
_config->_is_lod_fetch[i] = false;
- for (int j = 0; j < model_config.fetch_var(i).shape_size(); ++j) {
+ for (int j=0; j < model_config.fetch_var(i).shape_size(); ++j) {
int dim = model_config.fetch_var(i).shape(j);
_config->_fetch_shape[i].push_back(dim);
}
......
@@ -32,7 +32,7 @@ public class PipelineClientExample {
System.out.println(fetch);
if (StaticPipelineClient.succ != true) {
- if(!StaticPipelineClient.initClient("127.0.0.1","18070")){
+ if (!StaticPipelineClient.initClient("127.0.0.1","18070")) {
System.out.println("connect failed.");
return false;
}
@@ -57,7 +57,7 @@ public class PipelineClientExample {
List<String> fetch = Arrays.asList("prediction");
System.out.println(fetch);
if (StaticPipelineClient.succ != true) {
- if(!StaticPipelineClient.initClient("127.0.0.1","18070")){
+ if (!StaticPipelineClient.initClient("127.0.0.1","18070")) {
System.out.println("connect failed.");
return false;
}
@@ -86,7 +86,7 @@ public class PipelineClientExample {
}};
List<String> fetch = Arrays.asList("prediction");
if (StaticPipelineClient.succ != true) {
- if(!StaticPipelineClient.initClient("127.0.0.1","9998")){
+ if (!StaticPipelineClient.initClient("127.0.0.1","9998")) {
System.out.println("connect failed.");
return false;
}
@@ -105,7 +105,7 @@ public class PipelineClientExample {
* @param npdata INDArray type(The input data).
* @return String (specified String type for python Numpy eval method).
*/
- String convertINDArrayToString(INDArray npdata){
+ String convertINDArrayToString(INDArray npdata) {
return "array("+npdata.toString()+")";
}
......
@@ -30,10 +30,10 @@ public class StaticPipelineClient {
* @param strPort String type(The server port) such as "8891".
* @return boolean (the sign of connect status).
*/
- public static boolean initClient(String strIp,String strPort){
+ public static boolean initClient(String strIp,String strPort) {
String target = strIp+ ":"+ strPort;//"172.17.0.2:18070";
System.out.println("initial connect.");
- if(succ){
+ if (succ) {
System.out.println("already connect.");
return true;
}
......
@@ -88,7 +88,7 @@ public class PipelineClient {
keys.add(entry.getKey());
values.add(entry.getValue());
}
- if(profile){
+ if (profile) {
keys.add(_profile_key);
values.add(_profile_value);
}
......
@@ -31,13 +31,18 @@ sys.path.append(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'proto'))
from .proto import multi_lang_general_model_service_pb2_grpc
+ #param 'type'(which is in feed_var or fetch_var) = 0 means dataType is int64
+ #param 'type'(which is in feed_var or fetch_var) = 1 means dataType is float32
+ #param 'type'(which is in feed_var or fetch_var) = 2 means dataType is int32
+ #param 'type'(which is in feed_var or fetch_var) = 3 means dataType is string(also called bytes in proto)
int64_type = 0
float32_type = 1
int32_type = 2
bytes_type = 3
+ #int_type,float_type,string_type are the set of each subdivision classes.
int_type = set([int64_type, int32_type])
float_type = set([float32_type])
- string_type= set([bytes_type])
+ string_type = set([bytes_type])
class _NOPProfiler(object):
@@ -172,9 +177,9 @@ class Client(object):
self.client_handle_.init_gflags([sys.argv[
0]] + ["--tryfromenv=" + ",".join(read_env_flags)])
self.feed_names_ = [var.alias_name for var in model_conf.feed_var]
- self.feed_names_to_idx_ = {}#this is not useful
+ self.feed_names_to_idx_ = {} #this is not useful
self.lod_tensor_set = set()
- self.feed_tensor_len = {}#this is only used for shape check
+ self.feed_tensor_len = {} #this is only used for shape check
self.key = None
for i, var in enumerate(model_conf.feed_var):
@@ -420,9 +425,9 @@ class Client(object):
res = self.client_handle_.numpy_predict(
float_slot_batch, float_feed_names, float_shape,
float_lod_slot_batch, int_slot_batch, int_feed_names, int_shape,
- int_lod_slot_batch, string_slot_batch, string_feed_names, string_shape,
- string_lod_slot_batch, fetch_names, result_batch_handle, self.pid,
- log_id)
+ int_lod_slot_batch, string_slot_batch, string_feed_names,
+ string_shape, string_lod_slot_batch, fetch_names,
+ result_batch_handle, self.pid, log_id)
elif self.has_numpy_input == False:
raise ValueError(
"Please make sure all of your inputs are numpy array")
......