提交 9bd6b3f6 编写于 作者: H HexToString

Fix 2.0 API

上级 7933a721
......@@ -18,7 +18,9 @@ set(PADDLE_SERVING_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(PADDLE_SERVING_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
SET(PADDLE_SERVING_INSTALL_DIR ${CMAKE_BINARY_DIR}/output)
SET(CMAKE_INSTALL_RPATH "\$ORIGIN" "${CMAKE_INSTALL_RPATH}")
SET(CMAKE_BUILD_TYPE "Debug")
SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g -ggdb")
SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")
include(system)
project(paddle-serving CXX C)
......
......@@ -80,9 +80,12 @@ int GeneralInferOp::inference() {
}
int64_t end = timeline.TimeStampUS();
std::cout << "GeneralInferOp ---ysl" << std::endl;
LOG(ERROR) << "GeneralInferOp ---ysl";
CopyBlobInfo(input_blob, output_blob);
AddBlobInfo(output_blob, start);
AddBlobInfo(output_blob, end);
std::cout << "GeneralInferOp ---ysl222" << std::endl;
return 0;
}
DEFINE_OP(GeneralInferOp);
......
......@@ -244,6 +244,8 @@ int GeneralReaderOp::inference() {
AddBlobInfo(res, end);
VLOG(2) << "(logid=" << log_id << ") read data from client success";
LOG(ERROR) << "GeneralReaderOp ---ysl";
std::cout << "GeneralReaderOp ---ysl" << std::endl;
return 0;
}
DEFINE_OP(GeneralReaderOp);
......
......@@ -139,7 +139,12 @@ int GeneralResponseOp::inference() {
} else if (dtype == paddle::PaddleDType::FLOAT32) {
VLOG(2) << "(logid=" << log_id << ") Prepare float var ["
<< model_config->_fetch_name[idx] << "].";
float *data_ptr = static_cast<float *>(in->at(idx).data.data());
std::cout<<" response op ---- for"<<std::endl;
for(int k =0; k<cap; ++k){
std::cout<< "i am ysl -response op-copy idx = "<< k<< "num = "<< *(data_ptr+k)<<std::endl;
}
google::protobuf::RepeatedField<float> tmp_data(data_ptr,
data_ptr + cap);
fetch_p->mutable_tensor_array(var_idx)->mutable_float_data()->Swap(
......@@ -193,7 +198,9 @@ int GeneralResponseOp::inference() {
res->add_profile_time(start);
res->add_profile_time(end);
}
std::cout << "GeneralResponseOp ---ysl" << std::endl;
LOG(ERROR) << "GeneralResponseOp ---ysl";
return 0;
}
......
......@@ -595,6 +595,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<FluidFamilyCore> {
~FluidInferEngine() {}
int infer_impl1(const void* in, void* out, uint32_t batch_size = -1) {
LOG(ERROR) << "come in infer_impl1 ---ysl";
FluidFamilyCore* core =DBReloadableInferEngine<FluidFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(ERROR) << "Failed get fluid core in infer_impl()";
......@@ -603,6 +604,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<FluidFamilyCore> {
//set inputHandle
const BatchTensor* batchTensor_pointer_in = reinterpret_cast<const BatchTensor*>(in);
std::cout<<"input tensor: "<<batchTensor_pointer_in->count()<<std::endl;
for(int i =0; i< batchTensor_pointer_in->count();++i){
Tensor tensor_in_batchTensor = (*batchTensor_pointer_in)[i];
auto lod_tensor_in = core->GetInputHandle(tensor_in_batchTensor.name);
......@@ -615,55 +617,68 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<FluidFamilyCore> {
}else if(tensor_in_batchTensor.type == INT64){
int64_t* data = reinterpret_cast<int64_t*>(origin_data);
lod_tensor_in->CopyFromCpu(data);
}else if(tensor_in_batchTensor.type == INT32){
}/*else if(tensor_in_batchTensor.type == INT32){
int32_t* data = reinterpret_cast<int32_t*>(origin_data);
lod_tensor_in->CopyFromCpu(data);
}
}*/
}
if (!core->Run()) {
LOG(ERROR) << "Failed run fluid family core";
return -1;
}
LOG(ERROR) << "Run infer_impl1 ---ysl";
//get out and copy to void* out
BatchTensor* batchTensor_pointer_out = reinterpret_cast<BatchTensor*>(out);
LOG(ERROR) << "reinterpret_cast infer_impl1 ---ysl";
std::vector<std::string> outnames = core->GetOutputNames();
for (int i = 0; i < outnames.size(); ++i){
auto lod_tensor_out = core->GetOutputHandle(outnames[i]);
std::vector<int> output_shape = lod_tensor_out->shape();
int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
int dataType = lod_tensor_out->type();
void* databuf_data = NULL;
char* databuf_data = NULL;
size_t databuf_size = 0;
if(dataType == FLOAT32){
float* data_out = new float[out_num];
lod_tensor_out->CopyToCpu(data_out);
databuf_data = reinterpret_cast<void*>(data_out);
databuf_size = sizeof(float);
}else if (dataType == INT64){
for ( int j = 0; j < out_num; j++ )
{std::cout << "ysl----data_out[+ " << j << "]) : ";std::cout << *(data_out + j) << std::endl;}
databuf_data = reinterpret_cast<char*>(data_out);
databuf_size = out_num*sizeof(float);
}else if(dataType == INT64){
int64_t* data_out = new int64_t[out_num];
lod_tensor_out->CopyToCpu(data_out);
databuf_data = reinterpret_cast<void*>(data_out);
databuf_size = sizeof(int64_t);
}else if (dataType == INT32){
for ( int j = 0; j < out_num; j++ )
{std::cout << "ysl----data_out[+ " << j << "]) : ";std::cout << *(data_out + j) << std::endl;}
databuf_data = reinterpret_cast<char*>(data_out);
databuf_size = out_num*sizeof(int64_t);
}/*else (dataType == INT32){
int32_t* data_out = new int32_t[out_num];
lod_tensor_out->CopyToCpu(data_out);
databuf_data = reinterpret_cast<void*>(data_out);
databuf_size = sizeof(int32_t);
}
Tensor tensor_out;
tensor_out.name = outnames[i];
tensor_out.type = DataType(dataType);
tensor_out.shape.assign(output_shape.begin(), output_shape.end());
for ( int j = 0; j < out_num; j++ )
{std::cout << "ysl----data_out[+ " << j << "]) : ";std::cout << *(data_out + j) << std::endl;}
databuf_data = reinterpret_cast<char*>(data_out);
databuf_size = out_num*sizeof(int32_t);
}*/
Tensor* tensor_out = new Tensor();
tensor_out->name = outnames[i];
std::cout<< "i am test ----outnames:"<<outnames[i]<<std::endl;
tensor_out->type = DataType(dataType);
tensor_out->shape.assign(output_shape.begin(), output_shape.end());
std::vector<std::vector<size_t>> out_lod = lod_tensor_out->lod();
for (int li = 0; li < out_lod.size(); ++li) {
std::vector<size_t> lod_element;
lod_element.assign(out_lod[li].begin(), out_lod[li].end());
tensor_out.lod.push_back(lod_element);
tensor_out->lod.push_back(lod_element);
}
tensor_out.data = DataBuf(databuf_data,databuf_size);
batchTensor_pointer_out->push_back_owned(tensor_out);
}
LOG(ERROR) << "DataBuf infer_impl1 ---ysl";
DataBuf* newData = new DataBuf(databuf_data,databuf_size,false);
tensor_out->data = *newData;
batchTensor_pointer_out->push_back(*tensor_out);
LOG(ERROR) << "push_back infer_impl1 ---ysl";
}
LOG(ERROR) << "return infer_impl1 ---ysl";
std::cout << (*batchTensor_pointer_in)[0].shape.size()<< "(*batchTensor_pointer_in)[0].shape.size()"<<std::endl;
return 0;
}
......
......@@ -21,7 +21,7 @@ namespace baidu {
namespace paddle_serving {
namespace predictor {
enum DataType { FLOAT32, INT64,INT32 };
enum DataType { FLOAT32, INT64 };
class DataBuf {
public:
......@@ -84,12 +84,9 @@ struct Tensor {
size_t ele_byte() const {
if (type == INT64) {
return sizeof(int64_t);
} else if(type == FLOAT32) {
} else {
return sizeof(float);
} else if(type == INT32){
return sizeof(int32_t);
}
return sizeof(int32_t);
}
}
bool valid() const {
......
......@@ -183,6 +183,7 @@ int InferService::inference(const google::protobuf::Message* request,
VLOG(2) << "(logid=" << log_id << ") enable map request == False";
TRACEPRINTF("(logid=%" PRIu64 ") start to execute one workflow", log_id);
size_t fsize = _flows.size();
std::cout<< "ysl--total workflow:"<< fsize <<std::endl;
for (size_t fi = 0; fi < fsize; ++fi) {
TRACEPRINTF(
"(logid=%" PRIu64 ") start to execute one workflow-%lu", log_id, fi);
......@@ -197,6 +198,7 @@ int InferService::inference(const google::protobuf::Message* request,
}
}
}
std::cout<< "ysl----InferService::inference finish"<<std::endl;
return ERR_OK;
}
......@@ -266,8 +268,10 @@ int InferService::_execute_workflow(Workflow* workflow,
WORKFLOW_METRIC_PREFIX + dv->full_name(), workflow_time.u_elapsed());
// return tls data to object pool
std::cout<<"ysl ------- _execute_workflow------"<<std::endl;
workflow->return_dag_view(dv);
TRACEPRINTF("(logid=%" PRIu64 ") finish to return dag view", log_id);
std::cout<<"ysl ------- _execute_workflow return_dag_view------"<<std::endl;
return ERR_OK;
}
......
......@@ -163,6 +163,7 @@ int Op::process(const uint64_t log_id, bool debug) {
OP_METRIC_PREFIX + full_name(), op_time.u_elapsed());
LOG(INFO) << "(logid=" << log_id << ") " << name() << "_time=["
<< op_time.u_elapsed() << "]";
std::cout << "op process finish --ysl"<<_name<<std::endl;
return ERR_OK;
}
......
......@@ -118,6 +118,8 @@ int ut_main(int argc, char** argv) {
#else
int main(int argc, char** argv) {
#endif
try
{
google::ParseCommandLineFlags(&argc, &argv, true);
if (FLAGS_V) {
......@@ -239,5 +241,11 @@ int main(int argc, char** argv) {
google::ShutdownGoogleLogging();
#endif
VLOG(2) << "Paddle Inference Server exit successfully!";
}
catch (const std::exception &exc)
{
// catch anything thrown within try block that derives from std::exception
std::cerr << exc.what();
}
return 0;
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册