Unverified commit 1822f86e, authored by liu zhengxi, committed by GitHub

fix the PD_ZeroCopyPredictorRun output problem and cmake, test=release/1.6 (#20624)

Parent 1ba0d70b
-cc_library(pd_config SRCS pd_config.cc)
-cc_library(pd_predictor SRCS pd_predictor.cc)
-cc_library(pd_tensor SRCS pd_tensor.cc)
-cc_library(pd_c_api SRCS c_api.cc)
-cc_library(paddle_fluid_c SRCS c_api.cc DEPS paddle_fluid pd_config pd_predictor pd_tensor pd_c_api)
-cc_library(paddle_fluid_c_shared SHARED SRCS c_api.cc DEPS paddle_fluid pd_config pd_predictor pd_tensor pd_c_api)
+set(C_API_SRCS pd_config.cc pd_predictor.cc pd_tensor.cc c_api.cc)
+cc_library(paddle_fluid_c SRCS ${C_API_SRCS} DEPS paddle_fluid)
+cc_library(paddle_fluid_c_shared SHARED SRCS ${C_API_SRCS} DEPS paddle_fluid)
 set_target_properties(paddle_fluid_c_shared PROPERTIES OUTPUT_NAME paddle_fluid_c)
 if(WIN32)
   target_link_libraries(paddle_fluid_c_shared shlwapi.lib)
......
@@ -29,22 +29,34 @@ void PD_DeletePaddleBuf(PD_PaddleBuf* buf) {
   if (buf) {
     delete buf;
     buf = nullptr;
+    VLOG(3) << "PD_PaddleBuf delete successfully. ";
   }
 }
 
 void PD_PaddleBufResize(PD_PaddleBuf* buf, size_t length) {
+  PADDLE_ENFORCE_NOT_NULL(buf);
   buf->buf.Resize(length);
 }
 
 void PD_PaddleBufReset(PD_PaddleBuf* buf, void* data, size_t length) {
+  PADDLE_ENFORCE_NOT_NULL(buf);
   buf->buf.Reset(data, length);
 }
 
-bool PD_PaddleBufEmpty(PD_PaddleBuf* buf) { return buf->buf.empty(); }
+bool PD_PaddleBufEmpty(PD_PaddleBuf* buf) {
+  PADDLE_ENFORCE_NOT_NULL(buf);
+  return buf->buf.empty();
+}
 
-void* PD_PaddleBufData(PD_PaddleBuf* buf) { return buf->buf.data(); }
+void* PD_PaddleBufData(PD_PaddleBuf* buf) {
+  PADDLE_ENFORCE_NOT_NULL(buf);
+  return buf->buf.data();
+}
 
-size_t PD_PaddleBufLength(PD_PaddleBuf* buf) { return buf->buf.length(); }
+size_t PD_PaddleBufLength(PD_PaddleBuf* buf) {
+  PADDLE_ENFORCE_NOT_NULL(buf);
+  return buf->buf.length();
+}
 
 }  // extern "C"
......
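For context, a minimal usage sketch of the PD_PaddleBuf helpers touched above, using only the functions that appear in this diff. The include name is an assumption, not taken from the diff; the added PADDLE_ENFORCE_NOT_NULL checks now abort on a null buf.

// Sketch only; the header name below is an assumption.
#include "paddle_c_api.h"

int paddle_buf_smoke_test() {
  PD_PaddleBuf* buf = PD_NewPaddleBuf();
  float host[4] = {0.f, 1.f, 2.f, 3.f};
  // Point the buffer at caller-owned memory.
  PD_PaddleBufReset(buf, host, sizeof(host));
  int ok = !PD_PaddleBufEmpty(buf) &&
           PD_PaddleBufLength(buf) == sizeof(host) &&
           PD_PaddleBufData(buf) != nullptr;
  PD_DeletePaddleBuf(buf);
  return ok;
}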
@@ -104,7 +104,7 @@ PADDLE_CAPI_EXPORT extern bool PD_PredictorRun(const PD_AnalysisConfig* config,
 PADDLE_CAPI_EXPORT extern bool PD_PredictorZeroCopyRun(
     const PD_AnalysisConfig* config, PD_ZeroCopyData* inputs, int in_size,
-    PD_ZeroCopyData* output, int** out_size);
+    PD_ZeroCopyData** output, int** out_size);
 
 // AnalysisConfig
 
 enum Precision { kFloat32 = 0, kInt8, kHalf };
......
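With the declaration change above, the caller passes the address of an unallocated PD_ZeroCopyData pointer and receives the array that PD_PredictorZeroCopyRun allocates. A sketch of the call pattern, modeled on the tests later in this diff; config construction and full input setup are abbreviated, PD_NewAnalysisConfig is assumed as the counterpart of PD_DeleteAnalysisConfig, and the input name is hypothetical.

PD_AnalysisConfig* config = PD_NewAnalysisConfig();  // assumed constructor name
// ... set the model path and other options on config ...

PD_ZeroCopyData input;
input.name = const_cast<char*>("data");              // hypothetical input name
// ... fill input.data, input.dtype, input.shape, input.shape_size ...

PD_ZeroCopyData* outputs = nullptr;  // allocated inside PD_PredictorZeroCopyRun
int* out_size = nullptr;
PD_PredictorZeroCopyRun(config, &input, 1, &outputs, &out_size);

for (int i = 0; i < *out_size; ++i) {
  LOG(INFO) << outputs[i].name << " has " << outputs[i].shape_size << " dims";
}
PD_DeleteAnalysisConfig(config);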
@@ -33,6 +33,7 @@ void PD_DeleteAnalysisConfig(PD_AnalysisConfig* config) {
   if (config) {
     delete config;
     config = nullptr;
+    VLOG(3) << "PD_AnalysisConfig delete successfully. ";
   }
 }
 
......
@@ -28,6 +28,7 @@ extern "C" {
 bool PD_PredictorRun(const PD_AnalysisConfig* config, PD_Tensor* inputs,
                      int in_size, PD_Tensor* output_data, int** out_size,
                      int batch_size) {
+  PADDLE_ENFORCE_NOT_NULL(config);
   auto predictor = paddle::CreatePaddlePredictor(config->config);
   std::vector<paddle::PaddleTensor> in;
   for (int i = 0; i < in_size; ++i) {
@@ -47,9 +48,11 @@ bool PD_PredictorRun(const PD_AnalysisConfig* config, PD_Tensor* inputs,
 
 bool PD_PredictorZeroCopyRun(const PD_AnalysisConfig* config,
                              PD_ZeroCopyData* inputs, int in_size,
-                             PD_ZeroCopyData* output, int** out_size) {
+                             PD_ZeroCopyData** output, int** out_size) {
+  PADDLE_ENFORCE_NOT_NULL(config);
   auto predictor = paddle::CreatePaddlePredictor(config->config);
   auto input_names = predictor->GetInputNames();
+  VLOG(3) << "The inputs' size is " << input_names.size();
   PADDLE_ENFORCE_EQ(
       input_names.size(), in_size,
       "The number of input and the number of model's input must match. ");
@@ -81,26 +84,27 @@ bool PD_PredictorZeroCopyRun(const PD_AnalysisConfig* config,
   auto output_names = predictor->GetOutputNames();
   int osize = output_names.size();
   *out_size = &osize;
-  output = new PD_ZeroCopyData[osize];
+  *output = new PD_ZeroCopyData[osize];
+  VLOG(3) << "The output size is " << osize;
   for (int i = 0; i < osize; ++i) {
-    LOG(INFO) << 1;
-    output[i].name = new char[output_names[i].length() + 1];
-    snprintf(output[i].name, output_names[i].length() + 1, "%s",
+    auto& output_i = (*output)[i];
+    output_i.name = new char[output_names[i].length() + 1];
+    snprintf(output_i.name, output_names[i].length() + 1, "%s",
              output_names[i].c_str());
     auto output_t = predictor->GetOutputTensor(output_names[i]);
-    output[i].dtype = ConvertToPDDataType(output_t->type());
+    output_i.dtype = ConvertToPDDataType(output_t->type());
     std::vector<int> output_shape = output_t->shape();
-    output[i].shape = new int[output_shape.size()];
-    output[i].shape = output_shape.data();
-    output[i].shape_size = output_shape.size();
-    switch (output[i].dtype) {
+    output_i.shape = new int[output_shape.size()];
+    output_i.shape = output_shape.data();
+    output_i.shape_size = output_shape.size();
+    switch (output_i.dtype) {
       case PD_FLOAT32: {
         std::vector<float> out_data;
         int out_num = std::accumulate(output_shape.begin(), output_shape.end(),
                                       1, std::multiplies<int>());
         out_data.resize(out_num);
         output_t->copy_to_cpu(out_data.data());
-        output[i].data = static_cast<void*>(out_data.data());
+        output_i.data = static_cast<void*>(out_data.data());
       } break;
       case PD_INT32: {
         std::vector<int32_t> out_data;
@@ -108,7 +112,7 @@ bool PD_PredictorZeroCopyRun(const PD_AnalysisConfig* config,
                                       1, std::multiplies<int>());
         out_data.resize(out_num);
         output_t->copy_to_cpu(out_data.data());
-        output[i].data = static_cast<void*>(out_data.data());
+        output_i.data = static_cast<void*>(out_data.data());
       } break;
       case PD_INT64: {
         std::vector<int64_t> out_data;
@@ -116,7 +120,7 @@ bool PD_PredictorZeroCopyRun(const PD_AnalysisConfig* config,
                                       1, std::multiplies<int>());
         out_data.resize(out_num);
         output_t->copy_to_cpu(out_data.data());
-        output[i].data = static_cast<void*>(out_data.data());
+        output_i.data = static_cast<void*>(out_data.data());
       } break;
       case PD_UINT8: {
         std::vector<uint8_t> out_data;
@@ -124,7 +128,7 @@ bool PD_PredictorZeroCopyRun(const PD_AnalysisConfig* config,
                                       1, std::multiplies<int>());
         out_data.resize(out_num);
         output_t->copy_to_cpu(out_data.data());
-        output[i].data = static_cast<void*>(out_data.data());
+        output_i.data = static_cast<void*>(out_data.data());
       } break;
       default:
         CHECK(false) << "Unsupport data type.";
......
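Since the output array is now handed back through the double pointer, a caller can walk it and branch on dtype the same way the loop above fills it. A sketch only; the field and enum names come from this diff, and ownership of the returned buffers follows whatever the implementation above allocates.

// Sketch: interpret one PD_ZeroCopyData element returned by PD_PredictorZeroCopyRun.
void print_first_value(const PD_ZeroCopyData& out) {
  int numel = 1;
  for (int d = 0; d < out.shape_size; ++d) numel *= out.shape[d];
  if (numel <= 0) return;
  switch (out.dtype) {
    case PD_FLOAT32:
      LOG(INFO) << out.name << "[0] = " << static_cast<float*>(out.data)[0];
      break;
    case PD_INT32:
      LOG(INFO) << out.name << "[0] = " << static_cast<int32_t*>(out.data)[0];
      break;
    case PD_INT64:
      LOG(INFO) << out.name << "[0] = " << static_cast<int64_t*>(out.data)[0];
      break;
    case PD_UINT8:
      LOG(INFO) << out.name << "[0] = "
                << static_cast<int>(static_cast<uint8_t*>(out.data)[0]);
      break;
    default:
      break;
  }
}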
@@ -29,40 +29,49 @@ void PD_DeletePaddleTensor(PD_Tensor* tensor) {
   if (tensor) {
     delete tensor;
     tensor = nullptr;
+    VLOG(3) << "PD_Tensor delete successfully. ";
   }
 }
 
 void PD_SetPaddleTensorName(PD_Tensor* tensor, char* name) {
+  PADDLE_ENFORCE_NOT_NULL(tensor);
   tensor->tensor.name = std::string(name);
 }
 
 void PD_SetPaddleTensorDType(PD_Tensor* tensor, PD_DataType dtype) {
+  PADDLE_ENFORCE_NOT_NULL(tensor);
   tensor->tensor.dtype = paddle::ConvertToPaddleDType(dtype);
 }
 
 void PD_SetPaddleTensorData(PD_Tensor* tensor, PD_PaddleBuf* buf) {
+  PADDLE_ENFORCE_NOT_NULL(tensor);
   tensor->tensor.data = buf->buf;
 }
 
 void PD_SetPaddleTensorShape(PD_Tensor* tensor, int* shape, int size) {
+  PADDLE_ENFORCE_NOT_NULL(tensor);
   tensor->tensor.shape.assign(shape, shape + size);
 }
 
 const char* PD_GetPaddleTensorName(const PD_Tensor* tensor) {
+  PADDLE_ENFORCE_NOT_NULL(tensor);
   return tensor->tensor.name.c_str();
 }
 
 PD_DataType PD_GetPaddleTensorDType(const PD_Tensor* tensor) {
+  PADDLE_ENFORCE_NOT_NULL(tensor);
   return ConvertToPDDataType(tensor->tensor.dtype);
 }
 
 PD_PaddleBuf* PD_GetPaddleTensorData(const PD_Tensor* tensor) {
+  PADDLE_ENFORCE_NOT_NULL(tensor);
   PD_PaddleBuf* ret = PD_NewPaddleBuf();
   ret->buf = tensor->tensor.data;
   return ret;
 }
 
 int* PD_GetPaddleTensorShape(const PD_Tensor* tensor, int** size) {
+  PADDLE_ENFORCE_NOT_NULL(tensor);
   std::vector<int> shape = tensor->tensor.shape;
   int s = shape.size();
   *size = &s;
......
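For completeness, a sketch of the PD_Tensor setters and getters guarded above. PD_NewPaddleTensor is assumed to exist as the counterpart of PD_DeletePaddleTensor shown in this diff, and the tensor name is hypothetical.

float values[4] = {1.f, 2.f, 3.f, 4.f};
int shape[2] = {2, 2};

PD_Tensor* tensor = PD_NewPaddleTensor();  // assumed constructor name
PD_PaddleBuf* buf = PD_NewPaddleBuf();
PD_PaddleBufReset(buf, values, sizeof(values));

PD_SetPaddleTensorName(tensor, const_cast<char*>("input0"));  // hypothetical name
PD_SetPaddleTensorDType(tensor, PD_FLOAT32);
PD_SetPaddleTensorShape(tensor, shape, 2);
PD_SetPaddleTensorData(tensor, buf);

LOG(INFO) << PD_GetPaddleTensorName(tensor);  // prints "input0"

PD_DeletePaddleBuf(buf);
PD_DeletePaddleTensor(tensor);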
@@ -92,7 +92,10 @@ void zero_copy_run() {
   inputs[1].shape = label_shape;
   inputs[1].shape_size = label_shape_size;
 
-  PD_PredictorZeroCopyRun(config, inputs, in_size, outputs, &out_size);
+  PD_PredictorZeroCopyRun(config, inputs, in_size, &outputs, &out_size);
+  LOG(INFO) << outputs[0].name;
+  LOG(INFO) << outputs[0].shape_size;
 }
 
 TEST(PD_ZeroCopyRun, zero_copy_run) {
......
@@ -74,7 +74,7 @@ void zero_copy_run() {
   inputs->shape = shape;
   inputs->shape_size = shape_size;
 
-  PD_PredictorZeroCopyRun(config, inputs, in_size, outputs, &out_size);
+  PD_PredictorZeroCopyRun(config, inputs, in_size, &outputs, &out_size);
 }
 
 TEST(PD_ZeroCopyRun, zero_copy_run) { zero_copy_run<float>(); }
......