Commit 33a58e58 authored by liu zhengxi, committed by flame

[cherry-pick] c api update in PD_PredictorRun (#20705)

* improve the performance of capi in PD_PredictorRun (#20665)

* alter the capi of PD_PredictorRun to provide proper function, test=release/1.6
Parent 2099618d
@@ -99,8 +99,8 @@ PADDLE_CAPI_EXPORT extern int* PD_GetPaddleTensorShape(const PD_Tensor* tensor,
 // AnalysisPredictor
 PADDLE_CAPI_EXPORT extern bool PD_PredictorRun(const PD_AnalysisConfig* config,
                                                PD_Tensor* inputs, int in_size,
-                                               PD_Tensor* output_data,
-                                               int** out_size, int batch_size);
+                                               PD_Tensor** output_data,
+                                               int* out_size, int batch_size);
 PADDLE_CAPI_EXPORT extern bool PD_PredictorZeroCopyRun(
     const PD_AnalysisConfig* config, PD_ZeroCopyData* inputs, int in_size,
......
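With the revised signature, the caller passes the address of a PD_Tensor* and receives the output count through a plain int* instead of the old int**. A minimal calling sketch, using the same names as the updated tests further down (config and input are assumed to be set up as shown there):

PD_Tensor* out_data = PD_NewPaddleTensor();
int out_size = 0;
if (PD_PredictorRun(config, input, /* in_size */ 1, &out_data,
                    &out_size, /* batch_size */ 1)) {
  LOG(INFO) << out_size;  // number of output tensors written by the run
}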
@@ -26,10 +26,16 @@ using paddle::ConvertToACPrecision;
 extern "C" {
 bool PD_PredictorRun(const PD_AnalysisConfig* config, PD_Tensor* inputs,
-                     int in_size, PD_Tensor* output_data, int** out_size,
+                     int in_size, PD_Tensor** output_data, int* out_size,
                      int batch_size) {
   PADDLE_ENFORCE_NOT_NULL(config);
-  auto predictor = paddle::CreatePaddlePredictor(config->config);
+  static std::map<std::string, std::unique_ptr<paddle::PaddlePredictor>>
+      predictors;
+  if (!predictors.count(config->config.model_dir())) {
+    predictors[config->config.model_dir()] =
+        paddle::CreatePaddlePredictor(config->config);
+  }
+  auto& predictor = predictors[config->config.model_dir()];
   std::vector<paddle::PaddleTensor> in;
   for (int i = 0; i < in_size; ++i) {
     in.emplace_back(inputs->tensor);
@@ -37,10 +43,11 @@ bool PD_PredictorRun(const PD_AnalysisConfig* config, PD_Tensor* inputs,
   std::vector<paddle::PaddleTensor> out;
   if (predictor->Run(in, &out, batch_size)) {
     int osize = out.size();
+    *output_data = new PD_Tensor[osize];
     for (int i = 0; i < osize; ++i) {
-      output_data[i].tensor = out[i];
+      output_data[i]->tensor = out[i];
     }
-    *out_size = &osize;
+    *out_size = osize;
     return true;
   }
   return false;
......
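The performance gain comes from memoizing the predictor: instead of constructing a new one on every call, the implementation keeps a function-local static map keyed by the model directory and reuses the cached instance. The same pattern in isolation, as a generic C++ sketch (Predictor and GetPredictor are placeholder names for illustration, not part of the Paddle API):

#include <map>
#include <memory>
#include <string>

// Stand-in for an expensive-to-construct object such as paddle::PaddlePredictor.
struct Predictor {
  explicit Predictor(const std::string& dir) : model_dir(dir) {}
  std::string model_dir;
};

// Returns a cached instance, constructing it only on the first request.
Predictor& GetPredictor(const std::string& model_dir) {
  // Function-local static map: survives across calls, one entry per model_dir.
  static std::map<std::string, std::unique_ptr<Predictor>> predictors;
  if (!predictors.count(model_dir)) {
    predictors[model_dir] =
        std::unique_ptr<Predictor>(new Predictor(model_dir));  // built once
  }
  return *predictors[model_dir];
}

Two consequences of the patch worth noting: the cache key is only the model directory, so two configs sharing a model_dir will reuse the same predictor; and the outputs are now returned as a single heap array (*output_data = new PD_Tensor[osize]), which the caller is responsible for releasing.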
@@ -21,6 +21,7 @@ limitations under the License. */
 #include <string>
 #include <vector>
 #include "paddle/fluid/inference/capi/c_api.h"
+#include "paddle/fluid/inference/capi/c_api_internal.h"
 #include "paddle/fluid/inference/tests/api/tester_helper.h"

 namespace paddle {
@@ -56,16 +57,15 @@ void PD_run() {
   PD_SetPaddleTensorData(input, buf);
   PD_Tensor* out_data = PD_NewPaddleTensor();
-  int* out_size;
-  PD_PredictorRun(config, input, 1, out_data, &out_size, 1);
-  LOG(INFO) << *out_size;
+  int out_size;
+  PD_PredictorRun(config, input, 1, &out_data, &out_size, 1);
+  LOG(INFO) << out_size;
   LOG(INFO) << PD_GetPaddleTensorName(out_data);
   LOG(INFO) << PD_GetPaddleTensorDType(out_data);
   PD_PaddleBuf* b = PD_GetPaddleTensorData(out_data);
-  LOG(INFO) << PD_PaddleBufLength(b);
+  LOG(INFO) << PD_PaddleBufLength(b) / sizeof(float);
   float* result = static_cast<float*>(PD_PaddleBufData(b));
   LOG(INFO) << *result;
-  PD_PaddleBufResize(b, 500);
   PD_DeletePaddleTensor(input);
   int* size;
   PD_GetPaddleTensorShape(out_data, &size);
@@ -132,16 +132,15 @@ void buffer_run() {
   PD_SetPaddleTensorData(input, buf);
   PD_Tensor* out_data = PD_NewPaddleTensor();
-  int* out_size;
-  PD_PredictorRun(config, input, 1, out_data, &out_size, 1);
-  LOG(INFO) << *out_size;
+  int out_size;
+  PD_PredictorRun(config, input, 1, &out_data, &out_size, 1);
+  LOG(INFO) << out_size;
   LOG(INFO) << PD_GetPaddleTensorName(out_data);
   LOG(INFO) << PD_GetPaddleTensorDType(out_data);
   PD_PaddleBuf* b = PD_GetPaddleTensorData(out_data);
-  LOG(INFO) << PD_PaddleBufLength(b);
+  LOG(INFO) << PD_PaddleBufLength(b) / sizeof(float);
   float* result = static_cast<float*>(PD_PaddleBufData(b));
   LOG(INFO) << *result;
-  PD_PaddleBufResize(b, 500);
   PD_DeletePaddleTensor(input);
   PD_DeletePaddleBuf(buf);
 }
......
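The updated tests also log the element count rather than the raw byte length: the division by sizeof(float) implies PD_PaddleBufLength reports bytes. A short sketch of reading the whole output buffer under that assumption (and assuming float output data, as in these tests):

PD_PaddleBuf* b = PD_GetPaddleTensorData(out_data);
size_t n = PD_PaddleBufLength(b) / sizeof(float);  // bytes -> element count
float* result = static_cast<float*>(PD_PaddleBufData(b));
for (size_t i = 0; i < n; ++i) {
  LOG(INFO) << result[i];  // consume each output value
}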