diff --git a/paddle/fluid/inference/capi/c_api.h b/paddle/fluid/inference/capi/c_api.h index 13336cbd19cb453fae9226bc9db2bcf3e2779c3d..b5ef410aadabe46c856261e5b8b9cee1c661b2df 100644 --- a/paddle/fluid/inference/capi/c_api.h +++ b/paddle/fluid/inference/capi/c_api.h @@ -99,8 +99,8 @@ PADDLE_CAPI_EXPORT extern int* PD_GetPaddleTensorShape(const PD_Tensor* tensor, // AnalysisPredictor PADDLE_CAPI_EXPORT extern bool PD_PredictorRun(const PD_AnalysisConfig* config, PD_Tensor* inputs, int in_size, - PD_Tensor* output_data, - int** out_size, int batch_size); + PD_Tensor** output_data, + int* out_size, int batch_size); PADDLE_CAPI_EXPORT extern bool PD_PredictorZeroCopyRun( const PD_AnalysisConfig* config, PD_ZeroCopyData* inputs, int in_size, diff --git a/paddle/fluid/inference/capi/pd_predictor.cc b/paddle/fluid/inference/capi/pd_predictor.cc index bb750524801b50d75473469d93f45e21a5cff6f2..51f8237c95afa6ab8bc151f85401944799da9e3a 100644 --- a/paddle/fluid/inference/capi/pd_predictor.cc +++ b/paddle/fluid/inference/capi/pd_predictor.cc @@ -26,7 +26,7 @@ using paddle::ConvertToACPrecision; extern "C" { bool PD_PredictorRun(const PD_AnalysisConfig* config, PD_Tensor* inputs, - int in_size, PD_Tensor* output_data, int** out_size, + int in_size, PD_Tensor** output_data, int* out_size, int batch_size) { PADDLE_ENFORCE_NOT_NULL(config); static std::map<std::string, std::unique_ptr<paddle::PaddlePredictor>> @@ -43,10 +43,11 @@ bool PD_PredictorRun(const PD_AnalysisConfig* config, PD_Tensor* inputs, std::vector<paddle::PaddleTensor> out; if (predictor->Run(in, &out, batch_size)) { int osize = out.size(); + *output_data = new PD_Tensor[osize]; for (int i = 0; i < osize; ++i) { - output_data[i].tensor = out[i]; + (*output_data)[i].tensor = out[i]; } - *out_size = &osize; + *out_size = osize; return true; } return false; diff --git a/paddle/fluid/inference/tests/api/analyzer_capi_pd_tensor_tester.cc b/paddle/fluid/inference/tests/api/analyzer_capi_pd_tensor_tester.cc index a94e0b8ebd48373081d6f6df642ea9cc3de2e987..fcb73c8ca023024b6b9912c0d6149cad0a708606 
100644 --- a/paddle/fluid/inference/tests/api/analyzer_capi_pd_tensor_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_capi_pd_tensor_tester.cc @@ -21,6 +21,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/inference/capi/c_api.h" +#include "paddle/fluid/inference/capi/c_api_internal.h" #include "paddle/fluid/inference/tests/api/tester_helper.h" namespace paddle { @@ -56,16 +57,15 @@ void PD_run() { PD_SetPaddleTensorData(input, buf); PD_Tensor* out_data = PD_NewPaddleTensor(); - int* out_size; - PD_PredictorRun(config, input, 1, out_data, &out_size, 1); - LOG(INFO) << *out_size; + int out_size; + PD_PredictorRun(config, input, 1, &out_data, &out_size, 1); + LOG(INFO) << out_size; LOG(INFO) << PD_GetPaddleTensorName(out_data); LOG(INFO) << PD_GetPaddleTensorDType(out_data); PD_PaddleBuf* b = PD_GetPaddleTensorData(out_data); - LOG(INFO) << PD_PaddleBufLength(b); + LOG(INFO) << PD_PaddleBufLength(b) / sizeof(float); float* result = static_cast<float*>(PD_PaddleBufData(b)); LOG(INFO) << *result; - PD_PaddleBufResize(b, 500); PD_DeletePaddleTensor(input); int* size; PD_GetPaddleTensorShape(out_data, &size); @@ -132,16 +132,15 @@ void buffer_run() { PD_SetPaddleTensorData(input, buf); PD_Tensor* out_data = PD_NewPaddleTensor(); - int* out_size; - PD_PredictorRun(config, input, 1, out_data, &out_size, 1); - LOG(INFO) << *out_size; + int out_size; + PD_PredictorRun(config, input, 1, &out_data, &out_size, 1); + LOG(INFO) << out_size; LOG(INFO) << PD_GetPaddleTensorName(out_data); LOG(INFO) << PD_GetPaddleTensorDType(out_data); PD_PaddleBuf* b = PD_GetPaddleTensorData(out_data); - LOG(INFO) << PD_PaddleBufLength(b); + LOG(INFO) << PD_PaddleBufLength(b) / sizeof(float); float* result = static_cast<float*>(PD_PaddleBufData(b)); LOG(INFO) << *result; - PD_PaddleBufResize(b, 500); PD_DeletePaddleTensor(input); PD_DeletePaddleBuf(buf); }