Unverified · Commit d39777fe authored by liu zhengxi, committed by GitHub

alter the capi of PD_PredictorRun to provide proper function, test=develop (#20697)

Modify the way the out_size parameter is passed out of the function.
Parent 4eeda9d6
@@ -99,8 +99,8 @@ PADDLE_CAPI_EXPORT extern int* PD_GetPaddleTensorShape(const PD_Tensor* tensor,
 // AnalysisPredictor
 PADDLE_CAPI_EXPORT extern bool PD_PredictorRun(const PD_AnalysisConfig* config,
                                                PD_Tensor* inputs, int in_size,
-                                               PD_Tensor* output_data,
-                                               int** out_size, int batch_size);
+                                               PD_Tensor** output_data,
+                                               int* out_size, int batch_size);
 PADDLE_CAPI_EXPORT extern bool PD_PredictorZeroCopyRun(
     const PD_AnalysisConfig* config, PD_ZeroCopyData* inputs, int in_size,
......
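For context, here is a minimal caller-side sketch of the new signature. It is not part of this commit: `RunOnce` is a hypothetical helper name, and `config` and `input` are assumed to have been created and filled elsewhere (as in the tests further below); it only uses functions that already appear in this diff.

    #include <cstdio>
    #include "paddle/fluid/inference/capi/c_api.h"

    // Hypothetical helper, not part of the commit: demonstrates the new
    // out-parameter convention of PD_PredictorRun. `config` and `input`
    // are assumed to be prepared by the caller.
    void RunOnce(const PD_AnalysisConfig* config, PD_Tensor* input) {
      PD_Tensor* out_data = PD_NewPaddleTensor();
      int out_size = 0;  // plain int now; receives the number of output tensors
      if (PD_PredictorRun(config, input, /*in_size=*/1, &out_data, &out_size,
                          /*batch_size=*/1)) {
        PD_PaddleBuf* b = PD_GetPaddleTensorData(out_data);
        float* result = static_cast<float*>(PD_PaddleBufData(b));
        std::printf("outputs: %d, first value: %f\n", out_size, *result);
      }
      // Clean-up of the tensors is omitted here for brevity; the updated
      // tests below show the pattern used in this commit.
    }

The implementation change below allocates the output array inside the predictor and writes the output count by value.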
@@ -26,7 +26,7 @@ using paddle::ConvertToACPrecision;
 extern "C" {
 bool PD_PredictorRun(const PD_AnalysisConfig* config, PD_Tensor* inputs,
-                     int in_size, PD_Tensor* output_data, int** out_size,
+                     int in_size, PD_Tensor** output_data, int* out_size,
                      int batch_size) {
   PADDLE_ENFORCE_NOT_NULL(config);
   static std::map<std::string, std::unique_ptr<paddle::PaddlePredictor>>
@@ -43,10 +43,11 @@ bool PD_PredictorRun(const PD_AnalysisConfig* config, PD_Tensor* inputs,
   std::vector<paddle::PaddleTensor> out;
   if (predictor->Run(in, &out, batch_size)) {
     int osize = out.size();
+    *output_data = new PD_Tensor[osize];
     for (int i = 0; i < osize; ++i) {
-      output_data[i].tensor = out[i];
+      output_data[i]->tensor = out[i];
     }
-    *out_size = &osize;
+    *out_size = osize;
     return true;
   }
   return false;
......
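The key behavioural fix is how out_size leaves the function: under the old `int**` contract the implementation could only hand back the address of the local `osize`, which dangles as soon as PD_PredictorRun returns, while the new `int*` contract copies the value into caller-owned storage. A minimal standalone illustration of the difference (plain C++, not Paddle code):

    #include <iostream>

    // Old-style contract: the callee can only return the address of a local,
    // which is invalid as soon as the function returns.
    bool old_style(int** out_size) {
      int osize = 3;
      *out_size = &osize;  // caller receives a dangling pointer
      return true;
    }

    // New-style contract: the value is copied into storage owned by the caller.
    bool new_style(int* out_size) {
      *out_size = 3;
      return true;
    }

    int main() {
      int size = 0;
      new_style(&size);
      std::cout << size << std::endl;  // prints 3; no lifetime issues
      return 0;
    }

The test updates below switch both test helpers to the new convention accordingly.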
@@ -21,6 +21,7 @@ limitations under the License. */
 #include <string>
 #include <vector>
 #include "paddle/fluid/inference/capi/c_api.h"
+#include "paddle/fluid/inference/capi/c_api_internal.h"
 #include "paddle/fluid/inference/tests/api/tester_helper.h"
 namespace paddle {
@@ -56,16 +57,15 @@ void PD_run() {
   PD_SetPaddleTensorData(input, buf);
   PD_Tensor* out_data = PD_NewPaddleTensor();
-  int* out_size;
-  PD_PredictorRun(config, input, 1, out_data, &out_size, 1);
-  LOG(INFO) << *out_size;
+  int out_size;
+  PD_PredictorRun(config, input, 1, &out_data, &out_size, 1);
+  LOG(INFO) << out_size;
   LOG(INFO) << PD_GetPaddleTensorName(out_data);
   LOG(INFO) << PD_GetPaddleTensorDType(out_data);
   PD_PaddleBuf* b = PD_GetPaddleTensorData(out_data);
-  LOG(INFO) << PD_PaddleBufLength(b);
+  LOG(INFO) << PD_PaddleBufLength(b) / sizeof(float);
   float* result = static_cast<float*>(PD_PaddleBufData(b));
   LOG(INFO) << *result;
-  PD_PaddleBufResize(b, 500);
   PD_DeletePaddleTensor(input);
   int* size;
   PD_GetPaddleTensorShape(out_data, &size);
@@ -132,16 +132,15 @@ void buffer_run() {
   PD_SetPaddleTensorData(input, buf);
   PD_Tensor* out_data = PD_NewPaddleTensor();
-  int* out_size;
-  PD_PredictorRun(config, input, 1, out_data, &out_size, 1);
-  LOG(INFO) << *out_size;
+  int out_size;
+  PD_PredictorRun(config, input, 1, &out_data, &out_size, 1);
+  LOG(INFO) << out_size;
   LOG(INFO) << PD_GetPaddleTensorName(out_data);
   LOG(INFO) << PD_GetPaddleTensorDType(out_data);
   PD_PaddleBuf* b = PD_GetPaddleTensorData(out_data);
-  LOG(INFO) << PD_PaddleBufLength(b);
+  LOG(INFO) << PD_PaddleBufLength(b) / sizeof(float);
   float* result = static_cast<float*>(PD_PaddleBufData(b));
   LOG(INFO) << *result;
-  PD_PaddleBufResize(b, 500);
   PD_DeletePaddleTensor(input);
   PD_DeletePaddleBuf(buf);
 }
......