Question: the number of outputs differs between C++ and Python
Created by: BryceLuminary
I am running inference on the same model with both the C++ and the Python API, and I hit two questions on the C++ side:
1. Is the second argument of the PaddleBuf constructor a length in bytes?
2. Why does the same model give a different number of outputs through the two APIs?
The code is below, followed by two small sketches of what I mean. Thanks!
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <iostream>
#include <memory>
#include <thread>  // NOLINT
#include <vector>
#include "paddle/fluid/inference/paddle_inference_api.h"
#include "paddle/fluid/platform/enforce.h"
DEFINE_string(dirname, "", "Directory of the inference model.");
DEFINE_bool(use_gpu, false, "Whether use gpu.");
#define N 1
#define C 3
#define H 225
#define W 225
#define IN_SIZE (N * C * H * W)
namespace paddle {
namespace demo {
void Main(bool use_gpu) {
  //# 1. Create PaddlePredictor with a config.
  NativeConfig config;
  config.prog_file = "/home/fluid_models/model";
  config.param_file = "/home/fluid_models/params";
  config.use_gpu = use_gpu;
  config.fraction_of_gpu_memory = 0.15;
  config.device = 0;
  auto predictor =
      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);

  //# 2. Prepare input.
  float data[IN_SIZE];
  for (int i = 0; i < IN_SIZE; i++) {
    data[i] = 1.f;
  }
  std::cout << "size of data: " << sizeof(data) << std::endl;

  PaddleTensor tensor;
  tensor.shape = std::vector<int>({N, C, H, W});
  tensor.data = PaddleBuf(data, IN_SIZE);  // Question 1: is the second argument a byte count, i.e. should this be IN_SIZE * sizeof(float)?
  tensor.dtype = PaddleDType::FLOAT32;

  // For simplicity, we set all the slots with the same data.
  std::vector<PaddleTensor> slots(1, tensor);

  //# 3. Run.
  std::vector<PaddleTensor> outputs;
  CHECK(predictor->Run(slots, &outputs));

  //# 4. Get output.
  std::cout << "num of outputs: " << outputs.size() << std::endl;  // Question 2: the number of outputs differs from the Python API.
  for (size_t i = 0; i < outputs.size(); i++) {
    std::cout << "output " << i << " name " << outputs[i].name << std::endl;
    std::cout << "output " << i << " size " << outputs[i].data.length() << std::endl;
    std::cout << "output " << i << " dtype " << outputs[i].dtype << std::endl;
  }

  // The outputs' buffers are in CPU memory.
  for (size_t i = 0; i < outputs[0].data.length() / sizeof(float); i++) {
    std::cout << static_cast<float*>(outputs[0].data.data())[i] << std::endl;
  }
}
} // namespace demo
} // namespace paddle
int main(int argc, char** argv) {
  google::ParseCommandLineFlags(&argc, &argv, true);
  paddle::demo::Main(true /* use_gpu */);
  return 0;
}
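
For question 1, my guess is that the second argument of PaddleBuf is a byte count, so the call in my code above would be too small by a factor of sizeof(float). If that is right, I assume the input would have to be prepared like the following sketch (tensor and data are the same names from Main above) — is this correct?

// Sketch for question 1: assumes PaddleBuf's second argument is a size in BYTES.
PaddleTensor tensor;
tensor.shape = std::vector<int>({N, C, H, W});
// IN_SIZE float elements -> IN_SIZE * sizeof(float) bytes.
tensor.data = PaddleBuf(data, IN_SIZE * sizeof(float));
tensor.dtype = PaddleDType::FLOAT32;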
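
For question 2, this is a sketch of how I plan to inspect the C++ outputs so I can compare them one by one with the fetch list on the Python side (it assumes the shape field of PaddleTensor is also filled in for the tensors returned by Run):

// Sketch for question 2: print name, dtype, and shape of every output tensor,
// to compare against the fetch targets of the Python API.
for (size_t i = 0; i < outputs.size(); i++) {
  std::cout << "output " << i << " name=" << outputs[i].name
            << " dtype=" << outputs[i].dtype << " shape=[";
  for (size_t j = 0; j < outputs[i].shape.size(); j++) {
    std::cout << outputs[i].shape[j]
              << (j + 1 < outputs[i].shape.size() ? ", " : "");
  }
  std::cout << "]" << std::endl;
}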