From 25c42d4f429847ae610f6254df6dd48f3cf465b9 Mon Sep 17 00:00:00 2001
From: TianXiaogang
Date: Mon, 23 Sep 2019 10:40:50 +0800
Subject: [PATCH] review x2paddle and update fpga mds (#2121)

* fix: fix fpga_doc

* fix: doc review

* fix: fix develop/fpga md

* fix: update fpga.md

* fix: fix fpga doc
---
 _all_pages/v2.0.0-beta1/fpga.md     | 54 +++++++++++++++++++----------
 _all_pages/v2.0.0-beta1/x2paddle.md |  2 +-
 2 files changed, 37 insertions(+), 19 deletions(-)

diff --git a/_all_pages/v2.0.0-beta1/fpga.md b/_all_pages/v2.0.0-beta1/fpga.md
index 05897508c5..24399a8145 100644
--- a/_all_pages/v2.0.0-beta1/fpga.md
+++ b/_all_pages/v2.0.0-beta1/fpga.md
@@ -16,6 +16,7 @@ Lite supports FPGA as backend hardware for model inference. Its main features are:
 - Kernels not yet supported on FPGA automatically fall back to the ARM side, so models run as a mixed ARM+FPGA deployment
 - FPGA is currently low in both cost and power consumption, and Lite models run far faster on FPGA than on ARM, making it a good first choice for edge devices
 
+
 # Compilation
 
 Prepare in advance an FPGA development board that ships fpgadrv.ko (e.g. an Edgeboard board) and the Lite source code
@@ -88,23 +89,40 @@ chmod +x test_resnet50_fpga
 Code example:
 
 ```cpp
-lite::Predictor predictor;
-std::vector<Place> valid_places(
-    {Place{TARGET(kFPGA), PRECISION(kFP16), DATALAYOUT(kNHWC)},
-     Place{TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kNHWC)}});
-Place preferred_place = Place{TARGET(kFPGA), PRECISION(kFP16), DATALAYOUT(kNHWC)};
-
-predictor.Build(model_dir, preferred_place, valid_places);
-
-auto* input_tensor = predictor.GetInput(0);
-input_tensor->Resize(DDim(std::vector<DDim::value_type>({1, 3, 224, 224})));
-auto* data = input_tensor->mutable_data<float>();
-auto item_size = input_tensor->dims().production();
-// assume the input data is all ones
-for (int i = 0; i < item_size; i++) {
-  data[i] = 1;
+#include "paddle_api.h"
+#include "paddle_use_kernels.h"
+#include "paddle_use_ops.h"
+#include "paddle_use_passes.h"
+using namespace paddle::lite_api;
+
+std::vector<Place> valid_places({Place{TARGET(kFPGA), PRECISION(kFP16), DATALAYOUT(kNHWC)},
+                                 Place{TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kNCHW)}});
+std::string model_dir = "my_model";
+std::string model_file = model_dir + "/model";
+std::string params_file = model_dir + "/params";
+
+// 1. Set CxxConfig
+CxxConfig config;
+config.set_model_dir(model_dir);
+config.set_model_file(model_file);
+config.set_param_file(params_file);
+config.set_preferred_place(Place{TARGET(kFPGA), PRECISION(kFP16), DATALAYOUT(kNHWC)});
+config.set_valid_places(valid_places);
+
+// 2. Create PaddlePredictor by CxxConfig
+std::shared_ptr<PaddlePredictor> predictor = CreatePaddlePredictor<CxxConfig>(config);
+
+// 3. Set input data
+std::unique_ptr<Tensor> input_tensor(std::move(predictor->GetInput(0)));
+input_tensor->Resize(shape_t({1, 3, 224, 224}));
+auto* input = input_tensor->mutable_data<float>();
+read_image(value, input);  // read_image and value are user-supplied: read the image data into the input buffer
+
+// 4. Run predictor
+for (int i = 0; i < 2; i++) {
+  predictor->Run();
 }
-
-predictor.Run();
-auto* out = predictor.GetOutput(0);
+// 5. Get output
+std::unique_ptr<const Tensor> output_tensor(
+    std::move(predictor->GetOutput(0)));
 ```
diff --git a/_all_pages/v2.0.0-beta1/x2paddle.md b/_all_pages/v2.0.0-beta1/x2paddle.md
index 6b62dce809..7fee03c801 100644
--- a/_all_pages/v2.0.0-beta1/x2paddle.md
+++ b/_all_pages/v2.0.0-beta1/x2paddle.md
@@ -44,4 +44,4 @@ x2paddle --framework tensorflow \
 
 ## Feedback
 
-When you encounter problems when using X2Paddle, you are welcome to report the issue or bug to us via [Github Issues](https://github.com/PaddlePaddle/X2Paddle/issues); we will follow up promptly.
+When you encounter problems using X2Paddle, you are welcome to report the issue or bug to us via [Github Issues](https://github.com/PaddlePaddle/X2Paddle/issues); we will follow up promptly.
--
GitLab
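For readers who want to try the updated fpga.md example end to end, the snippet in the hunk above can be assembled into a minimal self-contained program roughly as sketched below. This is only a sketch built from the calls shown in the patch: the model directory `my_model` is a placeholder, and the user-supplied `read_image` step is replaced by a constant fill.

```cpp
// Minimal sketch of the CxxConfig / CreatePaddlePredictor flow from fpga.md.
// Assumptions: "my_model" is a placeholder model directory; the input is
// filled with ones instead of a real image.
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

#include "paddle_api.h"
#include "paddle_use_kernels.h"
#include "paddle_use_ops.h"
#include "paddle_use_passes.h"

using namespace paddle::lite_api;  // NOLINT

int main() {
  // 1. Configure: prefer FPGA FP16/NHWC kernels, fall back to host float kernels.
  CxxConfig config;
  config.set_model_dir("my_model");  // placeholder path
  config.set_preferred_place(Place{TARGET(kFPGA), PRECISION(kFP16), DATALAYOUT(kNHWC)});
  config.set_valid_places({Place{TARGET(kFPGA), PRECISION(kFP16), DATALAYOUT(kNHWC)},
                           Place{TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kNCHW)}});

  // 2. Create the predictor from the CxxConfig.
  std::shared_ptr<PaddlePredictor> predictor = CreatePaddlePredictor<CxxConfig>(config);

  // 3. Fill the input tensor (all ones here, standing in for a real image).
  std::unique_ptr<Tensor> input_tensor(std::move(predictor->GetInput(0)));
  input_tensor->Resize(shape_t({1, 3, 224, 224}));
  auto* input = input_tensor->mutable_data<float>();
  for (int i = 0; i < 1 * 3 * 224 * 224; ++i) input[i] = 1.f;

  // 4. Run inference.
  predictor->Run();

  // 5. Read the output tensor.
  std::unique_ptr<const Tensor> output_tensor(std::move(predictor->GetOutput(0)));
  auto shape = output_tensor->shape();
  int64_t num = std::accumulate(shape.begin(), shape.end(), int64_t{1},
                                std::multiplies<int64_t>());
  const float* out = output_tensor->data<float>();
  std::cout << "output elements: " << num << ", first value: " << out[0] << std::endl;
  return 0;
}
```

The `valid_places` list is what enables the fallback described in the feature list at the top of fpga.md: kernels the FPGA cannot run are scheduled onto the host/ARM side, while the preferred place keeps supported kernels on the FPGA in FP16/NHWC.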