Unverified commit 01e22c4d, authored by sangoly, committed by GitHub

Remove prefered_place document (#2205)

Parent a062d73c
@@ -209,9 +209,11 @@ void RunModel() {
   // 1. Set CxxConfig
   CxxConfig config;
   config.set_model_dir(FLAGS_model_dir);
-  std::vector<Place> valid_places{Place{TARGET(kARM), PRECISION(kFloat)},
-                                  Place{TARGET(kHost), PRECISION(kFloat)}};
-  config.set_preferred_place(Place{TARGET(kARM), PRECISION(kFloat)});
+  std::vector<Place> valid_places({Place{TARGET(kARM), PRECISION(kFloat)}});
+  if (FLAGS_prefer_int8_kernel) {
+    valid_places.insert(valid_places.begin(),
+                        Place{TARGET(kARM), PRECISION(kInt8)});
+  }
   config.set_valid_places(valid_places);
   // 2. Create PaddlePredictor by CxxConfig
......
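The pattern this hunk documents: `CxxConfig::set_preferred_place()` is gone, and precision preference is now expressed purely by the ordering of `valid_places` (the front of the list is tried first during kernel selection). A minimal sketch of the resulting flow, assuming the `paddle::lite_api` namespace and the `CreatePaddlePredictor<CxxConfig>` factory from the Lite C++ API, which are not shown in this diff:

```cpp
#include <memory>
#include <string>
#include <vector>

#include "paddle_api.h"  // Paddle-Lite C++ API header (include path assumed)

using namespace paddle::lite_api;  // NOLINT

std::shared_ptr<PaddlePredictor> BuildPredictor(const std::string& model_dir,
                                                bool prefer_int8_kernel) {
  // 1. Set CxxConfig
  CxxConfig config;
  config.set_model_dir(model_dir);
  // Ordering now carries the preference that set_preferred_place() used to:
  // the kernel picker tries the front of valid_places first.
  std::vector<Place> valid_places({Place{TARGET(kARM), PRECISION(kFloat)}});
  if (prefer_int8_kernel) {
    valid_places.insert(valid_places.begin(),
                        Place{TARGET(kARM), PRECISION(kInt8)});
  }
  config.set_valid_places(valid_places);
  // 2. Create PaddlePredictor by CxxConfig
  return CreatePaddlePredictor<CxxConfig>(config);
}
```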
@@ -83,18 +83,16 @@ chmod +x test_resnet50_fpga
 Using fpga in Lite is similar to using ARM; the specific differences are as follows:
-- Since fpga runs in fp16 precision with the nhwc layout, the corresponding `valid_place` and `preferred_place` need to be changed
+- Since fpga runs in fp16 precision with the nhwc layout, the corresponding `valid_place` needs to be changed
 - fpga does not require device initialization or run-mode configuration
 Code example:
 ```cpp
 lite::Predictor predictor;
 std::vector<Place> valid_places(
-    {Place{TARGET(kFPGA), PRECISION(kFP16), DATALAYOUT(kNHWC)},
-     Place{TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kNHWC)}});
-Place preferred_place = Place{TARGET(kFPGA), PRECISION(kFP16), DATALAYOUT(kNHWC)};
-predictor.Build(model_dir, preferred_place, valid_places);
+    {Place{TARGET(kFPGA), PRECISION(kFP16), DATALAYOUT(kNHWC)}});
+predictor.Build(model_dir, "", "", valid_places);
 auto* input_tensor = predictor.GetInput(0);
 input_tensor->Resize(DDim(std::vector<DDim::value_type>({1, 3, 224, 224})));
......
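With `preferred_place` removed, placing the FPGA Place first in `valid_places` is all that marks it as preferred. A sketch of the full call sequence around the updated snippet, under the assumption that `mutable_data<float>()`, `dims().production()`, `Run()`, and `GetOutput()` keep the usual `lite::Predictor`/`lite::Tensor` signatures of this codebase:

```cpp
lite::Predictor predictor;
// FPGA kernels run in fp16 precision with the NHWC layout.
std::vector<Place> valid_places(
    {Place{TARGET(kFPGA), PRECISION(kFP16), DATALAYOUT(kNHWC)}});
// The two empty strings are presumably the optional model_file/param_file
// paths, unused when loading from a model directory.
predictor.Build(model_dir, "", "", valid_places);

auto* input_tensor = predictor.GetInput(0);
input_tensor->Resize(DDim(std::vector<DDim::value_type>({1, 3, 224, 224})));
auto* in_data = input_tensor->mutable_data<float>();
for (int64_t i = 0; i < input_tensor->dims().production(); ++i) {
  in_data[i] = 1.0f;  // dummy input
}
predictor.Run();
auto* out = predictor.GetOutput(0);
```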
@@ -92,15 +92,14 @@ Note:
 Code example:
 ```cpp
-std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)},
-                                 Place{TARGET(kARM), PRECISION(kFloat)}});
 // if want to use NPU
-valid_places.push_back(Place{TARGET(kNPU), PRECISION(kFloat)});
+std::vector<Place> valid_places({Place{TARGET(kNPU), PRECISION(kFloat)},
+                                 Place{TARGET(kARM), PRECISION(kFloat)}});
 DeviceInfo::Init();
 DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, FLAGS_threads);
 lite::Predictor predictor;
-predictor.Build(model_dir, preferred_place, valid_places);
+predictor.Build(model_dir, "", "", valid_places);
 auto* input_tensor = predictor.GetInput(0);
 input_tensor->Resize(DDim(std::vector<DDim::value_type>({1, 3, 224, 224})));
......
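The NPU hunk shows the same convention: listing kNPU first makes it preferred, while kARM stays in the list so ops the NPU cannot run fall back to ARM kernels. A hypothetical helper (`MakeValidPlaces` is not part of this diff) that mirrors the `FLAGS_prefer_int8_kernel` pattern from the first file:

```cpp
#include <vector>

// Hypothetical helper: put the NPU Place at the front only when requested,
// so one binary can run with or without the NPU. The front of the list wins
// during kernel selection; kARM remains as the fallback.
std::vector<Place> MakeValidPlaces(bool use_npu) {
  std::vector<Place> valid_places({Place{TARGET(kARM), PRECISION(kFloat)}});
  if (use_npu) {
    valid_places.insert(valid_places.begin(),
                        Place{TARGET(kNPU), PRECISION(kFloat)});
  }
  return valid_places;
}
```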
@@ -86,16 +86,14 @@ Lite supports mixed scheduling between the ARM CPU and the ARM GPU, as described below:
 DeviceInfo::Init();
 DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, FLAGS_threads);
 lite::Predictor predictor;
-// Set the valid Places for Lite inference to {kHost, kARM, kOpenCL}
+// Set the valid Places for Lite inference to {kOpenCL, kARM}
 std::vector<Place> valid_places({
-    Place{TARGET(kHost), PRECISION(kFloat)},
-    Place{TARGET(kARM), PRECISION(kFloat)},
-    Place{TARGET(kOpenCL), PRECISION(kFloat)},
+    Place({TARGET(kOpenCL), PRECISION(kFloat)}),
+    Place({TARGET(kARM), PRECISION(kFloat)})
 });
-// Set the preferred Place for Lite inference to kOpenCL
-auto preferred_place = Place({TARGET(kOpenCL), PRECISION(kFloat)});
 // Build the model from the valid Places and the preferred Place
-predictor.Build(model_dir, preferred_place, valid_places);
+predictor.Build(model_dir, "", "", valid_places);
 // Set the model input
 auto* input_tensor = predictor.GetInput(0);
 input_tensor->Resize(DDim(std::vector<DDim::value_type>({1, 3, 224, 224})));
@@ -107,4 +105,4 @@ for (int i = 0; i < item_size; i++) {
 // Run inference and fetch the prediction results
 predictor.Run();
 auto* out = predictor.GetOutput(0);
 ```
\ No newline at end of file
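The OpenCL hunk ends right after `GetOutput(0)`. For completeness, a sketch of reading the result back, assuming the `lite::Tensor` accessors `data<float>()` and `dims().production()` behave as they do elsewhere in the repo:

```cpp
#include <cstdio>

// Continues the snippet above: run inference, then walk the output tensor.
predictor.Run();
auto* out = predictor.GetOutput(0);
const float* out_data = out->data<float>();
const int64_t n = out->dims().production();
for (int64_t i = 0; i < n; ++i) {
  printf("out[%lld] = %f\n", static_cast<long long>(i), out_data[i]);
}
```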