提交 dc4f1c21 编写于 作者: W WenmuZhou

add ocr cpp prefix to log

上级 aedddd81
...@@ -42,17 +42,17 @@ template <typename ConfigT> int PPredictor::_init(ConfigT &config) { ...@@ -42,17 +42,17 @@ template <typename ConfigT> int PPredictor::_init(ConfigT &config) {
// CL_PRECISION_FP32: 1, force fp32 // CL_PRECISION_FP32: 1, force fp32
// CL_PRECISION_FP16: 2, force fp16 // CL_PRECISION_FP16: 2, force fp16
config.set_opencl_precision(paddle::lite_api::CL_PRECISION_FP32); config.set_opencl_precision(paddle::lite_api::CL_PRECISION_FP32);
LOGI("device: running on gpu."); LOGI("ocr cpp device: running on gpu.");
} }
} else { } else {
LOGI("device: running on cpu."); LOGI("ocr cpp device: running on cpu.");
// you can give backup cpu nb model instead // you can give backup cpu nb model instead
// config.set_model_from_file(cpu_nb_model_dir); // config.set_model_from_file(cpu_nb_model_dir);
} }
config.set_threads(_thread_num); config.set_threads(_thread_num);
config.set_power_mode(_mode); config.set_power_mode(_mode);
_predictor = paddle::lite_api::CreatePaddlePredictor(config); _predictor = paddle::lite_api::CreatePaddlePredictor(config);
LOGI("paddle instance created"); LOGI("ocr cpp paddle instance created");
return RETURN_OK; return RETURN_OK;
} }
...@@ -73,18 +73,18 @@ std::vector<PredictorInput> PPredictor::get_inputs(int num) { ...@@ -73,18 +73,18 @@ std::vector<PredictorInput> PPredictor::get_inputs(int num) {
PredictorInput PPredictor::get_first_input() { return get_input(0); } PredictorInput PPredictor::get_first_input() { return get_input(0); }
std::vector<PredictorOutput> PPredictor::infer() { std::vector<PredictorOutput> PPredictor::infer() {
LOGI("infer Run start %d", _net_flag); LOGI("ocr cpp infer Run start %d", _net_flag);
std::vector<PredictorOutput> results; std::vector<PredictorOutput> results;
if (!_is_input_get) { if (!_is_input_get) {
return results; return results;
} }
_predictor->Run(); _predictor->Run();
LOGI("infer Run end"); LOGI("ocr cpp infer Run end");
for (int i = 0; i < _predictor->GetOutputNames().size(); i++) { for (int i = 0; i < _predictor->GetOutputNames().size(); i++) {
std::unique_ptr<const paddle::lite_api::Tensor> output_tensor = std::unique_ptr<const paddle::lite_api::Tensor> output_tensor =
_predictor->GetOutput(i); _predictor->GetOutput(i);
LOGI("output tensor[%d] size %ld", i, product(output_tensor->shape())); LOGI("ocr cpp output tensor[%d] size %ld", i, product(output_tensor->shape()));
PredictorOutput result{std::move(output_tensor), i, _net_flag}; PredictorOutput result{std::move(output_tensor), i, _net_flag};
results.emplace_back(std::move(result)); results.emplace_back(std::move(result));
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册