diff --git a/deploy/android_demo/app/src/main/cpp/ppredictor.cpp b/deploy/android_demo/app/src/main/cpp/ppredictor.cpp
index acea0a3fd53a88ab95d4301d227e44fc5873a34d..a40fe5e1b289d1c7646ded302f3faba179b1ac2e 100644
--- a/deploy/android_demo/app/src/main/cpp/ppredictor.cpp
+++ b/deploy/android_demo/app/src/main/cpp/ppredictor.cpp
@@ -42,17 +42,17 @@ template <typename ConfigT> int PPredictor::_init(ConfigT &config) {
       // CL_PRECISION_FP32: 1, force fp32
       // CL_PRECISION_FP16: 2, force fp16
       config.set_opencl_precision(paddle::lite_api::CL_PRECISION_FP32);
-      LOGI("device: running on gpu.");
+      LOGI("ocr cpp device: running on gpu.");
     }
   } else {
-    LOGI("device: running on cpu.");
+    LOGI("ocr cpp device: running on cpu.");
     // you can give backup cpu nb model instead
     // config.set_model_from_file(cpu_nb_model_dir);
   }
   config.set_threads(_thread_num);
   config.set_power_mode(_mode);
   _predictor = paddle::lite_api::CreatePaddlePredictor<ConfigT>(config);
-  LOGI("paddle instance created");
+  LOGI("ocr cpp paddle instance created");
   return RETURN_OK;
 }
 
@@ -73,18 +73,18 @@ std::vector<PredictorInput> PPredictor::get_inputs(int num) {
 
 PredictorInput PPredictor::get_first_input() { return get_input(0); }
 
 std::vector<PredictorOutput> PPredictor::infer() {
-  LOGI("infer Run start %d", _net_flag);
+  LOGI("ocr cpp infer Run start %d", _net_flag);
   std::vector<PredictorOutput> results;
   if (!_is_input_get) {
     return results;
   }
   _predictor->Run();
-  LOGI("infer Run end");
+  LOGI("ocr cpp infer Run end");
   for (int i = 0; i < _predictor->GetOutputNames().size(); i++) {
     std::unique_ptr<const paddle::lite_api::Tensor> output_tensor =
         _predictor->GetOutput(i);
-    LOGI("output tensor[%d] size %ld", i, product(output_tensor->shape()));
+    LOGI("ocr cpp output tensor[%d] size %ld", i, product(output_tensor->shape()));
     PredictorOutput result{std::move(output_tensor), i, _net_flag};
     results.emplace_back(std::move(result));
   }