Commit c3785440 authored by authorfu

Change the comments to English

Parent c3c417f5
@@ -30,7 +30,7 @@ Java_com_baidu_paddle_lite_demo_ocr_OCRPredictorNative_init(JNIEnv *env, jobject
 }
 /**
- * "LITE_POWER_HIGH" 转为 paddle::lite_api::LITE_POWER_HIGH
+ * Convert "LITE_POWER_HIGH" to paddle::lite_api::LITE_POWER_HIGH
  * @param cpu_mode
  * @return
  */
...
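The comment in the hunk above documents a helper that maps the mode string passed from the Java side (for example "LITE_POWER_HIGH") to the corresponding paddle::lite_api::PowerMode value. A minimal sketch of such a mapping follows; the function name and the fallback to LITE_POWER_HIGH are illustrative assumptions, not taken from the repository:

#include <map>
#include <string>
#include "paddle_api.h"  // PaddleLite C++ API: paddle::lite_api::PowerMode

// Map the mode string passed from Java to the PaddleLite enum.
// Unknown strings fall back to LITE_POWER_HIGH (assumption).
static paddle::lite_api::PowerMode str_to_cpu_mode(const std::string &cpu_mode) {
    static const std::map<std::string, paddle::lite_api::PowerMode> modes = {
        {"LITE_POWER_HIGH", paddle::lite_api::LITE_POWER_HIGH},
        {"LITE_POWER_LOW", paddle::lite_api::LITE_POWER_LOW},
        {"LITE_POWER_FULL", paddle::lite_api::LITE_POWER_FULL},
        {"LITE_POWER_NO_BIND", paddle::lite_api::LITE_POWER_NO_BIND},
    };
    auto it = modes.find(cpu_mode);
    return it != modes.end() ? it->second : paddle::lite_api::LITE_POWER_HIGH;
}

The returned value would then be placed into a configuration field such as OCR_Config::mode shown later in this commit.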
@@ -37,7 +37,7 @@ int OCR_PPredictor::init_from_file(const std::string &det_model_path, const std:
     return RETURN_OK;
 }
 /**
- * 调试用,保存第一步的框选结果
+ * For debugging: save the box results of the first (detection) stage
  * @param filter_boxes
  * @param boxes
  * @param srcimg
...
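The debug helper documented above saves the box results of the detection stage so they can be inspected. A hedged OpenCV sketch of that kind of visualization; the function name, the output path parameter, and the [x, y] point order are illustrative assumptions:

#include <string>
#include <vector>
#include <opencv2/opencv.hpp>

// Draw the detected quadrilaterals on a copy of the source image and
// write it to disk so the first-stage output can be inspected.
static void save_det_debug_image(const std::vector<std::vector<std::vector<int>>> &boxes,
                                 const cv::Mat &srcimg,
                                 const std::string &path /* illustrative */) {
    cv::Mat vis = srcimg.clone();
    for (const auto &box : boxes) {
        std::vector<cv::Point> pts;
        for (const auto &p : box) {
            pts.emplace_back(p[0], p[1]);  // assumes [x, y] order
        }
        std::vector<std::vector<cv::Point>> polys{pts};
        cv::polylines(vis, polys, /*isClosed=*/true, cv::Scalar(0, 255, 0), 2);
    }
    cv::imwrite(path, vis);
}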
@@ -12,26 +12,26 @@
 namespace ppredictor {
 /**
- * 配置
+ * Configuration
  */
 struct OCR_Config {
-    int thread_num = 4; // 线程数
+    int thread_num = 4; // number of threads
     paddle::lite_api::PowerMode mode = paddle::lite_api::LITE_POWER_HIGH; // PaddleLite Mode
 };
 /**
- * 一个四边形内图片的推理结果,
+ * Inference result for the image inside one quadrilateral
  */
 struct OCRPredictResult {
-    std::vector<int> word_index; //
+    std::vector<int> word_index;
     std::vector<std::vector<int>> points;
     float score;
 };
 /**
- * OCR 一共有2个模型进行推理,
- * 1. 使用第一个模型(det),框选出多个四边形
- * 2. 从原图从抠出这些多边形,使用第二个模型(rec),获取文本
+ * OCR runs inference with two models:
+ * 1. The first model (det) detects quadrilaterals marking where the text is.
+ * 2. Those quadrilaterals are cropped from the original image and the second model (rec) recognizes the text in them.
  */
 class OCR_PPredictor : public PPredictor_Interface {
 public:
@@ -50,7 +50,7 @@ public:
     int init(const std::string &det_model_content, const std::string &rec_model_content);
     int init_from_file(const std::string &det_model_path, const std::string &rec_model_path);
     /**
-     * 返回OCR结果
+     * Return the OCR results
      * @param dims
      * @param input_data
      * @param input_len
@@ -69,7 +69,7 @@ public:
 private:
     /**
-     * 从第一个模型的结果中计算有文字的四边形
+     * Compute the text quadrilaterals from the first model's output
      * @param pred
      * @param output_height
      * @param output_width
@@ -81,7 +81,7 @@ private:
                                       const cv::Mat &origin);
     /**
-     * 第二个模型的推理
+     * Run inference with the second model
      *
      * @param boxes
      * @param origin
@@ -91,14 +91,14 @@
     infer_rec(const std::vector<std::vector<std::vector<int>>> &boxes, const cv::Mat &origin);
     /**
-     * 第二个模型提取文字的后处理
+     * Post-process the second model's output to extract the text
      * @param res
      * @return
      */
     std::vector<int> postprocess_rec_word_index(const PredictorOutput &res);
     /**
-     * 计算第二个模型的文字的置信度
+     * Calculate the confidence of the text recognized by the second model
      * @param res
      * @return
      */
...
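The class comment in the hunk above describes the two-stage pipeline: the detection model proposes text regions, and the recognition model runs on crops of those regions. Since the public inference entry point's signature is elided in this diff, here is only a self-contained conceptual sketch of that flow, not the repository's API; the callable parameters and the use of axis-aligned cv::Rect instead of quadrilaterals are simplifications:

#include <functional>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>

// Stage 1 (det) answers "where is the text"; stage 2 (rec) answers
// "what does it say" on each cropped region.
std::vector<std::string> two_stage_ocr(
    const cv::Mat &origin,
    const std::function<std::vector<cv::Rect>(const cv::Mat &)> &det,
    const std::function<std::string(const cv::Mat &)> &rec) {
    std::vector<std::string> texts;
    for (const cv::Rect &box : det(origin)) {
        // Clip the box to the image and crop it from the original.
        cv::Rect clipped = box & cv::Rect(0, 0, origin.cols, origin.rows);
        texts.push_back(rec(origin(clipped).clone()));
    }
    return texts;
}

The real infer_det / infer_rec pair additionally perspective-crops the quadrilaterals and post-processes the recognizer output into word indices and a confidence score, as the private methods above indicate.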
@@ -7,7 +7,7 @@
 namespace ppredictor {
 /**
- * PaddleLite Preditor 通用接口
+ * Common interface of the PaddleLite predictor
  */
 class PPredictor_Interface {
 public:
@@ -21,7 +21,7 @@ public:
 };
 /**
- * 通用推理
+ * Generic predictor
  */
 class PPredictor : public PPredictor_Interface {
 public:
@@ -33,9 +33,9 @@ public:
     }
     /**
-     * 初始化paddlitelite的opt模型,nb格式,与init_paddle二选一
+     * Initialize from a PaddleLite opt model in .nb format; use either this or init_paddle
      * @param model_content
-     * @return 0 目前是固定值0, 之后其他值表示失败
+     * @return 0 for now; other values may indicate failure in the future
      */
     virtual int init_nb(const std::string &model_content);
...
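init_nb above loads a PaddleLite opt model in .nb format held in memory. How PPredictor wires this up internally is not shown in the diff; as a sketch, creating a predictor from an in-memory .nb model with the PaddleLite C++ API typically looks like this (the free-function wrapper is illustrative):

#include <memory>
#include <string>
#include "paddle_api.h"  // MobileConfig, CreatePaddlePredictor

// Build a PaddleLite predictor from opt (.nb) model bytes.
std::shared_ptr<paddle::lite_api::PaddlePredictor> create_from_nb(
        const std::string &model_content, int thread_num,
        paddle::lite_api::PowerMode mode) {
    paddle::lite_api::MobileConfig config;
    config.set_model_from_buffer(model_content);  // .nb model content in memory
    config.set_threads(thread_num);
    config.set_power_mode(mode);
    return paddle::lite_api::CreatePaddlePredictor<paddle::lite_api::MobileConfig>(config);
}

The file-path counterpart, MobileConfig::set_model_from_file(), is what an init_from_file-style initialization would use instead.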
@@ -21,10 +21,10 @@ public:
     const std::vector<std::vector<uint64_t>> get_lod() const;
     const std::vector<int64_t> get_shape() const;
-    std::vector<float> data; // 通常是float返回,与下面的data_int二选一
-    std::vector<int> data_int; // 少数层是int返回,与 data二选一
-    std::vector<int64_t> shape; // PaddleLite输出层的shape
-    std::vector<std::vector<uint64_t>> lod; // PaddleLite输出层的lod
+    std::vector<float> data; // most layers return float; either this or data_int below is used
+    std::vector<int> data_int; // a few layers return int; either this or data is used
+    std::vector<int64_t> shape; // shape of the PaddleLite output tensor
+    std::vector<std::vector<uint64_t>> lod; // LoD of the PaddleLite output tensor
 private:
     std::unique_ptr<const paddle::lite_api::Tensor> _tensor;
...
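These fields mirror what a paddle::lite_api::Tensor exposes: a shape, a LoD, and either float or int data. A short sketch of copying a float output out of such a tensor; the helper name is illustrative and not part of the repository:

#include <cstdint>
#include <vector>
#include "paddle_api.h"  // paddle::lite_api::Tensor

// Copy an output tensor's float data, using its shape to size the buffer.
static std::vector<float> copy_float_output(const paddle::lite_api::Tensor &tensor) {
    int64_t num = 1;
    for (int64_t d : tensor.shape()) num *= d;  // product of the dimensions
    const float *raw = tensor.data<float>();    // use data<int>() for int-valued layers
    return std::vector<float>(raw, raw + num);
}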
@@ -41,8 +41,8 @@ public class MiniActivity extends AppCompatActivity {
     private String assetlabelFilePath = "labels/ppocr_keys_v1.txt";
     private Button button;
-    private ImageView imageView; // 显示图像
-    private TextView textView; // 显示结果
+    private ImageView imageView; // shows the image
+    private TextView textView; // shows the recognized text
     @Override
     protected void onCreate(Bundle savedInstanceState) {
@@ -85,7 +85,7 @@ public class MiniActivity extends AppCompatActivity {
             }
         }
     };
-    sender.sendEmptyMessage(REQUEST_LOAD_MODEL); // 对应上面的REQUEST_LOAD_MODEL, 调用onLoadModel()
+    sender.sendEmptyMessage(REQUEST_LOAD_MODEL); // handled as REQUEST_LOAD_MODEL above, which calls onLoadModel()
     imageView = findViewById(R.id.imageView);
     textView = findViewById(R.id.sample_text);
@@ -112,7 +112,7 @@ public class MiniActivity extends AppCompatActivity {
     }
     /**
-     * onCreate的时候调用, 模型初始化
+     * Called from onCreate; initializes the model
      *
      * @return
      */
@@ -124,7 +124,8 @@ public class MiniActivity extends AppCompatActivity {
     }
     /**
-     * on
+     * Initialize the engine
+     * Called from onCreate
      *
      * @return
      */
@@ -133,7 +134,7 @@ public class MiniActivity extends AppCompatActivity {
         String assetImagePath = "images/5.jpg";
         InputStream imageStream = getAssets().open(assetImagePath);
         Bitmap image = BitmapFactory.decodeStream(imageStream);
-        // 这里输入是Bitmap
+        // the input here is a Bitmap
         predictor.setInputImage(image);
         return predictor.isLoaded() && predictor.runModel();
     } catch (IOException e) {
...
@@ -228,7 +228,7 @@ public class Predictor {
         for (int i = 0; i < warmupIterNum; i++) {
             paddlePredictor.runImage(inputData, width, height, channels, inputImage);
         }
-        warmupIterNum = 0; // 之后不要再warm了
+        warmupIterNum = 0; // no need to warm up again afterwards
         // Run inference
         start = new Date();
         ArrayList<OcrResultModel> results = paddlePredictor.runImage(inputData, width, height, channels, inputImage);
@@ -323,7 +323,7 @@ public class Predictor {
             for (Point p : result.getPoints()) {
                 sb.append("(").append(p.x).append(",").append(p.y).append(") ");
             }
-            Log.i(TAG, sb.toString()); // 这里在logcat里打印结果
+            Log.i(TAG, sb.toString()); // print the result to Logcat
             outputResultSb.append(i + 1).append(": ").append(result.getLabel()).append("\n");
         }
         outputResult = outputResultSb.toString();
...
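The warm-up loop in the first hunk above runs the model several times before timing so the measured latency excludes one-time costs (kernel selection, allocations, cache warm-up); resetting warmupIterNum to 0 ensures this happens only once. As a sketch, the same measure-after-warm-up pattern on the native side could look like the following, where run_once stands in for an actual inference call and is an assumption for illustration:

#include <chrono>
#include <functional>

// Time one inference call after a few untimed warm-up runs.
double timed_ms_after_warmup(const std::function<void()> &run_once,
                             int warmup_iters = 10) {
    for (int i = 0; i < warmup_iters; ++i) run_once();  // not measured
    auto start = std::chrono::steady_clock::now();
    run_once();                                         // measured run
    auto end = std::chrono::steady_clock::now();
    return std::chrono::duration<double, std::milli>(end - start).count();
}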