Commit c3785440 authored by authorfu

Change the comments to English

Parent c3c417f5
......@@ -30,7 +30,7 @@ Java_com_baidu_paddle_lite_demo_ocr_OCRPredictorNative_init(JNIEnv *env, jobject
}
/**
* "LITE_POWER_HIGH" 转为 paddle::lite_api::LITE_POWER_HIGH
* "LITE_POWER_HIGH" convert to paddle::lite_api::LITE_POWER_HIGH
* @param cpu_mode
* @return
*/
......
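For context, the comment above belongs to a helper that maps the power-mode string passed in from the Java side to the corresponding PaddleLite enum value. A minimal sketch of such a mapping, with the function name and fallback value assumed rather than taken from the repository:

    #include <string>
    #include "paddle_api.h"  // paddle::lite_api::PowerMode

    // Hypothetical helper: translate the Java-side string into the PaddleLite enum.
    static paddle::lite_api::PowerMode power_mode_from_string(const std::string &cpu_mode) {
      if (cpu_mode == "LITE_POWER_HIGH") return paddle::lite_api::LITE_POWER_HIGH;
      if (cpu_mode == "LITE_POWER_LOW") return paddle::lite_api::LITE_POWER_LOW;
      if (cpu_mode == "LITE_POWER_FULL") return paddle::lite_api::LITE_POWER_FULL;
      // Unknown strings fall back to the default that OCR_Config uses below.
      return paddle::lite_api::LITE_POWER_HIGH;
    }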
......@@ -37,7 +37,7 @@ int OCR_PPredictor::init_from_file(const std::string &det_model_path, const std:
return RETURN_OK;
}
/**
- * 调试用,保存第一步的框选结果
+ * For debugging: saves the box-detection result of the first stage
* @param filter_boxes
* @param boxes
* @param srcimg
......
......@@ -12,26 +12,26 @@
namespace ppredictor {
/**
- * 配置
+ * Config
*/
struct OCR_Config {
- int thread_num = 4; // 线程数
+ int thread_num = 4; // number of threads
paddle::lite_api::PowerMode mode = paddle::lite_api::LITE_POWER_HIGH; // PaddleLite Mode
};
/**
- * 一个四边形内图片的推理结果,
+ * Inference result for the image inside one quadrilateral (polygon)
*/
struct OCRPredictResult {
- std::vector<int> word_index; //
+ std::vector<int> word_index;
std::vector<std::vector<int>> points;
float score;
};
/**
- * OCR 一共有2个模型进行推理,
- * 1. 使用第一个模型(det),框选出多个四边形
- * 2. 从原图从抠出这些多边形,使用第二个模型(rec),获取文本
+ * OCR inference uses two models:
+ * 1. The first model (det) detects the quadrilaterals that contain text
+ * 2. These quadrilaterals are cropped from the original image and passed to the second model (rec) to recognize the text
*/
class OCR_PPredictor : public PPredictor_Interface {
public:
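The config, the result struct, and the class comment above describe a two-stage pipeline. A rough usage sketch under assumptions: the constructor taking an OCR_Config and the model file names are illustrative, and the infer call is left as a comment because its full signature is not shown in this hunk:

    #include "ocr_ppredictor.h"

    void ocr_pipeline_sketch() {
      ppredictor::OCR_Config config;                // thread_num = 4, LITE_POWER_HIGH by default
      ppredictor::OCR_PPredictor predictor(config); // assumed constructor taking the config
      // Load both models: det (stage 1, finds text quadrilaterals) and
      // rec (stage 2, recognizes the text inside each cropped quadrilateral).
      predictor.init_from_file("det_opt.nb", "rec_opt.nb");
      // predictor.infer_ocr(...) then yields one OCRPredictResult per quadrilateral:
      // points holds the corner points, word_index the recognized character ids, score the confidence.
    }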
......@@ -50,7 +50,7 @@ public:
int init(const std::string &det_model_content, const std::string &rec_model_content);
int init_from_file(const std::string &det_model_path, const std::string &rec_model_path);
/**
- * 返回OCR结果
+ * Returns the OCR result
* @param dims
* @param input_data
* @param input_len
......@@ -69,7 +69,7 @@ public:
private:
/**
- * 从第一个模型的结果中计算有文字的四边形
+ * Calculates the text quadrilaterals from the first model's output
* @param pred
* @param output_height
* @param output_width
......@@ -81,7 +81,7 @@ private:
const cv::Mat &origin);
/**
- * 第二个模型的推理
+ * Inference with the second model
*
* @param boxes
* @param origin
......@@ -91,14 +91,14 @@ private:
infer_rec(const std::vector<std::vector<std::vector<int>>> &boxes, const cv::Mat &origin);
/**
- * 第二个模型提取文字的后处理
+ * Post-processing of the second model's output to extract the text
* @param res
* @return
*/
std::vector<int> postprocess_rec_word_index(const PredictorOutput &res);
/**
- * 计算第二个模型的文字的置信度
+ * Calculates the confidence of the second model's text result
* @param res
* @return
*/
......
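The two post-processing steps documented above turn the rec model's raw output into character indices and a confidence value. As a hedged illustration of what word_index means downstream, the following helper maps indices back to characters through a label dictionary such as the labels/ppocr_keys_v1.txt file used by the Java demo later in this diff; the helper itself is not part of the repository:

    #include <string>
    #include <vector>

    // Illustrative only: convert recognized character ids into a string
    // using the label dictionary loaded from e.g. labels/ppocr_keys_v1.txt.
    std::string indices_to_text(const std::vector<int> &word_index,
                                const std::vector<std::string> &labels) {
      std::string text;
      for (int idx : word_index) {
        if (idx >= 0 && idx < static_cast<int>(labels.size())) {
          text += labels[idx];
        }
      }
      return text;
    }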
......@@ -7,7 +7,7 @@
namespace ppredictor {
/**
- * PaddleLite Preditor 通用接口
+ * Common interface of the PaddleLite predictor
*/
class PPredictor_Interface {
public:
......@@ -21,7 +21,7 @@ public:
};
/**
- * 通用推理
+ * Generic predictor
*/
class PPredictor : public PPredictor_Interface {
public:
......@@ -33,9 +33,9 @@ public:
}
/**
- * 初始化paddlitelite的opt模型,nb格式,与init_paddle二选一
+ * Initializes a PaddleLite opt model (.nb format); use either this or init_paddle
* @param model_content
- * @return 0 目前是固定值0, 之后其他值表示失败
+ * @return 0 (currently always 0; other values indicate failure)
*/
virtual int init_nb(const std::string &model_content);
......
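init_nb above expects the raw bytes of a Paddle-Lite opt (.nb) model passed as a string, as the alternative to init_paddle. A minimal sketch of feeding it from a file; the helper and the file path are illustrative, not part of the repository:

    #include <fstream>
    #include <sstream>
    #include <string>

    // Illustrative helper: read an opt (.nb) model file into memory so its
    // contents can be handed to init_nb (use either init_nb or init_paddle).
    std::string read_model_content(const std::string &path) {
      std::ifstream in(path, std::ios::binary);
      std::ostringstream buf;
      buf << in.rdbuf();
      return buf.str();
    }

    // Assumed usage: predictor.init_nb(read_model_content("det_opt.nb"));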
......@@ -21,10 +21,10 @@ public:
const std::vector<std::vector<uint64_t>> get_lod() const;
const std::vector<int64_t> get_shape() const;
- std::vector<float> data; // 通常是float返回,与下面的data_int二选一
- std::vector<int> data_int; // 少数层是int返回,与 data二选一
- std::vector<int64_t> shape; // PaddleLite输出层的shape
- std::vector<std::vector<uint64_t>> lod; // PaddleLite输出层的lod
+ std::vector<float> data; // usually the output is float; use either this or data_int below
+ std::vector<int> data_int; // a few layers return int; use either this or data
+ std::vector<int64_t> shape; // shape of the PaddleLite output layer
+ std::vector<std::vector<uint64_t>> lod; // lod of the PaddleLite output layer
private:
std::unique_ptr<const paddle::lite_api::Tensor> _tensor;
......
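PredictorOutput therefore exposes either a float buffer or an int buffer, plus the shape and lod of the output tensor. A hedged sketch of reading it back; the element-count helper simply multiplies the shape dimensions and is not part of the repository:

    #include <cstdint>
    #include <vector>

    // Illustrative: number of elements described by a PaddleLite output shape.
    int64_t element_count(const std::vector<int64_t> &shape) {
      int64_t n = 1;
      for (int64_t d : shape) n *= d;
      return n;
    }

    // Assumed usage for a PredictorOutput `out`:
    //   const std::vector<float> &values = out.data;  // empty when the layer returned int
    //   const std::vector<int> &ids = out.data_int;   // used by the few int-returning layers
    //   int64_t n = element_count(out.get_shape());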
......@@ -41,8 +41,8 @@ public class MiniActivity extends AppCompatActivity {
private String assetlabelFilePath = "labels/ppocr_keys_v1.txt";
private Button button;
- private ImageView imageView; // 显示图像
- private TextView textView; // 显示结果
+ private ImageView imageView; // displays the image
+ private TextView textView; // displays the recognition result
@Override
protected void onCreate(Bundle savedInstanceState) {
......@@ -85,7 +85,7 @@ public class MiniActivity extends AppCompatActivity {
}
}
};
- sender.sendEmptyMessage(REQUEST_LOAD_MODEL); // 对应上面的REQUEST_LOAD_MODEL, 调用onLoadModel()
+ sender.sendEmptyMessage(REQUEST_LOAD_MODEL); // corresponds to REQUEST_LOAD_MODEL above and triggers onLoadModel()
imageView = findViewById(R.id.imageView);
textView = findViewById(R.id.sample_text);
......@@ -112,7 +112,7 @@ public class MiniActivity extends AppCompatActivity {
}
/**
- * onCreate的时候调用, 模型初始化
+ * Called from onCreate; initializes the model
*
* @return
*/
......@@ -124,7 +124,8 @@ public class MiniActivity extends AppCompatActivity {
}
/**
- * on
+ * Initializes the engine
+ * Called from onCreate
*
* @return
*/
......@@ -133,7 +134,7 @@ public class MiniActivity extends AppCompatActivity {
String assetImagePath = "images/5.jpg";
InputStream imageStream = getAssets().open(assetImagePath);
Bitmap image = BitmapFactory.decodeStream(imageStream);
- // 这里输入是Bitmap
+ // the input here is a Bitmap
predictor.setInputImage(image);
return predictor.isLoaded() && predictor.runModel();
} catch (IOException e) {
......
......@@ -228,7 +228,7 @@ public class Predictor {
for (int i = 0; i < warmupIterNum; i++) {
paddlePredictor.runImage(inputData, width, height, channels, inputImage);
}
- warmupIterNum = 0; // 之后不要再warm了
+ warmupIterNum = 0; // no need to warm up again afterwards
// Run inference
start = new Date();
ArrayList<OcrResultModel> results = paddlePredictor.runImage(inputData, width, height, channels, inputImage);
......@@ -323,7 +323,7 @@ public class Predictor {
for (Point p : result.getPoints()) {
sb.append("(").append(p.x).append(",").append(p.y).append(") ");
}
- Log.i(TAG, sb.toString()); // 这里在logcat里打印结果
+ Log.i(TAG, sb.toString()); // prints the result to Logcat
outputResultSb.append(i + 1).append(": ").append(result.getLabel()).append("\n");
}
outputResult = outputResultSb.toString();
......