Unverified commit f7e1ba1d, authored by Shuangchi He, committed by GitHub

Fix some typos for Jetson, metrics and result. (#6118)

Parent: 37e3bc16
@@ -246,7 +246,7 @@ class MyMetric(object):
     def get_metric(self):
         """
-        return metircs {
+        return metrics {
             'acc': 0,
             'norm_edit_dis': 0,
         }
...
@@ -237,7 +237,7 @@ class MyMetric(object):
     def get_metric(self):
         """
-        return metircs {
+        return metrics {
             'acc': 0,
             'norm_edit_dis': 0,
         }
...
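The `MyMetric` hunks above show the expected shape of a custom metric: `get_metric()` returns the accumulated scores as a dict and `reset()` clears the internal state. Below is a minimal, self-contained sketch of that shape; the exact-match accumulation logic is purely illustrative and is not PaddleOCR's actual implementation.

```python
# Minimal sketch of the metric interface described in the hunks above.
# The accumulation logic is illustrative only.
class MyMetric(object):
    def __init__(self, main_indicator='acc'):
        self.main_indicator = main_indicator
        self.reset()

    def __call__(self, preds, labels):
        # accumulate per-batch statistics
        for pred, label in zip(preds, labels):
            self.correct += int(pred == label)
            self.total += 1

    def get_metric(self):
        acc = self.correct / max(self.total, 1)
        self.reset()
        return {'acc': acc, 'norm_edit_dis': 0}

    def reset(self):
        self.correct = 0
        self.total = 0


if __name__ == "__main__":
    metric = MyMetric()
    metric(["hello", "world"], ["hello", "word"])
    print(metric.get_metric())  # {'acc': 0.5, 'norm_edit_dis': 0}
```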
@@ -65,9 +65,9 @@ class DetMetric(object):
         }
         """
-        metircs = self.evaluator.combine_results(self.results)
+        metrics = self.evaluator.combine_results(self.results)
         self.reset()
-        return metircs
+        return metrics

     def reset(self):
         self.results = []  # clear results
@@ -78,9 +78,9 @@ class E2EMetric(object):
         self.results.append(result)

     def get_metric(self):
-        metircs = combine_results(self.results)
+        metrics = combine_results(self.results)
         self.reset()
-        return metircs
+        return metrics

     def reset(self):
         self.results = []  # clear results
@@ -59,9 +59,9 @@ class KIEMetric(object):
     def get_metric(self):
-        metircs = self.combine_results(self.results)
+        metrics = self.combine_results(self.results)
         self.reset()
-        return metircs
+        return metrics

     def reset(self):
         self.results = []  # clear results
...
@@ -149,7 +149,7 @@ if __name__ == "__main__":
             result, _ = ser_engine(img)
             fout.write(img_path + "\t" + json.dumps(
                 {
-                    "ser_resule": result,
+                    "ser_result": result,
                 }, ensure_ascii=False) + "\n")
             img_res = draw_ser_results(img, result)
...
@@ -145,7 +145,7 @@ def train(args):
     global_step = 0
     model.clear_gradients()
     train_dataloader_len = len(train_dataloader)
-    best_metirc = {'f1': 0}
+    best_metric = {'f1': 0}
     model.train()

     train_reader_cost = 0.0
@@ -192,8 +192,8 @@ def train(args):
                 # Log metrics
                 # Only evaluate when single GPU otherwise metrics may not average well
                 results = evaluate(model, eval_dataloader, logger)
-                if results['f1'] >= best_metirc['f1']:
-                    best_metirc = results
+                if results['f1'] >= best_metric['f1']:
+                    best_metric = results
                     output_dir = os.path.join(args.output_dir, "best_model")
                     os.makedirs(output_dir, exist_ok=True)
                     if distributed:
@@ -206,7 +206,7 @@ def train(args):
                     logger.info("Saving model checkpoint to {}".format(
                         output_dir))
                     logger.info("eval results: {}".format(results))
-                    logger.info("best_metirc: {}".format(best_metirc))
+                    logger.info("best_metric: {}".format(best_metric))

         reader_start = time.time()
     if rank == 0:
@@ -220,7 +220,7 @@ def train(args):
         tokenizer.save_pretrained(output_dir)
         paddle.save(args, os.path.join(output_dir, "training_args.bin"))
         logger.info("Saving model checkpoint to {}".format(output_dir))
-        logger.info("best_metirc: {}".format(best_metirc))
+        logger.info("best_metric: {}".format(best_metric))

 if __name__ == "__main__":
...
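The training-script hunks above follow the usual keep-the-best-checkpoint pattern: after each evaluation, `best_metric` is updated and the model is saved only when the F1 score improves. Here is a small self-contained sketch of that bookkeeping; `evaluate` and `save_checkpoint` are hypothetical stand-ins, not the script's real helpers.

```python
# Hypothetical sketch of the best-checkpoint bookkeeping shown above.
def evaluate(step):
    # stand-in for the real evaluation loop
    return {'f1': 0.80 + 0.01 * step, 'precision': 0.9, 'recall': 0.8}


def save_checkpoint(output_dir):
    # stand-in for model.save_pretrained / paddle.save
    print("Saving model checkpoint to {}".format(output_dir))


best_metric = {'f1': 0}
for step in range(3):
    results = evaluate(step)
    if results['f1'] >= best_metric['f1']:
        best_metric = results
        save_checkpoint("output/best_model")
    print("eval results: {}".format(results))
print("best_metric: {}".format(best_metric))
```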
-# Basic training and inference functionality test on Jeston
+# Basic training and inference functionality test on Jetson

-The main program for the basic training and inference functionality test on Jeston is `test_inference_inference.sh`. Since the CPU on Jeston is weak, Jeston only needs to cover the GPU and TensorRT inference parts of TIPC.
+The main program for the basic training and inference functionality test on Jetson is `test_inference_inference.sh`. Since the CPU on Jetson is weak, Jetson only needs to cover the GPU and TensorRT inference parts of TIPC.

 ## 1. Summary of test conclusions
@@ -42,7 +42,7 @@ The main program for the basic training and inference functionality test on Jeston is `test_inference_inference.
 First run `prepare.sh` to prepare the data and models, then run `test_inference_inference.sh` to perform the test; log files in the `python_infer_*.log` format are generated under the `test_tipc/output` directory.

-`test_inference_inference.sh` has only one mode, `whole_infer`; on Jeston, only the inference mode needs to be tested:
+`test_inference_inference.sh` has only one mode, `whole_infer`; on Jetson, only the inference mode needs to be tested:

 ```
 - Mode 3: whole_infer — no training, full-dataset prediction; walks through open-source model evaluation and dynamic-to-static export, and checks the inference model's prediction time and accuracy;
@@ -51,7 +51,7 @@ bash test_tipc/prepare.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_lin
 # Usage 1:
 bash test_tipc/test_inference_inference.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
 # Usage 2: run prediction on a specified GPU; the third argument is the GPU id
-bash test_tipc/test_inference_jeston.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer' '1'
+bash test_tipc/test_inference_jetson.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer' '1'
 ```
 After running the corresponding command, the run logs are automatically saved under the `test_tipc/output` folder. For example, in `whole_infer` mode the training + inference chain is run, so the `test_tipc/output` folder contains the following files:
...
@@ -93,11 +93,11 @@ class TextSystem(object):
             self.draw_crop_rec_res(self.args.crop_res_save_dir, img_crop_list,
                                    rec_res)
         filter_boxes, filter_rec_res = [], []
-        for box, rec_reuslt in zip(dt_boxes, rec_res):
-            text, score = rec_reuslt
+        for box, rec_result in zip(dt_boxes, rec_res):
+            text, score = rec_result
             if score >= self.drop_score:
                 filter_boxes.append(box)
-                filter_rec_res.append(rec_reuslt)
+                filter_rec_res.append(rec_result)
         return filter_boxes, filter_rec_res
...
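The `TextSystem` hunk above keeps only the detections whose recognition confidence reaches `drop_score`. The same filtering, pulled out as a standalone function for illustration (the function name and default threshold are assumptions, not part of this commit):

```python
def filter_by_score(dt_boxes, rec_res, drop_score=0.5):
    """Keep detection boxes whose recognition score is at least drop_score."""
    filter_boxes, filter_rec_res = [], []
    for box, rec_result in zip(dt_boxes, rec_res):
        text, score = rec_result
        if score >= drop_score:
            filter_boxes.append(box)
            filter_rec_res.append(rec_result)
    return filter_boxes, filter_rec_res


# usage with toy data: only the high-confidence pair is kept
boxes = [[0, 0, 10, 10], [5, 5, 20, 20]]
rec_res = [("hello", 0.93), ("blurry", 0.21)]
print(filter_by_score(boxes, rec_res))
```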
@@ -187,7 +187,7 @@ def create_predictor(args, mode, logger):
         gpu_id = get_infer_gpuid()
         if gpu_id is None:
             logger.warning(
-                "GPU is not found in current device by nvidia-smi. Please check your device or ignore it if run on jeston."
+                "GPU is not found in current device by nvidia-smi. Please check your device or ignore it if run on jetson."
            )
         config.enable_use_gpu(args.gpu_mem, 0)
         if args.use_tensorrt:
...
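For context, `config.enable_use_gpu(mem_mb, device_id)` in the hunk above is what actually places the predictor on the GPU. A minimal sketch assuming the standard `paddle.inference` Python API; the model paths are placeholders, not files from this repository:

```python
from paddle.inference import Config, create_predictor

# placeholder paths; point these at a real exported inference model
config = Config("inference/det/inference.pdmodel",
                "inference/det/inference.pdiparams")
config.enable_use_gpu(500, 0)  # 500 MB initial GPU memory pool, GPU id 0
predictor = create_predictor(config)
print(predictor.get_input_names())
```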
@@ -73,8 +73,8 @@ def main():
         images = paddle.to_tensor(images)
         preds = model(images)
         post_result = post_process_class(preds)
-        for rec_reuslt in post_result:
-            logger.info('\t result: {}'.format(rec_reuslt))
+        for rec_result in post_result:
+            logger.info('\t result: {}'.format(rec_result))
     logger.info("success!")
...
@@ -104,7 +104,7 @@ def main():
         preds = model(images)
         post_result = post_process_class(preds, shape_list)
         points, strs = post_result['points'], post_result['texts']
-        # write resule
+        # write result
         dt_boxes_json = []
         for poly, str in zip(points, strs):
             tmp_json = {"transcription": str}
...
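The last hunk builds a JSON-serializable list of end-to-end results; the remainder of the loop is collapsed in this diff. As a purely hypothetical illustration of how such polygon-plus-text results are typically serialized (not the file's actual code):

```python
import json

import numpy as np

# toy stand-ins for post_result['points'] and post_result['texts']
points = [np.array([[0, 0], [40, 0], [40, 12], [0, 12]])]
strs = ["PaddleOCR"]

dt_boxes_json = []
for poly, text in zip(points, strs):
    tmp_json = {"transcription": text}
    tmp_json["points"] = poly.tolist()  # arrays must be converted for JSON
    dt_boxes_json.append(tmp_json)

print(json.dumps(dt_boxes_json, ensure_ascii=False))
```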