From 0c75cbc55bca196a6705732594ede603f29e2f82 Mon Sep 17 00:00:00 2001
From: littletomatodonkey
Date: Mon, 1 Feb 2021 14:03:32 +0000
Subject: [PATCH] fix doc

---
 doc/doc_ch/recognition.md    | 2 +-
 doc/doc_en/recognition_en.md | 2 +-
 tools/program.py             | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/doc/doc_ch/recognition.md b/doc/doc_ch/recognition.md
index f36e8019..91d64907 100644
--- a/doc/doc_ch/recognition.md
+++ b/doc/doc_ch/recognition.md
@@ -133,7 +133,7 @@ word_dict.txt 每行有一个单字,将字符与数字索引映射在一起,
 您可以按需使用。
 
 目前的多语言模型仍处在demo阶段,会持续优化模型并补充语种,**非常欢迎您为我们提供其他语言的字典和字体**,
-如您愿意可将字典文件提交至 [dict](../../ppocr/utils/dict) 将语料文件提交至[corpus](../../ppocr/utils/corpus),我们会在Repo中感谢您。
+如您愿意可将字典文件提交至 [dict](../../ppocr/utils/dict),我们会在Repo中感谢您。
 
 - 自定义字典
 
diff --git a/doc/doc_en/recognition_en.md b/doc/doc_en/recognition_en.md
index c2ff2022..14ddcc75 100644
--- a/doc/doc_en/recognition_en.md
+++ b/doc/doc_en/recognition_en.md
@@ -126,7 +126,7 @@ In `word_dict.txt`, there is a single word in each line, which maps characters a
 You can use it on demand.
 
 The current multi-language model is still in the demo stage and will continue to optimize the model and add languages. **You are very welcome to provide us with dictionaries and fonts in other languages**,
-If you like, you can submit the dictionary file to [dict](../../ppocr/utils/dict) or corpus file to [corpus](../../ppocr/utils/corpus) and we will thank you in the Repo.
+If you like, you can submit the dictionary file to [dict](../../ppocr/utils/dict) and we will thank you in the Repo.
 
 To customize the dict file, please modify the `character_dict_path` field in `configs/rec/rec_icdar15_train.yml` and set `character_type` to `ch`.
 
diff --git a/tools/program.py b/tools/program.py
index a24d6ca7..99a37432 100755
--- a/tools/program.py
+++ b/tools/program.py
@@ -222,8 +222,8 @@ def train(config,
                 batch = [item.numpy() for item in batch]
                 post_result = post_process_class(preds, batch[1])
                 eval_class(post_result, batch)
-                metirc = eval_class.get_metric()
-                train_stats.update(metirc)
+                metric = eval_class.get_metric()
+                train_stats.update(metric)
 
             if vdl_writer is not None and dist.get_rank() == 0:
                 for k, v in train_stats.get().items():
@@ -251,7 +251,7 @@ def train(config,
                     min_average_window=10000,
                     max_average_window=15625)
                 Model_Average.apply()
-            cur_metric = eval(model, valid_dataloader, post_process_class,
+            cur_metric = eval(model, valid_dataloader, post_process_class,
                               eval_class)
            cur_metric_str = 'cur metric, {}'.format(', '.join(
                ['{}: {}'.format(k, v) for k, v in cur_metric.items()]))
--
GitLab
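
For reference, the dict customization mentioned in the recognition_en.md hunk above maps onto the Global section of the recognition config. A minimal sketch, assuming the usual layout of configs/rec/rec_icdar15_train.yml from this era of the repo; the dictionary file name below is a placeholder, not a file shipped with the project:

    Global:
      # path to a custom character dictionary (one character per line);
      # "my_custom_dict.txt" is a hypothetical example path
      character_dict_path: ppocr/utils/dict/my_custom_dict.txt
      # per the patched docs, set character_type to ch when using a custom dict
      character_type: ch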