From adc7c7085d5d9c3d1f0f03345d0955ec68832bb0 Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Thu, 15 Sep 2022 20:14:17 +0800
Subject: [PATCH] fix small typos in docs, cherry-pick from develop
---
docs/en/PPShiTu/PPShiTuV2_introduction.md | 2 +-
.../feature_extraction_en.md | 6 +++---
docs/zh_CN/PPShiTu/PPShiTuV2_introduction.md | 10 +++++-----
.../image_recognition_pipeline/feature_extraction.md | 8 ++++----
docs/zh_CN/inference_deployment/python_deploy.md | 2 +-
5 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/docs/en/PPShiTu/PPShiTuV2_introduction.md b/docs/en/PPShiTu/PPShiTuV2_introduction.md
index bae44aea..e420bd71 100644
--- a/docs/en/PPShiTu/PPShiTuV2_introduction.md
+++ b/docs/en/PPShiTu/PPShiTuV2_introduction.md
@@ -38,7 +38,7 @@ The following table lists the relevant metric obtained by PP-ShiTuV2 with compar
| :--------- | :------------------------------------------------ | :------- |
| | | recall@1 |
| PP-ShiTuV1 | 64(30+34)MB | 66.8% |
-| PP-ShiTuV2 | 49(30+19) | 73.8% |
+| PP-ShiTuV2 | 49(30+19)MB | 73.8% |
**Note:**
- For an introduction to the recall and mAP metrics, please refer to [Retrieval Metric](../algorithm_introduction/reid.md).
diff --git a/docs/en/image_recognition_pipeline/feature_extraction_en.md b/docs/en/image_recognition_pipeline/feature_extraction_en.md
index 26216543..856d4753 100644
--- a/docs/en/image_recognition_pipeline/feature_extraction_en.md
+++ b/docs/en/image_recognition_pipeline/feature_extraction_en.md
@@ -176,14 +176,14 @@ Model training mainly includes the starting training and restoring training from
**Notice:**
The online evaluation method is used by default in the configuration file. If you want to speed up training, you can turn off the online evaluation function by simply adding `-o Global.eval_during_train=False` after the above scripts.
-After training, the final model files `latest.pdparams`, `best_model.pdarams` and the training log file `train.log` will be generated in the output directory. Among them, `best_model` saves the best model under the current evaluation index, and `latest` is used to save the latest generated model, which is convenient to resume training from the checkpoint when training task is interrupted. Training can be resumed from a checkpoint by adding `-o Global.checkpoint="path_to_resume_checkpoint"` to the end of the above training scripts, as shown below.
+After training, the final model files `latest.pdparams`, `best_model.pdparams` and the training log file `train.log` will be generated in the output directory. Among them, `best_model` saves the best model under the current evaluation metric, and `latest` saves the most recently generated model, which is convenient for resuming training from the checkpoint when the training task is interrupted. Training can be resumed from a checkpoint by adding `-o Global.checkpoints="path_to_resume_checkpoint"` to the end of the above training scripts, as shown below.
- Single machine and single card checkpoint recovery training
```shell
export CUDA_VISIBLE_DEVICES=0
python3.7 tools/train.py \
-c ./ppcls/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml \
- -o Global.checkpoint="output/RecModel/latest"
+ -o Global.checkpoints="output/RecModel/latest"
```
- Single-machine multi-card checkpoint recovery training
```shell
@@ -191,7 +191,7 @@ After training, the final model files `latest.pdparams`, `best_model.pdarams` an
python3.7 -m paddle.distributed.launch --gpus="0,1,2,3" \
tools/train.py \
-c ./ppcls/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml \
- -o Global.checkpoint="output/RecModel/latest"
+ -o Global.checkpoints="output/RecModel/latest"
```
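For context on the flag being corrected in the hunks above: `Global.checkpoints` resumes a full training state, while `Global.pretrained_model` loads weights only. A minimal sketch, assuming the standard PaddleClas `-o` override mechanism and the default `output/RecModel` output directory used above:
```shell
# Resume an interrupted run: Global.checkpoints restores the model weights
# together with the optimizer state (latest.pdparams / latest.pdopt).
python3.7 tools/train.py \
    -c ./ppcls/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml \
    -o Global.checkpoints="output/RecModel/latest"

# Fine-tune from saved weights only (no optimizer state): use
# Global.pretrained_model instead of Global.checkpoints.
python3.7 tools/train.py \
    -c ./ppcls/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml \
    -o Global.pretrained_model="output/RecModel/best_model"
```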
diff --git a/docs/zh_CN/PPShiTu/PPShiTuV2_introduction.md b/docs/zh_CN/PPShiTu/PPShiTuV2_introduction.md
index bac4f8ac..7e819efa 100644
--- a/docs/zh_CN/PPShiTu/PPShiTuV2_introduction.md
+++ b/docs/zh_CN/PPShiTu/PPShiTuV2_introduction.md
@@ -40,7 +40,7 @@ PP-ShiTuV2 is a practical lightweight general image recognition
| :--------- | :---------------------- | :------------------ |
| | | recall@1 |
| PP-ShiTuV1 | 64(30+34)MB | 66.8% |
-| PP-ShiTuV2 | 49(30+19) | 73.8% |
+| PP-ShiTuV2 | 49(30+19)MB | 73.8% |
**Note:**
- For an introduction to the recall and mAP metrics, please refer to [Common Metrics](../algorithm_introduction/reid.md#22-常用指标).
@@ -70,10 +70,10 @@ PP-ShiTuV2 is a practical lightweight general image recognition
```shell
# If your machine has CUDA9 or CUDA10 installed, run the following command to install
python3.7 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple
-
+
# If your machine is CPU-only, run the following command to install
python3.7 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
-
+
# Install the faiss library
python3.7 -m pip install faiss-cpu==1.7.1post2
```
@@ -82,7 +82,7 @@ PP-ShiTuV2 is a practical lightweight general image recognition
```shell
# Enter the PaddleClas root directory
cd PaddleClas
-
+
# Install paddleclas
python3.7 setup.py install
```
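A quick, hedged sanity check after installation (not part of the original docs; `paddle.utils.run_check()` is PaddlePaddle's built-in self-test):
```shell
# Confirm that PaddlePaddle runs correctly on this machine
python3.7 -c "import paddle; paddle.utils.run_check()"

# Confirm that the paddleclas package was installed by setup.py
python3.7 -c "import paddleclas; print(paddleclas.__file__)"
```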
@@ -92,7 +92,7 @@ PP-ShiTuV2 is a practical lightweight general image recognition
```shell
# Download and extract the demo data
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v2.0.tar && tar -xf drink_dataset_v2.0.tar
-
+
# Run the recognition command
paddleclas \
--model_name=PP-ShiTuV2 \
diff --git a/docs/zh_CN/image_recognition_pipeline/feature_extraction.md b/docs/zh_CN/image_recognition_pipeline/feature_extraction.md
index f037dc25..a2107fa6 100644
--- a/docs/zh_CN/image_recognition_pipeline/feature_extraction.md
+++ b/docs/zh_CN/image_recognition_pipeline/feature_extraction.md
@@ -38,7 +38,7 @@
- **Backbone**: the backbone network that extracts preliminary features from the input image, generally specified jointly by the [Backbone](../../../ppcls/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml#L33-L37) and [BackboneStopLayer](../../../ppcls/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml#L38-L39) fields in the configuration file.
- **Neck**: used for feature enhancement and feature dimension transformation. It can be a simple FC Layer for dimension transformation, or a more complex FPN structure for feature enhancement; generally specified by the [Neck](../../../ppcls/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml#L40-L51) field in the configuration file.
-- **Head**: converts the output feature of `Neck` into logits so that the model can be trained as a classification task. Besides the common FC Layer, it can be replaced with modules such as [CosMargin](../../../ppcls/arch/gears/cosmargin.py), [ArcMargin](../../../ppcls/arch/gears/arcmargin.py) and [CircleMargin](../../../ppcls/arch/gears/circlemargin.py); generally specified by the [Head](`../../../ppcls/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml#L52-L60) field in the configuration file.
+- **Head**: converts the output feature of `Neck` into logits so that the model can be trained as a classification task. Besides the common FC Layer, it can be replaced with modules such as [CosMargin](../../../ppcls/arch/gears/cosmargin.py), [ArcMargin](../../../ppcls/arch/gears/arcmargin.py) and [CircleMargin](../../../ppcls/arch/gears/circlemargin.py); generally specified by the [Head](../../../ppcls/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml#L52) field in the configuration file.
- **Loss**: specifies the loss function to use. The Loss is designed in a combined form, so that Classification Loss and Metric learning Loss can be conveniently combined; generally specified by the [Loss](../../../ppcls/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml#L63-L77) field in the configuration file.
@@ -185,14 +185,14 @@ The Loss part uses [Cross entropy loss](../../../ppcls/loss/celoss.py) and [Tripl
**Note:**
The configuration file uses online evaluation by default. If you want to speed up training, you can disable the online evaluation function by simply appending `-o Global.eval_during_train=False` to the above command.
-After training, the final model files `latest.pdparams`, `best_model.pdarams` and the training log file `train.log` will be generated in the output directory. Among them, `best_model` saves the best model under the current evaluation metric, and `latest` saves the most recently generated model, which is convenient for resuming training from the checkpoint when the training task is interrupted. Training can be resumed from a checkpoint by appending `-o Global.checkpoint="path_to_resume_checkpoint"` to the end of the above training command, as shown below.
+After training, the final model files `latest.pdparams`, `best_model.pdparams` and the training log file `train.log` will be generated in the output directory. Among them, `best_model` saves the best model under the current evaluation metric, and `latest` saves the most recently generated model, which is convenient for resuming training from the checkpoint when the training task is interrupted. Training can be resumed from a checkpoint by appending `-o Global.checkpoints="path_to_resume_checkpoint"` to the end of the above training command, as shown below.
- Resume training from a checkpoint on a single machine with a single GPU
```shell
export CUDA_VISIBLE_DEVICES=0
python3.7 tools/train.py \
-c ./ppcls/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml \
- -o Global.checkpoint="output/RecModel/latest"
+ -o Global.checkpoints="output/RecModel/latest"
```
- Resume training from a checkpoint on a single machine with multiple GPUs
```shell
@@ -200,7 +200,7 @@ The Loss part uses [Cross entropy loss](../../../ppcls/loss/celoss.py) and [Tripl
python3.7 -m paddle.distributed.launch --gpus="0,1,2,3" \
tools/train.py \
-c ./ppcls/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml \
- -o Global.checkpoint="output/RecModel/latest"
+ -o Global.checkpoints="output/RecModel/latest"
```
diff --git a/docs/zh_CN/inference_deployment/python_deploy.md b/docs/zh_CN/inference_deployment/python_deploy.md
index 06b3b670..e432326b 100644
--- a/docs/zh_CN/inference_deployment/python_deploy.md
+++ b/docs/zh_CN/inference_deployment/python_deploy.md
@@ -103,7 +103,7 @@ python3.7 python/predict_rec.py -c configs/inference_rec.yaml
The above prediction command produces a 512-dimensional feature vector, which is printed directly to the command line.
-In the configuration file `configs/inference_det.yaml`, the following fields are used to configure prediction parameters:
+In the configuration file `configs/inference_rec.yaml`, the following fields are used to configure prediction parameters:
* `Global.infer_imgs`: path of the image file(s) to predict;
* `Global.use_gpu`: whether to use GPU for prediction; defaults to `True`.
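To illustrate the two fields above: both can also be overridden on the command line with `-o`, the same mechanism used by the training scripts. A minimal sketch, assuming `python/predict_rec.py` accepts such overrides, with a placeholder image path to replace with your own:
```shell
# Extract a feature vector for one image on CPU, overriding the config values
python3.7 python/predict_rec.py \
    -c configs/inference_rec.yaml \
    -o Global.infer_imgs="./images/your_test_image.jpg" \
    -o Global.use_gpu=False
```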
--
GitLab