From da166b0e2b7e2508b06b27b0ccf7848b1447e22e Mon Sep 17 00:00:00 2001 From: chenxujun Date: Tue, 31 Jan 2023 10:13:40 +0800 Subject: [PATCH] Fix some words (#7652) --- ...\224\347\226\221\347\254\254\344\270\200\346\234\237.md" | 6 +++--- benchmark/run_benchmark.sh | 2 +- configs/keypoint/README_en.md | 4 ++-- configs/keypoint/hrnet/hrnet_w32_256x192.yml | 2 +- .../bytetrack/detector/yolox_x_24e_800x1440_mix_mot_ch.yml | 2 +- docs/tutorials/KeyPointConfigGuide_en.md | 4 ++-- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git "a/activity/\347\233\264\346\222\255\347\255\224\347\226\221\347\254\254\344\270\200\346\234\237.md" "b/activity/\347\233\264\346\222\255\347\255\224\347\226\221\347\254\254\344\270\200\346\234\237.md" index f94f0dd09..393bf18f7 100644 --- "a/activity/\347\233\264\346\222\255\347\255\224\347\226\221\347\254\254\344\270\200\346\234\237.md" +++ "b/activity/\347\233\264\346\222\255\347\255\224\347\226\221\347\254\254\344\270\200\346\234\237.md" @@ -44,13 +44,13 @@ A4. PP-YOLOE的backbone和neck部分的Conv是没有bias参数的,head部分 A5. PaddleInference会将paddle导出的预测模型会前向算子做融合,从而实现速度优化,并且实际部署过程也是使用PaddleInference实现 #### Q6. PP-YOLOE系列在部署的时候,前后处理是不是一样的啊? -A6. PP-YOLO系列模型在部署时的前处理都是 decode-resize-nomalize-permute的流程,后处理方面PP-YOLOv2使用了Matrix NMS,PP-YOLOE使用的是普通的NMS算法 +A6. PP-YOLO系列模型在部署时的前处理都是 decode-resize-normalize-permute的流程,后处理方面PP-YOLOv2使用了Matrix NMS,PP-YOLOE使用的是普通的NMS算法 #### Q7. 针对小目标和类别不平衡的数据集,PP-YOLOE有什么调整策略吗 A7 针对小目标数据集,可以适当增大ppyoloe的输入尺寸,然后在模型中增加注意力机制,目前基于PP-YOLOE的小目标检测正在开发中;针对类别不平衡问题,可以从数据采样的角度处理,目前PP-YOLOE还没有专门针对类别不平衡问题的优化 ## PP-Human问题 -#### Q1. 请问pphuman用导出的模型18个点(不是官方17个点)去预测时,报错是问什么 +#### Q1. 请问pphuman用导出的模型18个点(不是官方17个点)去预测时,报错是为什么 A1. 这个问题是关键点模型输出点的数量与行为识别模型不一致导致的。如果希望用18点模型预测,除了关键点用18点模型以外,还需要自建18点的动作识别模型。 #### Q2. 为什么官方导出模型设置的window_size是50 @@ -110,7 +110,7 @@ A4. 在PaddleDetection中,支持负样本训练,TrainDataset下设置allow_e #### Q1. PaddleDetection训练的模型导出inference model后,在做推理部署的时候,前后处理相关代码如何编写,有什么参考教程吗? A1. 
目前PaddleDetection下的网络模型大部分都能够支持c++ inference,不同的处理方式针对不同功能,例如:PP-YOLOE速度测试不包含后处理,PicoDet为支持不同的第三方推理引擎会设置是否导出nms -object_detector.cc是针对所有检测模型的流程,其中前处理大部分都是decode-resize-nomalize-permute 部分网络会加入padding的操作;大部分模型的后处理操作都放在模型里面了,picodet有单独提供nms的后处理代码 +object_detector.cc是针对所有检测模型的流程,其中前处理大部分都是decode-resize-normalize-permute 部分网络会加入padding的操作;大部分模型的后处理操作都放在模型里面了,picodet有单独提供nms的后处理代码 检测模型的输入统一为image,im_shape,scale_factor ,如果模型中没有使用im_shape,输出个数会减少,但是整套预处理流程不需要额外开发 diff --git a/benchmark/run_benchmark.sh b/benchmark/run_benchmark.sh index c9eb52eb1..908bfe59f 100644 --- a/benchmark/run_benchmark.sh +++ b/benchmark/run_benchmark.sh @@ -85,7 +85,7 @@ function _train(){ fi } -source ${BENCHMARK_ROOT}/scripts/run_model.sh # 在该脚本中会对符合benchmark规范的log使用analysis.py 脚本进行性能数据解析;该脚本在连调时可从benchmark repo中下载https://github.com/PaddlePaddle/benchmark/blob/master/scripts/run_model.sh;如果不联调只想要产出训练log可以注掉本行,提交时需打开 +source ${BENCHMARK_ROOT}/scripts/run_model.sh # 在该脚本中会对符合benchmark规范的log使用analysis.py 脚本进行性能数据解析;该脚本在联调时可从benchmark repo中下载https://github.com/PaddlePaddle/benchmark/blob/master/scripts/run_model.sh;如果不联调只想要产出训练log可以注掉本行,提交时需打开 _set_params $@ # _train # 如果只想产出训练log,不解析,可取消注释 _run # 该函数在run_model.sh中,执行时会调用_train; 如果不联调只想要产出训练log可以注掉本行,提交时需打开 diff --git a/configs/keypoint/README_en.md b/configs/keypoint/README_en.md index 76e3d543a..cfb2e7c8f 100644 --- a/configs/keypoint/README_en.md +++ b/configs/keypoint/README_en.md @@ -198,7 +198,7 @@ We take an example of [tinypose_256x192](./tiny_pose/README_en.md) to show how t #### 1、For configs [tinypose_256x192.yml](../../configs/keypoint/tiny_pose/tinypose_256x192.yml) -you may need to modity these for your job: +you may need to modify these for your job: ``` num_joints: &num_joints 17 #the number of joints in your job @@ -207,7 +207,7 @@ train_width: &train_width 192 #the width of model input hmsize: &hmsize [48, 64] #the shape of model output,usually 1/4 of [w,h] flip_perm: &flip_perm [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]] #the correspondence between left and right keypoint id,used for flip transform。You can add an line(by "flip: False") behind of flip_pairs in RandomFlipHalfBodyTransform of TrainReader if you don't need it num_joints_half_body: 8 #The joint numbers of half body, used for half_body transform -prob_half_body: 0.3 #The probility of half_body transform, set to 0 if you don't need it +prob_half_body: 0.3 #The probability of half_body transform, set to 0 if you don't need it upper_body_ids: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] #The joint ids of half(upper) body, used to get the upper joints in half_body transform ``` diff --git a/configs/keypoint/hrnet/hrnet_w32_256x192.yml b/configs/keypoint/hrnet/hrnet_w32_256x192.yml index 37782b748..d80d97264 100644 --- a/configs/keypoint/hrnet/hrnet_w32_256x192.yml +++ b/configs/keypoint/hrnet/hrnet_w32_256x192.yml @@ -139,4 +139,4 @@ TestReader: is_scale: true - Permute: {} batch_size: 1 - fuse_normalize: false #whether to fuse nomalize layer into model while export model + fuse_normalize: false #whether to fuse normalize layer into model while export model diff --git a/configs/mot/bytetrack/detector/yolox_x_24e_800x1440_mix_mot_ch.yml b/configs/mot/bytetrack/detector/yolox_x_24e_800x1440_mix_mot_ch.yml index 34678d52b..ae0fba92e 100644 --- a/configs/mot/bytetrack/detector/yolox_x_24e_800x1440_mix_mot_ch.yml +++ b/configs/mot/bytetrack/detector/yolox_x_24e_800x1440_mix_mot_ch.yml @@ -10,7 +10,7 @@ snapshot_epoch: 2 # schedule configuration for fine-tuning epoch: 24 LearningRate: 
- base_lr: 0.00075 # fintune + base_lr: 0.00075 # fine-tune schedulers: - !CosineDecay max_epochs: 24 diff --git a/docs/tutorials/KeyPointConfigGuide_en.md b/docs/tutorials/KeyPointConfigGuide_en.md index fa700a28e..8ad821881 100644 --- a/docs/tutorials/KeyPointConfigGuide_en.md +++ b/docs/tutorials/KeyPointConfigGuide_en.md @@ -175,9 +175,9 @@ TestDataset: worker_num: 2 #the workers to load Dataset -global_mean: &global_mean [0.485, 0.456, 0.406] #means used to nomalize image +global_mean: &global_mean [0.485, 0.456, 0.406] #means used to normalize image -global_std: &global_std [0.229, 0.224, 0.225] #stds used to nomalize image +global_std: &global_std [0.229, 0.224, 0.225] #stds used to normalize image TrainReader: #TrainReader configs -- GitLab