From 7fa2a4a67ea6d6baf65f01e0b982e1d9551076c3 Mon Sep 17 00:00:00 2001
From: JYChen
Date: Sat, 13 Nov 2021 16:30:16 +0800
Subject: [PATCH] [Cherry-Pick] tinypose readme & support picodet in det-keypoint unite infer (#4572)

* add tinypose readme
* default fusenorm to False in export model
* fix picodet error in det_keypoint_unite_infer
* Optimize configuration file path
---
 configs/keypoint/tiny_pose/README.md          | 190 ++++++++++++++++++
 .../{keypoint => }/tinypose_128x96.yml        |   4 +-
 .../{keypoint => }/tinypose_256x192.yml       |   4 +-
 .../picodet_s_192_pedestrian.yml              | 143 +++++++++++++
 .../picodet_s_320_pedestrian.yml              |   0
 deploy/python/det_keypoint_unite_infer.py     |  27 +--
 6 files changed, 352 insertions(+), 16 deletions(-)
 create mode 100644 configs/keypoint/tiny_pose/README.md
 rename configs/keypoint/tiny_pose/{keypoint => }/tinypose_128x96.yml (98%)
 rename configs/keypoint/tiny_pose/{keypoint => }/tinypose_256x192.yml (98%)
 create mode 100644 configs/picodet/application/pedestrian_detection/picodet_s_192_pedestrian.yml
 rename configs/{keypoint/tiny_pose => picodet/application}/pedestrian_detection/picodet_s_320_pedestrian.yml (100%)

diff --git a/configs/keypoint/tiny_pose/README.md b/configs/keypoint/tiny_pose/README.md
new file mode 100644
index 000000000..043fe7a50
--- /dev/null
+++ b/configs/keypoint/tiny_pose/README.md
@@ -0,0 +1,190 @@
+# PP-TinyPose
+
+## Introduction
+PP-TinyPose is a real-time pose estimation model optimized by PaddleDetection for mobile devices; it runs multi-person pose estimation smoothly on mobile hardware. Building on [PicoDet](../../picodet/README.md), PaddleDetection's self-developed lightweight detection model, we also provide a dedicated lightweight pedestrian detection model. TinyPose requires the following dependencies:
+- [PaddlePaddle](https://github.com/PaddlePaddle/Paddle)>=2.2
+
+For deployment on mobile devices, you also need:
+- [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite)>=2.10
+
+## Model Zoo
+### Pose Estimation Models
+| Model | Input Size | AP (COCO val) | Single-Person Inference Time (FP32) | Single-Person Inference Time (FP16) | Config | Model Weights | Inference Deployment Model | Paddle-Lite Model (FP32) | Paddle-Lite Model (FP16) |
+| :------------------------ | :-------: | :------: | :------: | :---: | :---: | :---: | :---: | :---: | :---: |
+| PP-TinyPose | 128*96 | 58.1 | 4.57ms | 3.27ms | [Config](./tinypose_128x96.yml) | [Model](https://bj.bcebos.com/v1/paddledet/models/keypoint/tinypose_128x96.pdparams) | [Inference Model](https://bj.bcebos.com/v1/paddledet/models/keypoint/tinypose_128x96.tar) | [Lite Model](https://bj.bcebos.com/v1/paddledet/models/keypoint/tinypose_128x96.nb) | [Lite Model (FP16)](https://bj.bcebos.com/v1/paddledet/models/keypoint/tinypose_128x96_fp16.nb) |
+| PP-TinyPose | 256*192 | 68.8 | 14.07ms | 8.33ms | [Config](./tinypose_256x192.yml) | [Model](https://bj.bcebos.com/v1/paddledet/models/keypoint/tinypose_256x192.pdparams) | [Inference Model](https://bj.bcebos.com/v1/paddledet/models/keypoint/tinypose_256x192.tar) | [Lite Model](https://bj.bcebos.com/v1/paddledet/models/keypoint/tinypose_256x192.nb) | [Lite Model (FP16)](https://bj.bcebos.com/v1/paddledet/models/keypoint/tinypose_256x192_fp16.nb) |
+
+### Pedestrian Detection Models
+| Model | Input Size | mAP (COCO val) | Average Inference Time (FP32) | Average Inference Time (FP16) | Config | Model Weights | Inference Deployment Model | Paddle-Lite Model (FP32) | Paddle-Lite Model (FP16) |
+| :------------------------ | :-------: | :------: | :------: | :---: | :---: | :---: | :---: | :---: | :---: |
+| PicoDet-S-Pedestrian | 192*192 | 29.0 | 4.30ms | 2.37ms | [Config](../../picodet/application/pedestrian_detection/picodet_s_192_pedestrian.yml) | [Model](https://bj.bcebos.com/v1/paddledet/models/keypoint/picodet_s_192_pedestrian.pdparams) | [Inference Model](https://bj.bcebos.com/v1/paddledet/models/keypoint/picodet_s_192_pedestrian.tar) | [Lite Model](https://bj.bcebos.com/v1/paddledet/models/keypoint/picodet_s_192_pedestrian.nb) | [Lite Model (FP16)](https://bj.bcebos.com/v1/paddledet/models/keypoint/picodet_s_192_pedestrian_fp16.nb) |
+| PicoDet-S-Pedestrian | 320*320 | 38.5 | 10.26ms | 6.30ms | [Config](../../picodet/application/pedestrian_detection/picodet_s_320_pedestrian.yml) | [Model](https://bj.bcebos.com/v1/paddledet/models/keypoint/picodet_s_320_pedestrian.pdparams) | [Inference Model](https://bj.bcebos.com/v1/paddledet/models/keypoint/picodet_s_320_pedestrian.tar) | [Lite Model](https://bj.bcebos.com/v1/paddledet/models/keypoint/picodet_s_320_pedestrian.nb) | [Lite Model (FP16)](https://bj.bcebos.com/v1/paddledet/models/keypoint/picodet_s_320_pedestrian_fp16.nb) |
+
+
+**Notes**
+- Both the pose estimation models and the pedestrian detection models are trained on `COCO train2017` plus the `AI Challenger trainset`. The pose estimation models are evaluated on `COCO person keypoints val2017`, and the pedestrian detection models on `COCO instances val2017`.
+- The accuracy of the pose estimation models is measured with ground-truth bounding boxes.
+- Both model families are trained on 4 GPUs. If your training environment uses a different number of GPUs or a different batch size, adjust the learning rate accordingly as described in the [FAQ](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/FAQ.md).
+- Inference time is measured on a Qualcomm Snapdragon 865 with 4 threads under ARMv8.
+
+### Pipeline Performance
+| Pedestrian Detection Model | Pose Estimation Model | mAP (COCO val) | Single-Person Time (FP32) | Single-Person Time (FP16) | 6-Person Time (FP32) | 6-Person Time (FP16) |
+| :------------------------ | :-------: | :------: | :---: | :---: | :---: | :---: |
+| PicoDet-S-Pedestrian-192*192 | PP-TinyPose-128*96 | 36.7 | 11.72 ms | 8.18 ms | 36.22 ms | 26.33 ms |
+| PicoDet-S-Pedestrian-320*320 | PP-TinyPose-128*96 | 44.2 | 19.45 ms | 14.41 ms | 44.0 ms | 32.57 ms |
+
+**Notes**
+- Here the accuracy of the pose estimation models is measured with the boxes produced by the corresponding pedestrian detection model.
+- Flip is disabled during accuracy testing, and the detection confidence threshold is set to 0.5.
+- Speed is measured on a Qualcomm Snapdragon 865 with 4 threads under ARMv8, using FP32 inference.
+- Pipeline time includes model preprocessing, inference, and postprocessing.
+
+
+## Model Training
+In addition to `COCO`, the training set of both the pose estimation and pedestrian detection models is extended with the [AI Challenger](https://arxiv.org/abs/1711.06475) dataset. The keypoints of each dataset are defined as follows:
+```
+COCO keypoint Description:
+    0: "Nose",
+    1: "Left Eye",
+    2: "Right Eye",
+    3: "Left Ear",
+    4: "Right Ear",
+    5: "Left Shoulder",
+    6: "Right Shoulder",
+    7: "Left Elbow",
+    8: "Right Elbow",
+    9: "Left Wrist",
+    10: "Right Wrist",
+    11: "Left Hip",
+    12: "Right Hip",
+    13: "Left Knee",
+    14: "Right Knee",
+    15: "Left Ankle",
+    16: "Right Ankle"
+
+AI Challenger Description:
+    0: "Right Shoulder",
+    1: "Right Elbow",
+    2: "Right Wrist",
+    3: "Left Shoulder",
+    4: "Left Elbow",
+    5: "Left Wrist",
+    6: "Right Hip",
+    7: "Right Knee",
+    8: "Right Ankle",
+    9: "Left Hip",
+    10: "Left Knee",
+    11: "Left Ankle",
+    12: "Head top",
+    13: "Neck"
+```
+
+Since the two datasets annotate keypoints differently, we aligned their annotations while keeping the COCO annotation format; see the [merged training annotation file](https://bj.bcebos.com/v1/paddledet/data/keypoint/aic_coco_train_cocoformat.json). The main processing steps were as follows (an illustrative index-mapping sketch follows the list):
+- The `AI Challenger` keypoints were reordered to match the COCO order, and the labeled/visible flags were unified;
+- The keypoints unique to `AI Challenger` were discarded, and the `COCO`-only keypoints were marked as unlabeled in the `AI Challenger` data;
+- `image_id` and `annotation id` were re-numbered.
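+
+The sketch below shows one way the index mapping implied by the two keypoint definitions above could be expressed. The dictionary and the helper function are an assumed example for clarity only; they are not the released conversion script.
+```python
+# Illustrative only: AI Challenger keypoint index -> COCO keypoint index,
+# derived from the two keypoint definitions listed above.
+AIC_TO_COCO = {
+    0: 6,    # Right Shoulder
+    1: 8,    # Right Elbow
+    2: 10,   # Right Wrist
+    3: 5,    # Left Shoulder
+    4: 7,    # Left Elbow
+    5: 9,    # Left Wrist
+    6: 12,   # Right Hip
+    7: 14,   # Right Knee
+    8: 16,   # Right Ankle
+    9: 11,   # Left Hip
+    10: 13,  # Left Knee
+    11: 15,  # Left Ankle
+    # 12 (Head top) and 13 (Neck) are AI Challenger-specific and are dropped.
+}
+
+def aic_to_coco_keypoints(aic_kpts):
+    """Convert one AI Challenger annotation (14 x [x, y, v]) into COCO order (17 x [x, y, v]).
+    COCO-only points (nose, eyes, ears) stay unlabeled (v = 0)."""
+    coco_kpts = [[0.0, 0.0, 0] for _ in range(17)]
+    for aic_idx, coco_idx in AIC_TO_COCO.items():
+        coco_kpts[coco_idx] = list(aic_kpts[aic_idx])
+    return coco_kpts
+```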
+
+Using the merged annotations converted to the `COCO` format, start training:
+```bash
+# pose estimation model
+python3 -m paddle.distributed.launch tools/train.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml
+
+# pedestrian detection model
+python3 -m paddle.distributed.launch tools/train.py -c configs/picodet/application/pedestrian_detection/picodet_s_320_pedestrian.yml
+```
+
+## Deployment
+### Deployment for Inference
+1. Export the trained models with the following commands:
+```bash
+python3 tools/export_model.py -c configs/picodet/application/pedestrian_detection/picodet_s_192_pedestrian.yml --output_dir=output_inference -o weights=output/picodet_s_192_pedestrian/model_final
+
+python3 tools/export_model.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml --output_dir=output_inference -o weights=output/tinypose_128x96/model_final
+```
+The exported model looks like:
+```
+picodet_s_192_pedestrian
+├── infer_cfg.yml
+├── model.pdiparams
+├── model.pdiparams.info
+└── model.pdmodel
+```
+You can also download the corresponding `Inference Deployment Model` entries from the model zoo above to obtain the exported pedestrian detection and pose estimation models directly; just extract them.
+
+2. Run joint deployment inference with Python (a sketch of the underlying top-down flow follows this list):
+```bash
+# inference on an image
+python3 deploy/python/det_keypoint_unite_infer.py --det_model_dir=output_inference/picodet_s_320_pedestrian --keypoint_model_dir=output_inference/tinypose_128x96 --image_file={your image file} --device=GPU
+
+# inference on multiple images
+python3 deploy/python/det_keypoint_unite_infer.py --det_model_dir=output_inference/picodet_s_320_pedestrian --keypoint_model_dir=output_inference/tinypose_128x96 --image_dir={dir of image file} --device=GPU
+
+# inference on a video
+python3 deploy/python/det_keypoint_unite_infer.py --det_model_dir=output_inference/picodet_s_320_pedestrian --keypoint_model_dir=output_inference/tinypose_128x96 --video_file={your video file} --device=GPU
+```
+
+3. Run joint deployment inference with C++
+- First follow the [C++ deployment guide](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.3/deploy/cpp) to prepare the `paddle_inference` library and related dependencies for your environment.
+- We provide a [one-click build script](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.3/deploy/cpp/scripts/build.sh); fill in the locations of the relevant environment variables, compile the code, and you will get the executable. Make sure `WITH_KEYPOINT=ON` during this step.
+- After compilation, run deployment inference, for example:
+```bash
+# inference on an image
+./build/main --model_dir=output_inference/picodet_s_320_pedestrian --model_dir_keypoint=output_inference/tinypose_128x96 --image_file={your image file} --device=GPU
+
+# inference on multiple images
+./build/main --model_dir=output_inference/picodet_s_320_pedestrian --model_dir_keypoint=output_inference/tinypose_128x96 --image_dir={dir of image file} --device=GPU
+
+# inference on a video
+./build/main --model_dir=output_inference/picodet_s_320_pedestrian --model_dir_keypoint=output_inference/tinypose_128x96 --video_file={your video file} --device=GPU
+```
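+
+The Python and C++ entry points above share the same top-down logic: run the pedestrian detector first, then run PP-TinyPose on a crop of each detected box. The sketch below is a minimal illustration of that flow; `detect_pedestrians` and `estimate_pose` are hypothetical stand-ins for the detector and keypoint predictors, not the actual APIs in `deploy/python`.
+```python
+import cv2  # assumed available; used only to read and crop the image
+
+def run_topdown_pipeline(image_path, detect_pedestrians, estimate_pose, det_threshold=0.5):
+    """detect_pedestrians(image) -> iterable of (x1, y1, x2, y2, score);
+    estimate_pose(crop) -> keypoints for a single person (hypothetical helpers)."""
+    image = cv2.imread(image_path)
+    results = []
+    for x1, y1, x2, y2, score in detect_pedestrians(image):
+        if score < det_threshold:  # drop low-confidence pedestrian boxes
+            continue
+        crop = image[int(y1):int(y2), int(x1):int(x2)]  # person region fed to the pose model
+        keypoints = estimate_pose(crop)  # keypoint coordinates are relative to the crop
+        results.append({"bbox": (x1, y1, x2, y2), "keypoints": keypoints})
+    return results
+```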
+
+### Mobile Deployment
+#### Deploy with the models we provide
+1. Download the `Paddle-Lite Model` files from the model zoo above to obtain the `.nb` files of the pedestrian detection model and the pose estimation model.
+2. Prepare the Paddle-Lite runtime. You can obtain a precompiled library directly from the [Paddle-Lite precompiled library downloads](https://paddle-lite.readthedocs.io/zh/latest/quick_start/release_lib.html) without building it yourself. For FP16 inference, download the [FP16 precompiled library](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.android.armv8_clang_c++_static_with_extra_with_cv_with_fp16.tiny_publish_427e46.zip).
+3. Compile the model runner code; see [Paddle-Lite deployment](../../../deploy/lite/README.md) for the detailed steps.
+
+#### Deploy a model you trained yourself
+If you want to deploy models you trained yourself, follow these steps:
+1. Export the trained models
+```bash
+python3 tools/export_model.py -c configs/picodet/application/pedestrian_detection/picodet_s_192_pedestrian.yml --output_dir=output_inference -o weights=output/picodet_s_192_pedestrian/model_final TestReader.fuse_normalize=true
+
+python3 tools/export_model.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml --output_dir=output_inference -o weights=output/tinypose_128x96/model_final TestReader.fuse_normalize=true
+```
+2. Convert to Paddle-Lite models (requires [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite))
+
+- Install Paddle-Lite:
+```bash
+pip install paddlelite
+```
+- Run the following commands to obtain the `.nb` Paddle-Lite models for on-device deployment:
+```bash
+# 1. convert the pedestrian detection model
+# FP32
+paddle_lite_opt --model_dir=inference_model/picodet_s_192_pedestrian --valid_targets=arm --optimize_out=picodet_s_192_pedestrian_fp32
+# FP16
+paddle_lite_opt --model_dir=inference_model/picodet_s_192_pedestrian --valid_targets=arm --optimize_out=picodet_s_192_pedestrian_fp16 --enable_fp16=true
+
+# 2. convert the pose estimation model
+# FP32
+paddle_lite_opt --model_dir=inference_model/tinypose_128x96 --valid_targets=arm --optimize_out=tinypose_128x96_fp32
+# FP16
+paddle_lite_opt --model_dir=inference_model/tinypose_128x96 --valid_targets=arm --optimize_out=tinypose_128x96_fp16 --enable_fp16=true
+```
+
+3. Compile the model runner code; see [Paddle-Lite deployment](../../../deploy/lite/README.md) for the detailed steps.
+
+We already provide [end-to-end example code](../../../deploy/lite/) covering data preprocessing, model inference, and postprocessing, which you can adapt to your own needs.
+
+**Notes**
+- Adding the `TestReader.fuse_normalize=true` option when exporting a model folds the image Normalize operation into the model itself, which speeds up inference.
+- FP16 inference gives faster model inference. To deploy FP16 models, besides the conversion step above, you also need a Paddle-Lite prediction library built with FP16 support; see [Paddle-Lite inference on ARM CPU](https://paddle-lite.readthedocs.io/zh/latest/demo_guides/arm_cpu.html).
+
+## Optimization Strategies
+TinyPose uses the following strategies to balance speed and accuracy:
+- A lightweight backbone for pose estimation, [wider naive Lite-HRNet](https://arxiv.org/abs/2104.06403).
+- Smaller input sizes.
+- Distribution-Aware coordinate Representation of Keypoints ([DARK](https://arxiv.org/abs/1910.06278)), to improve accuracy on low-resolution heatmaps.
+- Unbiased Data Processing ([UDP](https://arxiv.org/abs/1911.07524)).
+- Augmentation by Information Dropping ([AID](https://arxiv.org/abs/2008.07139v2)).
+- FP16 inference.
diff --git a/configs/keypoint/tiny_pose/keypoint/tinypose_128x96.yml b/configs/keypoint/tiny_pose/tinypose_128x96.yml
similarity index 98%
rename from configs/keypoint/tiny_pose/keypoint/tinypose_128x96.yml
rename to configs/keypoint/tiny_pose/tinypose_128x96.yml
index a9ee77e4e..e213c2990 100644
--- a/configs/keypoint/tiny_pose/keypoint/tinypose_128x96.yml
+++ b/configs/keypoint/tiny_pose/tinypose_128x96.yml
@@ -77,7 +77,7 @@ EvalDataset:
     trainsize: *trainsize
     pixel_std: *pixel_std
     use_gt_bbox: True
-    image_thre: 0.0
+    image_thre: 0.5
 
 TestDataset:
   !ImageFolder
@@ -144,4 +144,4 @@ TestReader:
       is_scale: true
   - Permute: {}
   batch_size: 1
-  fuse_normalize: true
+  fuse_normalize: false
diff --git a/configs/keypoint/tiny_pose/keypoint/tinypose_256x192.yml b/configs/keypoint/tiny_pose/tinypose_256x192.yml
similarity index 98%
rename from configs/keypoint/tiny_pose/keypoint/tinypose_256x192.yml
rename to configs/keypoint/tiny_pose/tinypose_256x192.yml
index 01c57212f..9de2a635f 100644
--- a/configs/keypoint/tiny_pose/keypoint/tinypose_256x192.yml
+++ b/configs/keypoint/tiny_pose/tinypose_256x192.yml
@@ -77,7 +77,7 @@ EvalDataset:
     trainsize: *trainsize
     pixel_std: *pixel_std
    use_gt_bbox: True
-    image_thre: 0.0
+    image_thre: 0.5
 
 TestDataset:
   !ImageFolder
@@ -144,4 +144,4 @@ TestReader:
      is_scale: true
  - Permute: {}
  batch_size: 1
-  fuse_normalize: true
+  fuse_normalize: false
diff --git a/configs/picodet/application/pedestrian_detection/picodet_s_192_pedestrian.yml b/configs/picodet/application/pedestrian_detection/picodet_s_192_pedestrian.yml
new file mode 100644
index 000000000..94dfa7ba9
--- /dev/null
+++ b/configs/picodet/application/pedestrian_detection/picodet_s_192_pedestrian.yml
@@ -0,0 +1,143 @@
+use_gpu: true
+log_iter: 20
+save_dir: output
+snapshot_epoch: 1
+print_flops: false
+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ESNet_x0_75_pretrained.pdparams
+weights: output/picodet_s_192_pedestrian/model_final
+find_unused_parameters: True
+use_ema: true
+cycle_epoch: 40
+snapshot_epoch: 10
+epoch: 300
+metric: COCO
+num_classes: 1
+
+architecture: PicoDet
+
+PicoDet: + backbone: ESNet + neck: CSPPAN + head: PicoHead + +ESNet: + scale: 0.75 + feature_maps: [4, 11, 14] + act: hard_swish + channel_ratio: [0.875, 0.5, 0.5, 0.5, 0.625, 0.5, 0.625, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5] + +CSPPAN: + out_channels: 96 + use_depthwise: True + num_csp_blocks: 1 + num_features: 4 + +PicoHead: + conv_feat: + name: PicoFeat + feat_in: 96 + feat_out: 96 + num_convs: 2 + num_fpn_stride: 4 + norm_type: bn + share_cls_reg: True + fpn_stride: [8, 16, 32, 64] + feat_in_chan: 96 + prior_prob: 0.01 + reg_max: 7 + cell_offset: 0.5 + loss_class: + name: VarifocalLoss + use_sigmoid: True + iou_weighted: True + loss_weight: 1.0 + loss_dfl: + name: DistributionFocalLoss + loss_weight: 0.25 + loss_bbox: + name: GIoULoss + loss_weight: 2.0 + assigner: + name: SimOTAAssigner + candidate_topk: 10 + iou_weight: 6 + nms: + name: MultiClassNMS + nms_top_k: 1000 + keep_top_k: 100 + score_threshold: 0.025 + nms_threshold: 0.6 + +LearningRate: + base_lr: 0.4 + schedulers: + - !CosineDecay + max_epochs: 300 + - !LinearWarmup + start_factor: 0.1 + steps: 300 + +OptimizerBuilder: + optimizer: + momentum: 0.9 + type: Momentum + regularizer: + factor: 0.00004 + type: L2 + +TrainDataset: + !COCODataSet + image_dir: "" + anno_path: aic_coco_train_cocoformat.json + dataset_dir: dataset + data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd'] + +EvalDataset: + !COCODataSet + image_dir: val2017 + anno_path: annotations/instances_val2017.json + dataset_dir: dataset/coco + +TestDataset: + !ImageFolder + anno_path: annotations/instances_val2017.json + +worker_num: 8 +TrainReader: + sample_transforms: + - Decode: {} + - RandomCrop: {} + - RandomFlip: {prob: 0.5} + - RandomDistort: {} + batch_transforms: + - BatchRandomResize: {target_size: [128, 160, 192, 224, 256], random_size: True, random_interp: True, keep_ratio: False} + - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]} + - Permute: {} + batch_size: 128 + shuffle: true + drop_last: true + collate_batch: false + +EvalReader: + sample_transforms: + - Decode: {} + - Resize: {interp: 2, target_size: [192, 192], keep_ratio: False} + - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]} + - Permute: {} + batch_transforms: + - PadBatch: {pad_to_stride: 32} + batch_size: 8 + shuffle: false + +TestReader: + inputs_def: + image_shape: [1, 3, 192, 192] + sample_transforms: + - Decode: {} + - Resize: {interp: 2, target_size: [192, 192], keep_ratio: False} + - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]} + - Permute: {} + batch_transforms: + - PadBatch: {pad_to_stride: 32} + batch_size: 1 + shuffle: false diff --git a/configs/keypoint/tiny_pose/pedestrian_detection/picodet_s_320_pedestrian.yml b/configs/picodet/application/pedestrian_detection/picodet_s_320_pedestrian.yml similarity index 100% rename from configs/keypoint/tiny_pose/pedestrian_detection/picodet_s_320_pedestrian.yml rename to configs/picodet/application/pedestrian_detection/picodet_s_320_pedestrian.yml diff --git a/deploy/python/det_keypoint_unite_infer.py b/deploy/python/det_keypoint_unite_infer.py index 6b81a6f78..b5be15f98 100644 --- a/deploy/python/det_keypoint_unite_infer.py +++ b/deploy/python/det_keypoint_unite_infer.py @@ -21,7 +21,7 @@ import paddle from det_keypoint_unite_utils import argsparser from preprocess import decode_image -from infer import Detector, PredictConfig, print_arguments, get_test_images +from infer import Detector, DetectorPicoDet, PredictConfig, 
print_arguments, get_test_images from keypoint_infer import KeyPoint_Detector, PredictConfig_KeyPoint from visualize import draw_pose from benchmark_utils import PaddleInferBenchmark @@ -183,17 +183,20 @@ def topdown_unite_predict_video(detector, def main(): pred_config = PredictConfig(FLAGS.det_model_dir) - detector = Detector( - pred_config, - FLAGS.det_model_dir, - device=FLAGS.device, - run_mode=FLAGS.run_mode, - trt_min_shape=FLAGS.trt_min_shape, - trt_max_shape=FLAGS.trt_max_shape, - trt_opt_shape=FLAGS.trt_opt_shape, - trt_calib_mode=FLAGS.trt_calib_mode, - cpu_threads=FLAGS.cpu_threads, - enable_mkldnn=FLAGS.enable_mkldnn) + detector_func = 'Detector' + if pred_config.arch == 'PicoDet': + detector_func = 'DetectorPicoDet' + + detector = eval(detector_func)(pred_config, + FLAGS.det_model_dir, + device=FLAGS.device, + run_mode=FLAGS.run_mode, + trt_min_shape=FLAGS.trt_min_shape, + trt_max_shape=FLAGS.trt_max_shape, + trt_opt_shape=FLAGS.trt_opt_shape, + trt_calib_mode=FLAGS.trt_calib_mode, + cpu_threads=FLAGS.cpu_threads, + enable_mkldnn=FLAGS.enable_mkldnn) pred_config = PredictConfig_KeyPoint(FLAGS.keypoint_model_dir) assert KEYPOINT_SUPPORT_MODELS[ -- GitLab